code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
example1-simpleloop
~~~~~~~~~~~~~~~~~~~
This example shows how to use the loop block backend and frontend.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# From lantz, you import a helper function.
from lantz.ui.app import start_gui_app
# and the loop block and its user interface
from lantz.ui.blocks import Loop, LoopUi
# the drivers you need (In this case just simulated dummy drivers).
from lantz.drivers.examples.dummydrivers import DummyOsci
# Drivers are instantiated in the usual way.
osci = DummyOsci('COM2')
# You create a function that will be called by the loop
# It requires three parameters
# counter - the iteration number
# iterations - total number of iterations
# overrun - a boolean indicating if the time required for the operation
# is longer than the interval.
def measure(counter, iterations, overrun):
print(counter, iterations, overrun)
data = osci.measure()
print(data)
# You instantiate the loop
app = Loop()
# and assign the function to the body of the loop
app.body = measure
# Finally you start the program
start_gui_app(app, LoopUi)
# This contains a very complete GUI for a loop you can easily create a customized version!
|
varses/awsch
|
examples/using_blocks/example1-simpleloop.py
|
Python
|
bsd-3-clause
| 1,304
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.is_mysqldba_oncall'
db.add_column(u'user_profiles', 'is_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.is_pgsqldba_oncall'
db.add_column(u'user_profiles', 'is_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_mysqldba_oncall'
db.add_column(u'user_profiles', 'current_mysqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'UserProfile.current_pgsqldba_oncall'
db.add_column(u'user_profiles', 'current_pgsqldba_oncall',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.is_mysqldba_oncall'
db.delete_column(u'user_profiles', 'is_mysqldba_oncall')
# Deleting field 'UserProfile.is_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'is_pgsqldba_oncall')
# Deleting field 'UserProfile.current_mysqldba_oncall'
db.delete_column(u'user_profiles', 'current_mysqldba_oncall')
# Deleting field 'UserProfile.current_pgsqldba_oncall'
db.delete_column(u'user_profiles', 'current_pgsqldba_oncall')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dhcp.dhcp': {
'Meta': {'object_name': 'DHCP', 'db_table': "u'dhcp_scopes'"},
'allow_booting': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'allow_bootp': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'option_domain_name_servers': ('django.db.models.fields.CharField', [], {'max_length': '48', 'null': 'True', 'blank': 'True'}),
'option_ntp_servers': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'option_routers': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'option_subnet_mask': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'pool_deny_dynamic_bootp_agents': ('django.db.models.fields.IntegerField', [], {'max_length': '32'}),
'pool_range_end': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'pool_range_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'scope_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scope_netmask': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'scope_notes': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'scope_start': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'systems.advisorydata': {
'Meta': {'object_name': 'AdvisoryData', 'db_table': "u'advisory_data'"},
'advisory': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'references': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'severity': ('django.db.models.fields.FloatField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'systems.allocation': {
'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.contract': {
'Meta': {'object_name': 'Contract', 'db_table': "u'contracts'"},
'contract_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'contract_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'support_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.keyvalue': {
'Meta': {'object_name': 'KeyValue', 'db_table': "u'key_value'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'systems.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'systems.mac': {
'Meta': {'object_name': 'Mac', 'db_table': "u'macs'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '17'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.networkadapter': {
'Meta': {'object_name': 'NetworkAdapter', 'db_table': "u'network_adapters'"},
'adapter_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'dhcp_scope': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dhcp.DHCP']", 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mac_address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'option_domain_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_host_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'switch_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'switch_port': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'system_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.operatingsystem': {
'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.portdata': {
'Meta': {'object_name': 'PortData', 'db_table': "u'port_data'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '13', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'systems.scheduledtask': {
'Meta': {'ordering': "['task']", 'object_name': 'ScheduledTask', 'db_table': "u'scheduled_tasks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'systems.servermodel': {
'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.system': {
'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'systems.systemchangelog': {
'Meta': {'object_name': 'SystemChangeLog', 'db_table': "u'systems_change_log'"},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {}),
'changed_text': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']"})
},
'systems.systemrack': {
'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemstatus': {
'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.systemtype': {
'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'systems.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "u'user_profiles'"},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'current_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'current_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epager_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_nick': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'is_desktop_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_mysqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_pgsqldba_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_services_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sysadmin_oncall': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pager_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pager_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['systems']
|
mozilla/inventory
|
systems/migrations/0002_auto__add_field_userprofile_is_mysqldba_oncall__add_field_userprofile_.py
|
Python
|
bsd-3-clause
| 21,325
|
from zope.interface import implementer
from six import iteritems
from twisted.internet.defer import DeferredQueue, inlineCallbacks, maybeDeferred, returnValue
from .utils import get_spider_queues
from .interfaces import IPoller
@implementer(IPoller)
class QueuePoller(object):
def __init__(self, config):
self.config = config
self.update_projects()
self.dq = DeferredQueue()
@inlineCallbacks
def poll(self):
if not self.dq.waiting:
return
for p, q in iteritems(self.queues):
c = yield maybeDeferred(q.count)
if c:
msg = yield maybeDeferred(q.pop)
if msg is not None: # In case of a concurrently accessed queue
returnValue(self.dq.put(self._message(msg, p)))
def next(self):
return self.dq.get()
def update_projects(self):
self.queues = get_spider_queues(self.config)
def _message(self, queue_msg, project):
d = queue_msg.copy()
d['_project'] = project
d['_spider'] = d.pop('name')
return d
|
wujuguang/scrapyd
|
scrapyd/poller.py
|
Python
|
bsd-3-clause
| 1,098
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import jam.webserver
from jam.server import server
jam.webserver.run(server)
|
dnacreative/jam-py
|
tests/server.py
|
Python
|
bsd-3-clause
| 232
|
__author__ = "John Kirkham <kirkhamj@janelia.hhmi.org>"
__date__ = "$Mar 30, 2015 08:25:33 EDT$"
import collections
import json
import os
import os.path
import shutil
import tempfile
import numpy
import h5py
import vigra
import vigra.impex
import nanshe.util.iters
import nanshe.util.xnumpy
import nanshe.io.xtiff
import nanshe.converter
class TestConverter(object):
def setup(self):
self.temp_dir = ""
self.filedata = collections.OrderedDict()
self.data = None
self.data = numpy.random.random_integers(0, 255, (1000, 1, 102, 101, 1)).astype(numpy.uint8)
self.temp_dir = tempfile.mkdtemp()
for i, i_str, (a_b, a_e) in nanshe.util.iters.filled_stringify_enumerate(
nanshe.util.iters.izip(
*nanshe.util.iters.lagged_generators(
nanshe.util.iters.irange(
0,
self.data.shape[0] + 100 - 1,
100
)
)
)
):
each_filename = os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif")
each_data = self.data[a_b:a_e]
self.filedata[each_filename] = each_data
vigra.impex.writeVolume(nanshe.util.xnumpy.tagging_reorder_array(each_data, to_axis_order="czyxt")[0, 0],
os.path.join(self.temp_dir, "test_tiff_" + str(i) + ".tif"), "")
def test_main(self):
params = {
"axis" : 0,
"channel" : 0,
"z_index" : 0,
"pages_to_channel" : 1
}
config_filename = os.path.join(self.temp_dir, "config.json")
hdf5_filename = os.path.join(self.temp_dir, "test.h5")
hdf5_filepath = hdf5_filename + "/data"
with open(config_filename, "w") as fid:
json.dump(params, fid)
fid.write("\n")
main_args = ["./converter.py"] + ["tiff"] + [config_filename] + list(self.filedata.keys()) + [hdf5_filepath]
assert (nanshe.converter.main(*main_args) == 0)
assert os.path.exists(hdf5_filename)
data = None
with h5py.File(hdf5_filename, "r") as hdf5_handle:
data = hdf5_handle["data"].value
self_data_h5 = nanshe.util.xnumpy.tagging_reorder_array(self.data, to_axis_order="cztyx")[0, 0]
assert (data == self_data_h5).all()
os.remove(hdf5_filename)
def teardown(self):
shutil.rmtree(self.temp_dir)
self.temp_dir = ""
self.filedata = collections.OrderedDict()
self.data = None
|
DudLab/nanshe
|
tests/test_nanshe/test_converter.py
|
Python
|
bsd-3-clause
| 2,908
|
# -*- coding: utf-8 -*-
"""
.. _tut-overview:
Overview of MEG/EEG analysis with MNE-Python
============================================
This tutorial covers the basic EEG/MEG pipeline for event-related analysis:
loading data, epoching, averaging, plotting, and estimating cortical activity
from sensor data. It introduces the core MNE-Python data structures
`~mne.io.Raw`, `~mne.Epochs`, `~mne.Evoked`, and `~mne.SourceEstimate`, and
covers a lot of ground fairly quickly (at the expense of depth). Subsequent
tutorials address each of these topics in greater detail.
.. contents:: Page contents
:local:
:depth: 1
We begin by importing the necessary Python modules:
"""
import os
import numpy as np
import mne
###############################################################################
# Loading data
# ^^^^^^^^^^^^
#
# MNE-Python data structures are based around the FIF file format from
# Neuromag, but there are reader functions for :ref:`a wide variety of other
# data formats <data-formats>`. MNE-Python also has interfaces to a
# variety of :ref:`publicly available datasets <datasets>`,
# which MNE-Python can download and manage for you.
#
# We'll start this tutorial by loading one of the example datasets (called
# ":ref:`sample-dataset`"), which contains EEG and MEG data from one subject
# performing an audiovisual experiment, along with structural MRI scans for
# that subject. The `mne.datasets.sample.data_path` function will automatically
# download the dataset if it isn't found in one of the expected locations, then
# return the directory path to the dataset (see the documentation of
# `~mne.datasets.sample.data_path` for a list of places it checks before
# downloading). Note also that for this tutorial to run smoothly on our
# servers, we're using a filtered and downsampled version of the data
# (:file:`sample_audvis_filt-0-40_raw.fif`), but an unfiltered version
# (:file:`sample_audvis_raw.fif`) is also included in the sample dataset and
# could be substituted here when running the tutorial locally.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
###############################################################################
# By default, `~mne.io.read_raw_fif` displays some information about the file
# it's loading; for example, here it tells us that there are four "projection
# items" in the file along with the recorded data; those are :term:`SSP
# projectors <projector>` calculated to remove environmental noise from the MEG
# signals, plus a projector to mean-reference the EEG channels; these are
# discussed in the tutorial :ref:`tut-projectors-background`. In addition to
# the information displayed during loading, you can get a glimpse of the basic
# details of a `~mne.io.Raw` object by printing it; even more is available by
# printing its ``info`` attribute (a `dictionary-like object <mne.Info>` that
# is preserved across `~mne.io.Raw`, `~mne.Epochs`, and `~mne.Evoked` objects).
# The ``info`` data structure keeps track of channel locations, applied
# filters, projectors, etc. Notice especially the ``chs`` entry, showing that
# MNE-Python detects different sensor types and handles each appropriately. See
# :ref:`tut-info-class` for more on the `~mne.Info` class.
print(raw)
print(raw.info)
###############################################################################
# `~mne.io.Raw` objects also have several built-in plotting methods; here we
# show the power spectral density (PSD) for each sensor type with
# `~mne.io.Raw.plot_psd`, as well as a plot of the raw sensor traces with
# `~mne.io.Raw.plot`. In the PSD plot, we'll only plot frequencies below 50 Hz
# (since our data are low-pass filtered at 40 Hz). In interactive Python
# sessions, `~mne.io.Raw.plot` is interactive and allows scrolling, scaling,
# bad channel marking, annotation, projector toggling, etc.
raw.plot_psd(fmax=50)
raw.plot(duration=5, n_channels=30)
###############################################################################
# Preprocessing
# ^^^^^^^^^^^^^
#
# MNE-Python supports a variety of preprocessing approaches and techniques
# (maxwell filtering, signal-space projection, independent components analysis,
# filtering, downsampling, etc); see the full list of capabilities in the
# :mod:`mne.preprocessing` and :mod:`mne.filter` submodules. Here we'll clean
# up our data by performing independent components analysis
# (`~mne.preprocessing.ICA`); for brevity we'll skip the steps that helped us
# determined which components best capture the artifacts (see
# :ref:`tut-artifact-ica` for a detailed walk-through of that process).
# set up and fit the ICA
ica = mne.preprocessing.ICA(n_components=20, random_state=97, max_iter=800)
ica.fit(raw)
ica.exclude = [1, 2] # details on how we picked these are omitted here
ica.plot_properties(raw, picks=ica.exclude)
###############################################################################
# Once we're confident about which component(s) we want to remove, we pass them
# as the ``exclude`` parameter and then apply the ICA to the raw signal. The
# `~mne.preprocessing.ICA.apply` method requires the raw data to be loaded into
# memory (by default it's only read from disk as-needed), so we'll use
# `~mne.io.Raw.load_data` first. We'll also make a copy of the `~mne.io.Raw`
# object so we can compare the signal before and after artifact removal
# side-by-side:
orig_raw = raw.copy()
raw.load_data()
ica.apply(raw)
# show some frontal channels to clearly illustrate the artifact removal
chs = ['MEG 0111', 'MEG 0121', 'MEG 0131', 'MEG 0211', 'MEG 0221', 'MEG 0231',
'MEG 0311', 'MEG 0321', 'MEG 0331', 'MEG 1511', 'MEG 1521', 'MEG 1531',
'EEG 001', 'EEG 002', 'EEG 003', 'EEG 004', 'EEG 005', 'EEG 006',
'EEG 007', 'EEG 008']
chan_idxs = [raw.ch_names.index(ch) for ch in chs]
orig_raw.plot(order=chan_idxs, start=12, duration=4)
raw.plot(order=chan_idxs, start=12, duration=4)
###############################################################################
# .. _overview-tut-events-section:
#
# Detecting experimental events
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The sample dataset includes several :term:`"STIM" channels <stim channel>`
# that recorded electrical signals sent from the stimulus delivery computer (as
# brief DC shifts / squarewave pulses). These pulses (often called "triggers")
# are used in this dataset to mark experimental events: stimulus onset,
# stimulus type, and participant response (button press). The individual STIM
# channels are combined onto a single channel, in such a way that voltage
# levels on that channel can be unambiguously decoded as a particular event
# type. On older Neuromag systems (such as that used to record the sample data)
# this summation channel was called ``STI 014``, so we can pass that channel
# name to the `mne.find_events` function to recover the timing and identity of
# the stimulus events.
events = mne.find_events(raw, stim_channel='STI 014')
print(events[:5]) # show the first 5
###############################################################################
# The resulting events array is an ordinary 3-column :class:`NumPy array
# <numpy.ndarray>`, with sample number in the first column and integer event ID
# in the last column; the middle column is usually ignored. Rather than keeping
# track of integer event IDs, we can provide an *event dictionary* that maps
# the integer IDs to experimental conditions or events. In this dataset, the
# mapping looks like this:
#
# .. _sample-data-event-dict-table:
#
# +----------+----------------------------------------------------------+
# | Event ID | Condition |
# +==========+==========================================================+
# | 1 | auditory stimulus (tone) to the left ear |
# +----------+----------------------------------------------------------+
# | 2 | auditory stimulus (tone) to the right ear |
# +----------+----------------------------------------------------------+
# | 3 | visual stimulus (checkerboard) to the left visual field |
# +----------+----------------------------------------------------------+
# | 4 | visual stimulus (checkerboard) to the right visual field |
# +----------+----------------------------------------------------------+
# | 5 | smiley face (catch trial) |
# +----------+----------------------------------------------------------+
# | 32 | subject button press |
# +----------+----------------------------------------------------------+
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'smiley': 5, 'buttonpress': 32}
###############################################################################
# Event dictionaries like this one are used when extracting epochs from
# continuous data; the ``/`` character in the dictionary keys allows pooling
# across conditions by requesting partial condition descriptors (i.e.,
# requesting ``'auditory'`` will select all epochs with Event IDs 1 and 2;
# requesting ``'left'`` will select all epochs with Event IDs 1 and 3). An
# example of this is shown in the next section. There is also a convenient
# `~mne.viz.plot_events` function for visualizing the distribution of events
# across the duration of the recording (to make sure event detection worked as
# expected). Here we'll also make use of the `~mne.Info` attribute to get the
# sampling frequency of the recording (so our x-axis will be in seconds instead
# of in samples).
fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw.info['sfreq'],
first_samp=raw.first_samp)
###############################################################################
# For paradigms that are not event-related (e.g., analysis of resting-state
# data), you can extract regularly spaced (possibly overlapping) spans of data
# by creating events using `mne.make_fixed_length_events` and then proceeding
# with epoching as described in the next section.
#
#
# .. _tut-section-overview-epoching:
#
# Epoching continuous data
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# The `~mne.io.Raw` object and the events array are the bare minimum needed to
# create an `~mne.Epochs` object, which we create with the `~mne.Epochs` class
# constructor. Here we'll also specify some data quality constraints: we'll
# reject any epoch where peak-to-peak signal amplitude is beyond reasonable
# limits for that channel type. This is done with a *rejection dictionary*; you
# may include or omit thresholds for any of the channel types present in your
# data. The values given here are reasonable for this particular dataset, but
# may need to be adapted for different hardware or recording conditions. For a
# more automated approach, consider using the `autoreject package`_.
reject_criteria = dict(mag=4000e-15, # 4000 fT
grad=4000e-13, # 4000 fT/cm
eeg=150e-6, # 150 µV
eog=250e-6) # 250 µV
###############################################################################
# We'll also pass the event dictionary as the ``event_id`` parameter (so we can
# work with easy-to-pool event labels instead of the integer event IDs), and
# specify ``tmin`` and ``tmax`` (the time relative to each event at which to
# start and end each epoch). As mentioned above, by default `~mne.io.Raw` and
# `~mne.Epochs` data aren't loaded into memory (they're accessed from disk only
# when needed), but here we'll force loading into memory using the
# ``preload=True`` parameter so that we can see the results of the rejection
# criteria being applied:
epochs = mne.Epochs(raw, events, event_id=event_dict, tmin=-0.2, tmax=0.5,
reject=reject_criteria, preload=True)
###############################################################################
# Next we'll pool across left/right stimulus presentations so we can compare
# auditory versus visual responses. To avoid biasing our signals to the left or
# right, we'll use `~mne.Epochs.equalize_event_counts` first to randomly sample
# epochs from each condition to match the number of epochs present in the
# condition with the fewest good epochs.
conds_we_care_about = ['auditory/left', 'auditory/right',
'visual/left', 'visual/right']
epochs.equalize_event_counts(conds_we_care_about) # this operates in-place
aud_epochs = epochs['auditory']
vis_epochs = epochs['visual']
del raw, epochs # free up memory
###############################################################################
# Like `~mne.io.Raw` objects, `~mne.Epochs` objects also have a number of
# built-in plotting methods. One is `~mne.Epochs.plot_image`, which shows each
# epoch as one row of an image map, with color representing signal magnitude;
# the average evoked response and the sensor location are shown below the
# image:
aud_epochs.plot_image(picks=['MEG 1332', 'EEG 021'])
##############################################################################
# .. note::
#
# Both `~mne.io.Raw` and `~mne.Epochs` objects have `~mne.Epochs.get_data`
# methods that return the underlying data as a
# :class:`NumPy array <numpy.ndarray>`. Both methods have a ``picks``
# parameter for subselecting which channel(s) to return; ``raw.get_data()``
# has additional parameters for restricting the time domain. The resulting
# matrices have dimension ``(n_channels, n_times)`` for `~mne.io.Raw` and
# ``(n_epochs, n_channels, n_times)`` for `~mne.Epochs`.
#
# Time-frequency analysis
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# The :mod:`mne.time_frequency` submodule provides implementations of several
# algorithms to compute time-frequency representations, power spectral density,
# and cross-spectral density. Here, for example, we'll compute for the auditory
# epochs the induced power at different frequencies and times, using Morlet
# wavelets. On this dataset the result is not especially informative (it just
# shows the evoked "auditory N100" response); see :ref:`here
# <inter-trial-coherence>` for a more extended example on a dataset with richer
# frequency content.
frequencies = np.arange(7, 30, 3)
power = mne.time_frequency.tfr_morlet(aud_epochs, n_cycles=2, return_itc=False,
freqs=frequencies, decim=3)
power.plot(['MEG 1332'])
###############################################################################
# Estimating evoked responses
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Now that we have our conditions in ``aud_epochs`` and ``vis_epochs``, we can
# get an estimate of evoked responses to auditory versus visual stimuli by
# averaging together the epochs in each condition. This is as simple as calling
# the `~mne.Epochs.average` method on the `~mne.Epochs` object, and then using
# a function from the :mod:`mne.viz` module to compare the global field power
# for each sensor type of the two `~mne.Evoked` objects:
aud_evoked = aud_epochs.average()
vis_evoked = vis_epochs.average()
mne.viz.plot_compare_evokeds(dict(auditory=aud_evoked, visual=vis_evoked),
legend='upper left', show_sensors='upper right')
###############################################################################
# We can also get a more detailed view of each `~mne.Evoked` object using other
# plotting methods such as `~mne.Evoked.plot_joint` or
# `~mne.Evoked.plot_topomap`. Here we'll examine just the EEG channels, and see
# the classic auditory evoked N100-P200 pattern over dorso-frontal electrodes,
# then plot scalp topographies at some additional arbitrary times:
# sphinx_gallery_thumbnail_number = 13
aud_evoked.plot_joint(picks='eeg')
aud_evoked.plot_topomap(times=[0., 0.08, 0.1, 0.12, 0.2], ch_type='eeg')
##############################################################################
# Evoked objects can also be combined to show contrasts between conditions,
# using the `mne.combine_evoked` function. A simple difference can be
# generated by passing ``weights=[1, -1]``. We'll then plot the difference wave
# at each sensor using `~mne.Evoked.plot_topo`:
evoked_diff = mne.combine_evoked([aud_evoked, vis_evoked], weights=[1, -1])
evoked_diff.pick_types(meg='mag').plot_topo(color='r', legend=False)
##############################################################################
# Inverse modeling
# ^^^^^^^^^^^^^^^^
#
# Finally, we can estimate the origins of the evoked activity by projecting the
# sensor data into this subject's :term:`source space` (a set of points either
# on the cortical surface or within the cortical volume of that subject, as
# estimated by structural MRI scans). MNE-Python supports lots of ways of doing
# this (dynamic statistical parametric mapping, dipole fitting, beamformers,
# etc.); here we'll use minimum-norm estimation (MNE) to generate a continuous
# map of activation constrained to the cortical surface. MNE uses a linear
# :term:`inverse operator` to project EEG+MEG sensor measurements into the
# source space. The inverse operator is computed from the
# :term:`forward solution` for this subject and an estimate of :ref:`the
# covariance of sensor measurements <tut_compute_covariance>`. For this
# tutorial we'll skip those computational steps and load a pre-computed inverse
# operator from disk (it's included with the :ref:`sample data
# <sample-dataset>`). Because this "inverse problem" is underdetermined (there
# is no unique solution), here we further constrain the solution by providing a
# regularization parameter specifying the relative smoothness of the current
# estimates in terms of a signal-to-noise ratio (where "noise" here is akin to
# baseline activity level across all of cortex).
# load inverse operator
inverse_operator_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-meg-oct-6-meg-inv.fif')
inv_operator = mne.minimum_norm.read_inverse_operator(inverse_operator_file)
# set signal-to-noise ratio (SNR) to compute regularization parameter (λ²)
snr = 3.
lambda2 = 1. / snr ** 2
# generate the source time course (STC)
stc = mne.minimum_norm.apply_inverse(vis_evoked, inv_operator,
lambda2=lambda2,
method='MNE') # or dSPM, sLORETA, eLORETA
##############################################################################
# Finally, in order to plot the source estimate on the subject's cortical
# surface we'll also need the path to the sample subject's structural MRI files
# (the ``subjects_dir``):
# path to subjects' MRI files
subjects_dir = os.path.join(sample_data_folder, 'subjects')
# plot
stc.plot(initial_time=0.1, hemi='split', views=['lat', 'med'],
subjects_dir=subjects_dir)
##############################################################################
# The remaining tutorials have *much more detail* on each of these topics (as
# well as many other capabilities of MNE-Python not mentioned here:
# connectivity analysis, encoding/decoding models, lots more visualization
# options, etc). Read on to learn more!
#
# .. LINKS
#
# .. _`autoreject package`: http://autoreject.github.io/
|
Eric89GXL/mne-python
|
tutorials/intro/plot_10_overview.py
|
Python
|
bsd-3-clause
| 19,522
|
# stdlib
import copy
import mock
import unittest
# project
from utils.service_discovery.config_stores import get_config_store
from utils.service_discovery.consul_config_store import ConsulStore
from utils.service_discovery.etcd_config_store import EtcdStore
from utils.service_discovery.abstract_config_store import AbstractConfigStore
from utils.service_discovery.sd_backend import get_sd_backend
from utils.service_discovery.sd_docker_backend import SDDockerBackend
def clear_singletons(agentConfig):
get_config_store(agentConfig)._drop()
get_sd_backend(agentConfig)._drop()
class Response(object):
"""Dummy response class for mocking purpose"""
def __init__(self, content):
self.content = content
def json(self):
return self.content
def raise_for_status(self):
pass
def _get_container_inspect(c_id):
"""Return a mocked container inspect dict from self.container_inspects."""
for co, _, _ in TestServiceDiscovery.container_inspects:
if co.get('Id') == c_id:
return co
return None
def _get_conf_tpls(image_name, trace_config=False):
"""Return a mocked configuration template from self.mock_templates."""
return copy.deepcopy(TestServiceDiscovery.mock_templates.get(image_name)[0])
def _get_check_tpls(image_name, **kwargs):
if image_name in TestServiceDiscovery.mock_templates:
return [copy.deepcopy(TestServiceDiscovery.mock_templates.get(image_name)[0][0][0:3])]
elif image_name in TestServiceDiscovery.bad_mock_templates:
try:
return [copy.deepcopy(TestServiceDiscovery.bad_mock_templates.get(image_name))]
except Exception:
return None
def client_read(path):
"""Return a mocked string that would normally be read from a config store (etcd, consul...)."""
parts = path.split('/')
config_parts = ['check_names', 'init_configs', 'instances']
image, config_part = parts[-2], parts[-1]
return TestServiceDiscovery.mock_tpls.get(image)[0][config_parts.index(config_part)]
class TestServiceDiscovery(unittest.TestCase):
docker_container_inspect = {
u'Id': u'69ff25598b2314d1cdb7752cc3a659fb1c1352b32546af4f1454321550e842c0',
u'Image': u'6ffc02088cb870652eca9ccd4c4fb582f75b29af2879792ed09bb46fd1c898ef',
u'Name': u'/nginx',
u'NetworkSettings': {u'IPAddress': u'172.17.0.21', u'Ports': {u'443/tcp': None, u'80/tcp': None}}
}
kubernetes_container_inspect = {
u'Id': u'389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9',
u'Image': u'de309495e6c7b2071bc60c0b7e4405b0d65e33e3a4b732ad77615d90452dd827',
u'Name': u'/k8s_sentinel.38057ab9_redis-master_default_27b84e1e-a81c-11e5-8347-42010af00002_f70875a1',
u'Config': {u'ExposedPorts': {u'6379/tcp': {}}},
u'NetworkSettings': {u'IPAddress': u'', u'Ports': None}
}
malformed_container_inspect = {
u'Id': u'69ff25598b2314d1cdb7752cc3a659fb1c1352b32546af4f1454321550e842c0',
u'Image': u'6ffc02088cb870652eca9ccd4c4fb582f75b29af2879792ed09bb46fd1c898ef',
u'Name': u'/nginx'
}
container_inspects = [
# (inspect_dict, expected_ip, expected_port)
(docker_container_inspect, '172.17.0.21', ['80', '443']),
(kubernetes_container_inspect, None, ['6379']), # arbitrarily defined in the mocked pod_list
(malformed_container_inspect, None, KeyError)
]
# templates with variables already extracted
mock_templates = {
# image_name: ([(check_name, init_tpl, instance_tpl, variables)], (expected_config_template))
'image_0': (
[('check_0', {}, {'host': '%%host%%'}, ['host'])],
('check_0', {}, {'host': '127.0.0.1'})),
'image_1': (
[('check_1', {}, {'port': '%%port%%'}, ['port'])],
('check_1', {}, {'port': '1337'})),
'image_2': (
[('check_2', {}, {'host': '%%host%%', 'port': '%%port%%'}, ['host', 'port'])],
('check_2', {}, {'host': '127.0.0.1', 'port': '1337'})),
}
# raw templates coming straight from the config store
mock_tpls = {
# image_name: ('[check_name]', '[init_tpl]', '[instance_tpl]', expected_python_tpl_list)
'image_0': (
('["check_0"]', '[{}]', '[{"host": "%%host%%"}]'),
[('check_0', {}, {"host": "%%host%%"})]),
'image_1': (
('["check_1"]', '[{}]', '[{"port": "%%port%%"}]'),
[('check_1', {}, {"port": "%%port%%"})]),
'image_2': (
('["check_2"]', '[{}]', '[{"host": "%%host%%", "port": "%%port%%"}]'),
[('check_2', {}, {"host": "%%host%%", "port": "%%port%%"})]),
'bad_image_0': ((['invalid template']), []),
'bad_image_1': (('invalid template'), []),
'bad_image_2': (None, [])
}
bad_mock_templates = {
'bad_image_0': ('invalid template'),
'bad_image_1': [('invalid template')],
'bad_image_2': None
}
def setUp(self):
self.etcd_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'sd_config_backend': 'etcd',
'sd_backend_host': '127.0.0.1',
'sd_backend_port': '2380'
}
self.consul_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'sd_config_backend': 'consul',
'sd_backend_host': '127.0.0.1',
'sd_backend_port': '8500'
}
self.auto_conf_agentConfig = {
'service_discovery': True,
'service_discovery_backend': 'docker',
'sd_template_dir': '/datadog/check_configs',
'additional_checksd': '/etc/dd-agent/checks.d/',
}
self.agentConfigs = [self.etcd_agentConfig, self.consul_agentConfig, self.auto_conf_agentConfig]
# sd_backend tests
@mock.patch('utils.http.requests.get')
@mock.patch('utils.kubeutil.check_yaml')
def test_get_host(self, mock_check_yaml, mock_get):
kubernetes_config = {'instances': [{'kubelet_port': 1337}]}
pod_list = {
'items': [{
'status': {
'podIP': '127.0.0.1',
'containerStatuses': [
{'containerID': 'docker://389dc8a4361f3d6c866e9e9a7b6972b26a31c589c4e2f097375d55656a070bc9'}
]
}
}]
}
mock_check_yaml.return_value = kubernetes_config
mock_get.return_value = Response(pod_list)
for c_ins, expected_ip, _ in self.container_inspects:
with mock.patch.object(AbstractConfigStore, '__init__', return_value=None):
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch('utils.kubeutil.get_conf_path', return_value=None):
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
self.assertEqual(sd_backend._get_host(c_ins), expected_ip)
clear_singletons(self.auto_conf_agentConfig)
def test_get_ports(self):
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for c_ins, _, expected_ports in self.container_inspects:
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
if isinstance(expected_ports, list):
self.assertEqual(sd_backend._get_ports(c_ins), expected_ports)
else:
self.assertRaises(expected_ports, sd_backend._get_ports, c_ins)
clear_singletons(self.auto_conf_agentConfig)
@mock.patch('docker.Client.inspect_container', side_effect=_get_container_inspect)
@mock.patch.object(SDDockerBackend, '_get_config_templates', side_effect=_get_conf_tpls)
def test_get_check_configs(self, mock_inspect_container, mock_get_conf_tpls):
"""Test get_check_config with mocked container inspect and config template"""
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch.object(SDDockerBackend, '_get_host', return_value='127.0.0.1'):
with mock.patch.object(SDDockerBackend, '_get_ports', return_value=['1337']):
c_id = self.docker_container_inspect.get('Id')
for image in self.mock_templates.keys():
sd_backend = get_sd_backend(agentConfig=self.auto_conf_agentConfig)
self.assertEquals(
sd_backend._get_check_configs(c_id, image)[0],
self.mock_templates[image][1])
clear_singletons(self.auto_conf_agentConfig)
@mock.patch.object(AbstractConfigStore, 'get_check_tpls', side_effect=_get_check_tpls)
def test_get_config_templates(self, mock_get_check_tpls):
"""Test _get_config_templates with mocked get_check_tpls"""
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
with mock.patch.object(EtcdStore, 'get_client', return_value=None):
with mock.patch.object(ConsulStore, 'get_client', return_value=None):
for agentConfig in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=agentConfig)
# normal cases
for image in self.mock_templates.keys():
template = sd_backend._get_config_templates(image)
expected_template = self.mock_templates.get(image)[0]
self.assertEquals(template, expected_template)
# error cases
for image in self.bad_mock_templates.keys():
self.assertEquals(sd_backend._get_config_templates(image), None)
clear_singletons(agentConfig)
def test_render_template(self):
"""Test _render_template"""
valid_configs = [
(({}, {'host': '%%host%%'}, {'host': 'foo'}),
({}, {'host': 'foo'})),
(({}, {'host': '%%host%%', 'port': '%%port%%'}, {'host': 'foo', 'port': '1337'}),
({}, {'host': 'foo', 'port': '1337'})),
(({'foo': '%%bar%%'}, {}, {'bar': 'w00t'}),
({'foo': 'w00t'}, {})),
(({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'bar': 'w00t', 'host': 'localhost'}),
({'foo': 'w00t'}, {'host': 'localhost'}))
]
invalid_configs = [
({}, {'host': '%%host%%'}, {}), # no value to use
({}, {'host': '%%host%%'}, {'port': 42}), # the variable name doesn't match
({'foo': '%%bar%%'}, {'host': '%%host%%'}, {'host': 'foo'}) # not enough value/no matching var name
]
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for agentConfig in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=agentConfig)
for tpl, res in valid_configs:
init, instance, variables = tpl
config = sd_backend._render_template(init, instance, variables)
self.assertEquals(config, res)
for init, instance, variables in invalid_configs:
config = sd_backend._render_template(init, instance, variables)
self.assertEquals(config, None)
clear_singletons(agentConfig)
def test_fill_tpl(self):
"""Test _fill_tpl with mock _get_ports"""
valid_configs = [
# ((inspect, instance_tpl, variables, tags), (expected_instance_tpl, expected_var_values))
(
({}, {'host': 'localhost'}, [], None),
({'host': 'localhost'}, {})
),
(
({'NetworkSettings': {'IPAddress': '127.0.0.1'}},
{'host': '%%host%%', 'port': 1337}, ['host'], ['foo', 'bar:baz']),
({'host': '%%host%%', 'port': 1337, 'tags': ['foo', 'bar:baz']}, {'host': '127.0.0.1'})
),
(
({'NetworkSettings': {'IPAddress': '127.0.0.1', 'Ports': {'42/tcp': None, '22/tcp': None}}},
{'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test']},
['host', 'port_1'], ['foo', 'bar:baz']),
({'host': '%%host%%', 'port': '%%port_1%%', 'tags': ['env:test', 'foo', 'bar:baz']},
{'host': '127.0.0.1', 'port_1': '42'})
)
]
with mock.patch('utils.dockerutil.DockerUtil.client', return_value=None):
for ac in self.agentConfigs:
sd_backend = get_sd_backend(agentConfig=ac)
try:
for co in valid_configs:
inspect, tpl, variables, tags = co[0]
instance_tpl, var_values = sd_backend._fill_tpl(inspect, tpl, variables, tags)
for key in instance_tpl.keys():
if isinstance(instance_tpl[key], list):
self.assertEquals(len(instance_tpl[key]), len(co[1][0].get(key)))
for elem in instance_tpl[key]:
self.assertTrue(elem in co[1][0].get(key))
else:
self.assertEquals(instance_tpl[key], co[1][0].get(key))
self.assertEquals(var_values, co[1][1])
clear_singletons(ac)
except Exception:
clear_singletons(ac)
raise
# config_stores tests
def test_get_auto_config(self):
"""Test _get_auto_config"""
expected_tpl = {
'redis': ('redisdb', None, {"host": "%%host%%", "port": "%%port%%"}),
'consul': ('consul', None, {"url": "http://%%host%%:%%port%%", "catalog_checks": True, "new_leader_checks": True}),
'foobar': None
}
config_store = get_config_store(self.auto_conf_agentConfig)
for image in expected_tpl.keys():
config = config_store._get_auto_config(image)
self.assertEquals(config, expected_tpl.get(image))
@mock.patch.object(AbstractConfigStore, 'client_read', side_effect=client_read)
def test_get_check_tpls(self, mock_client_read):
"""Test get_check_tpls"""
valid_config = ['image_0', 'image_1', 'image_2']
invalid_config = ['bad_image_0', 'bad_image_1']
config_store = get_config_store(self.auto_conf_agentConfig)
for image in valid_config:
tpl = self.mock_tpls.get(image)[1]
self.assertEquals(tpl, config_store.get_check_tpls(image))
for image in invalid_config:
tpl = self.mock_tpls.get(image)[1]
self.assertEquals(tpl, config_store.get_check_tpls(image))
|
tebriel/dd-agent
|
tests/core/test_service_discovery.py
|
Python
|
bsd-3-clause
| 15,146
|
#! /usr/bin/env python
import os
import sys
import glob
version = (int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
def substitute_file(name):
subst = ''
f = open(name)
for l in f:
if '#define LIBTORRENT_VERSION_MAJOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MAJOR %d\n' % version[0]
elif '#define LIBTORRENT_VERSION_MINOR' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_MINOR %d\n' % version[1]
elif '#define LIBTORRENT_VERSION_TINY' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION_TINY %d\n' % version[2]
elif '#define LIBTORRENT_VERSION ' in l and name.endswith('.hpp'):
l = '#define LIBTORRENT_VERSION "%d.%d.%d.%d"\n' % (version[0], version[1], version[2], version[3])
elif 'AC_INIT([libtorrent-rasterbar]' in l and name.endswith('.ac'):
l = 'AC_INIT([libtorrent-rasterbar],[%d.%d.%d],[arvid@libtorrent.org],\n' % (version[0], version[1], version[2])
elif 'set (VERSION ' in l and name.endswith('.txt'):
l = 'set (VERSION "%d.%d.%d")\n' % (version[0], version[1], version[2])
elif ':Version: ' in l and (name.endswith('.rst') or name.endswith('.py')):
l = ':Version: %d.%d.%d\n' % (version[0], version[1], version[2])
elif 'VERSION = ' in l and name.endswith('Jamfile'):
l = 'VERSION = %d.%d.%d ;\n' % (version[0], version[1], version[2])
elif 'version=' in l and name.endswith('setup.py'):
l = "\tversion = '%d.%d.%d',\n" % (version[0], version[1], version[2])
elif "version = '" in l and name.endswith('setup.py'):
l = "\tversion = '%d.%d.%d',\n" % (version[0], version[1], version[2])
subst += l
f.close()
open(name, 'w+').write(subst)
substitute_file('include/libtorrent/version.hpp')
substitute_file('CMakeLists.txt')
substitute_file('configure.ac')
substitute_file('bindings/python/setup.py')
substitute_file('docs/gen_reference_doc.py')
for i in glob.glob('docs/*.rst'):
substitute_file(i)
substitute_file('Jamfile')
|
steeve/libtorrent
|
set_version.py
|
Python
|
bsd-3-clause
| 1,975
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Pause scene"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
from cocos.director import director
from cocos.layer import Layer, ColorLayer
from cocos.scene import Scene
import pyglet
from pyglet.gl import *
__pause_scene_generator__ = None
def get_pause_scene():
return __pause_scene_generator__()
def set_pause_scene_generator(generator):
global __pause_scene_generator__
__pause_scene_generator__ = generator
def default_pause_scene():
w, h = director.window.width, director.window.height
texture = pyglet.image.Texture.create_for_size(
GL_TEXTURE_2D, w, h, GL_RGBA)
texture.blit_into(pyglet.image.get_buffer_manager().get_color_buffer(), 0, 0, 0)
return PauseScene(texture.get_region(0, 0, w, h),
ColorLayer(25, 25, 25, 205), PauseLayer())
set_pause_scene_generator(default_pause_scene)
class PauseScene(Scene):
"""Pause Scene"""
def __init__(self, background, *layers):
super(PauseScene, self).__init__(*layers)
self.bg = background
self.width, self.height = director.get_window_size()
def draw(self):
self.bg.blit(0, 0, width=self.width, height=self.height)
super(PauseScene, self).draw()
class PauseLayer(Layer):
"""Layer that shows the text 'PAUSED'
"""
is_event_handler = True #: enable pyglet's events
def __init__(self):
super(PauseLayer, self).__init__()
x, y = director.get_window_size()
ft = pyglet.font.load('Arial', 36)
self.text = pyglet.font.Text(ft,
'PAUSED',
halign=pyglet.font.Text.CENTER)
self.text.x = x // 2
self.text.y = y // 2
def draw(self):
self.text.draw()
def on_key_press(self, k, m):
if k == pyglet.window.key.P and m & pyglet.window.key.MOD_ACCEL:
director.pop()
return True
|
dangillet/cocos
|
cocos/scenes/pause.py
|
Python
|
bsd-3-clause
| 3,820
|
# -*- coding: utf-8 -*-
# Copyright 2010-2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Copy Qt frameworks to the target application's frameworks directory.
Typical usage:
% python copy_qt_frameworks.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/Frameworks/
"""
__author__ = "horo"
import optparse
import os
from copy_file import CopyFiles
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
qtdir = os.path.abspath(opt.qtdir)
target = os.path.abspath(opt.target)
# Copies QtCore. For codesign, Info.plist should be copied to Resources/.
CopyFiles(['%s/lib/QtCore.framework/Versions/4/QtCore' % qtdir],
'%s/QtCore.framework/Versions/4/QtCore' % target)
CopyFiles(['%s/lib/QtCore.framework/Contents/Info.plist' % qtdir],
'%s/QtCore.framework/Resources/' % target)
# Copies QtGui. For codesign, Info.plist should be copied to Resources/.
CopyFiles(['%s/lib/QtGui.framework/Versions/4/QtGui' % qtdir],
'%s/QtGui.framework/Versions/4/QtGui' % target)
CopyFiles(['%s/lib/QtGui.framework/Contents/Info.plist' % qtdir],
'%s/QtGui.framework/Resources/' % target)
# Copies Resources of QtGui
CopyFiles(['%s/lib/QtGui.framework/Versions/4/Resources' % qtdir],
'%s/QtGui.framework/Resources' % target,
recursive=True)
# Changes QtGui id
cmd = ["install_name_tool", "-id",
"@executable_path/../Frameworks/QtGui.framework/Versions/4/QtGui",
"%s/QtGui.framework/Versions/4/QtGui" % target]
RunOrDie(cmd)
# Changes QtCore id
cmd = ["install_name_tool", "-id",
"@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore",
'%s/QtCore.framework/Versions/4/QtCore' % target]
RunOrDie(cmd)
# Changes the reference to QtCore framework from QtGui
cmd = ["install_name_tool", "-change",
"%s/lib/QtCore.framework/Versions/4/QtCore" % qtdir,
"@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore",
"%s/QtGui.framework/Versions/4/QtGui" % target]
RunOrDie(cmd)
if __name__ == '__main__':
main()
|
takahashikenichi/mozc
|
src/build_tools/copy_qt_frameworks_mac.py
|
Python
|
bsd-3-clause
| 4,010
|
import sys
import inspect
from functools import partial
__all__ = ['decorator', 'wraps', 'unwrap', 'ContextDecorator', 'contextmanager']
def decorator(deco):
# Any arguments after first become decorator arguments
has_args = get_argcounts(deco) != (1, False, False)
if has_args:
# A decorator with arguments is essentialy a decorator fab
def decorator_fab(*dargs, **dkwargs):
return make_decorator(deco, dargs, dkwargs)
return wraps(deco)(decorator_fab)
else:
return wraps(deco)(make_decorator(deco))
def make_decorator(deco, dargs=(), dkwargs={}):
def _decorator(func):
def wrapper(*args, **kwargs):
call = Call(func, args, kwargs)
return deco(call, *dargs, **dkwargs)
return wraps(func)(wrapper)
return _decorator
class Call(object):
"""
A call object to pass as first argument to decorator.
Call object is just a proxy for decorated function
with call arguments saved in its attributes.
"""
def __init__(self, func, args, kwargs):
self._func, self._args, self._kwargs = func, args, kwargs
def __call__(self, *a, **kw):
if not a and not kw:
return self._func(*self._args, **self._kwargs)
else:
return self._func(*(self._args + a), **dict(self._kwargs, **kw))
def __getattr__(self, name):
try:
res = self.__dict__[name] = arggetter(self._func)(name, self._args, self._kwargs)
return res
except TypeError as e:
raise AttributeError(*e.args)
def get_argcounts(func):
spec = inspect.getargspec(func)
return (len(spec.args), bool(spec.varargs), bool(spec.keywords))
def get_argnames(func):
func = getattr(func, '__original__', None) or unwrap(func)
return func.__code__.co_varnames[:func.__code__.co_argcount]
def arggetter(func, _cache={}):
if func in _cache:
return _cache[func]
argnames = get_argnames(func)
argcount = len(argnames)
def get_arg(name, args, kwargs):
if name not in argnames:
raise TypeError("%s() doesn't have argument named %s" % (func.__name__, name))
else:
if name in kwargs:
return kwargs[name]
elif name in argnames:
index = argnames.index(name)
if index < len(args):
return args[index]
else:
return func.__defaults__[index - argcount]
_cache[func] = get_arg
return get_arg
### Backport python 3.4 contextlib utilities
### namely ContextDecorator and contextmanager (also producing decorator)
if sys.version_info >= (3, 4):
from contextlib import ContextDecorator, contextmanager
else:
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""
A decorator helping to create context managers. Resulting functions also
behave as decorators.
A simple example::
@contextmanager
def tag(name):
print "<%s>" % name,
yield
print "</%s>" % name
with tag("h1"):
print "foo",
# -> <h1> foo </h1>
Using as decorator::
@tag('strong')
def shout(text):
print text.upper()
shout('hooray')
# -> <strong> HOORAY </strong>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
### Fix functools.wraps to make it safely work with callables without all the attributes
### We also add __original__ to it
from functools import WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Set it after to not gobble it in __dict__ update
wrapper.__wrapped__ = wrapped
# Set an original ref for faster and more convenient access
wrapper.__original__ = getattr(wrapped, '__original__', None) or unwrap(wrapped)
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""
An utility to pass function metadata from wrapped function to a wrapper.
Copies all function attributes including ``__name__``, ``__module__`` and
``__doc__``.
In addition adds ``__wrapped__`` attribute referring to the wrapped function
and ``__original__`` attribute referring to innermost wrapped one.
Mostly used to create decorators::
def some_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
do_something(*args, **kwargs)
return func(*args, **kwargs)
But see also :func:`decorator` for that.
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
### Backport of python 3.4 inspect.unwrap utility
try:
from inspect import unwrap
except ImportError:
# A simplified version, no stop keyword-only argument
def unwrap(func):
"""
Get the object wrapped by ``func``.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
"""
f = func # remember the original func for error reporting
memo = set([id(f)]) # Memoise by id to tolerate non-hashable objects
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
id_func = id(func)
if id_func in memo:
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo.add(id_func)
return func
|
musicpax/funcy
|
funcy/decorators.py
|
Python
|
bsd-3-clause
| 9,692
|
import numpy as np
from pyquante2.dft.functionals import xs,cvwn5
# Maybe move these to the functionals module and import from there?
xname = dict(lda=xs,xs=xs,svwn=xs)
cname = dict(lda=cvwn5,svwn=cvwn5,xs=None)
def get_xc(grid,D,**kwargs):
xcname = kwargs.get('xcname','lda')
# Does not work on either gradient corrected functionals or spin-polarized functionals yet.
xfunc = xname[xcname]
cfunc = cname[xcname]
rho = grid.getdens(D)
fx,dfxa = xfunc(rho)
if cfunc:
fc,dfca,dfcb = cfunc(rho,rho)
else:
fc=dfca=dfcb=0
w = grid.points[:,3]
Vxc = np.einsum('g,g,gI,gJ->IJ',w,dfxa+dfca,grid.bfamps,grid.bfamps)
# The fx comes from either the up or the down spin, whereas the fc comes from
# both (which is why x is called with either one, and c is called with both
Exc = np.dot(w,2*fx+fc)
return Exc,Vxc
|
berquist/pyquante2
|
pyquante2/dft/dft.py
|
Python
|
bsd-3-clause
| 899
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_tests.gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class ContextLostExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# AMD Radeon 6450
self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
['linux', ('amd', 0x6779)], bug=479975)
# Win7 bots
self.Flaky('ContextLost.WebGLContextLostFromGPUProcessExit',
['win7'], bug=603329)
# Win8 Release and Debug NVIDIA bots.
self.Skip('ContextLost.WebGLContextLostFromSelectElement',
['win8', 'nvidia'], bug=524808)
# Flaky on Mac 10.7 and 10.8 resulting in crashes during browser
# startup, so skip this test in those configurations.
self.Skip('ContextLost.WebGLContextLostFromSelectElement',
['mountainlion', 'debug'], bug=497411)
self.Skip('ContextLost.WebGLContextLostFromSelectElement',
['lion', 'debug'], bug=498149)
# 'Browser must support tab control' raised on Android
self.Fail('GpuCrash.GPUProcessCrashesExactlyOnce',
['android'], bug=609629)
self.Fail('ContextLost.WebGLContextLostFromGPUProcessExit',
['android'], bug=609629)
self.Fail('ContextLost.WebGLContextLostInHiddenTab',
['android'], bug=609629)
# Nexus 6
# The Nexus 6 times out on these tests while waiting for the JS to complete
self.Fail('ContextLost.WebGLContextLostFromLoseContextExtension',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=611906)
self.Fail('ContextLost.WebGLContextLostFromQuantity',
['android', ('qualcomm', 'Adreno (TM) 420')], bug=611906)
|
wuhengzhi/chromium-crosswalk
|
content/test/gpu/gpu_tests/context_lost_expectations.py
|
Python
|
bsd-3-clause
| 1,984
|
""" This module loads all the classes from the VTK IO library into its
namespace. This is a required module."""
import os
if os.name == 'posix':
from libvtkIOPython import *
else:
from vtkIOPython import *
|
naucoin/VTKSlicerWidgets
|
Wrapping/Python/vtk/io.py
|
Python
|
bsd-3-clause
| 217
|
# -*- coding: utf-8 -*-
"""
lantz.drivers.legacy.olympus.ixbx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When talking about the z-axis of a microscope, use "near" and "far" instead of "up" and "down." "Nearer" always means the objective ends closer to the sample; "farther" means the objective ends farther away. On an inverted microscope, "near" is up and "far" is down; on an upright microscope it is exactly the reverse. Better to use "near" and "far" to avoid confusion.
You can always get the current state of the system by sending the command you would use to change that state followed by ?. For example, to get the current objective position, send 1OB?. The microscope returns 1OB 3, say, if the current objective is position 3 on the nosepiece.
The microscope only understands positive integers, no negative numbers, no floating point. All distances are sent as positive integers measured in hundredths of a micron. All voltages are sent as tenths of a volt. Where negative numbers are needed, such as to specify relative motion, an extra argument is used to tell the microscope the sign of the number.
Sources::
- Olympus IX-81 Chassis Commands `link <http://madhadron.com/?p=89>`_
- Labview IX BX Series Driver `link <http://sine.ni.com/apps/utf8/niid_web_display.download_page?p_id_guid=0472CB8CEE4473B8E0440003BA7CCD71>`_
- Lantz reverse engineering
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from lantz import Feat, Action, Q_
from lantz.errors import InstrumentError
from lantz.drivers.legacy.serial import SerialDriver
# Physical units used by the IX/BX microscopes
DECIVOLT = Q_(0.1, 'V')
ZSTEP = Q_(0.01, 'micrometer')
# Booleans mappings used by the IX/BX microscopes
ON_OFF = {True: 'ON', False: 'OFF'}
IN_OUT = {True: 'IN', False: 'OUT'}
CLOSED_OPEN = {True: 'IN', False: 'OUT'}
ONE_ZERO = {True: '1', False: '0'}
ONE_TWO = {True: '1', False: '2'}
FH_FRM = {True: 'FH', False: 'FRM'}
EPI_DIA = {True: 'EPI', False: 'DIA'}
INTSTR = (int, str)
def ofeat(command, doc, **kwargs):
"""Build Feat
:param command: command root (without ?)
:param doc: docstring to be applied to the feature
"""
def _get(self):
response = self.query(command + '?')
return response
def _set(self, value):
self.query('{} {}'.format(command, value))
return Feat(_get, _set, doc=doc, **kwargs)
class IXBX(SerialDriver):
""" IX or BX Olympus microscope body.
"""
RECV_TERMINATION = '\r\n'
SEND_TERMINATION = '\r\n'
def __init__(self, port=1, baudrate=19200, bytesize=8, parity='Even',
stopbits=1, flow=0, timeout=None, write_timeout=None):
super().__init__(port, timeout=timeout, write_timeout=write_timeout,
baudrate=baudrate, bytesize=bytesize, parity=parity,
stopbits=stopbits, flow=flow)
self.send('1LOG IN\n')
self.send('2LOG IN')
def query(self, command, *, send_args=(None, None), recv_args=(None, None)):
"""Query the instrument and parse the response.
:raises: InstrumentError
"""
response = super().query(command, send_args=recv_args, recv_args=recv_args)
command = command.strip()[0]
if response in ('1x', '2x'):
raise InstrumentError("Unknown command: '{}'".format(command))
if not response.startswith(command):
raise InstrumentError("Unknown response: '{}'".format(response))
if response == 'X' or 'x':
raise InstrumentError('Unable to set')
elif not response == '+':
raise InstrumentError("Unknown response: '{}'".format(response))
return response
@Feat(read_once=True)
def idn(self):
"""Microscope identification
"""
return parse_response(self.query('1UNIT?'))
fluo_shutter = ofeat('1LED',
'External shutter for the fluorescent light source',
values=ONE_ZERO)
lamp_epi_enabled = ofeat('1LMPSEL',
'Illumination source lamp.',
values=EPI_DIA)
lamp_enabled = ofeat('1LMPSW',
'Turn the currently selected lamp onf and off',
values=ON_OFF)
lamp_intensity = ofeat('1LMP',
'Transmitted light intensity',
procs=(INTSTR, ))
def lamp_status(self):
#LMPSTS OK, X
pass
objective = ofeat('1OB',
'Objective nosepiece position',
procs=(INTSTR, ))
body_locked = ofeat('1LOG',
'Turn the currently selected lamp on and off',
values=ON_OFF)
focus_locked = ofeat('2LOG',
'Turn the currently selected lamp on and off',
values=ON_OFF)
@Feat(units=(ZSTEP, ZSTEP))
def soft_limits(self):
near = self.query('2NEARLMT?')
far = self.query('2FARLMT?')
return near, far
@soft_limits.setter
def soft_limits(self, near, far):
self.query('2NEARLMT {:d}'.format(near))
self.query('2FARLMT {:d}'.format(far))
move_to_start_enabled = ofeat('INITRET',
'Sets / cancels returning operation to the start '
'position after initializing the origin.',
values=ON_OFF)
jog_enabled = ofeat('JOG', 'Jog enabled', values=ON_OFF)
jog_sensitivity = ofeat('JOGSNS',' Jog sensitivity', procs=(INTSTR, ))
jog_dial = ofeat('JOGSEL', 'Jog selection (Handle/BLA) ???', values=FH_FRM)
jog_limit_enabled = ofeat('joglmt', 'Jog limit enabled', values=ON_OFF)
@Feat()
def movement_status(self):
return self.query('ZDRV?')
@Action(units=ZSTEP)
def move_relative(self, distance):
if distance == 0:
return
elif distance < 0:
distance = -distance
direction = 'N'
else:
direction = 'F'
self.query('2MOV {:s} {:d}'.format(distance, direction))
@Feat(units=ZSTEP)
def z(self):
"""Position of the objective.
"""
# OPTIMAL?? start accel, speed tenth of microns/s, end accel
return int(self.query('2POS'))
@z.setter
def z(self, value):
# OPTIMAL?? start accel, speed tenth of microns/s, end accel
self.query('2MOV D {:d}'.format(value))
def stop(self):
"""Stop any currently executing motion
"""
# Stop any currently executing motion. Always responds with 2STOP +.
# If there is a 2MOV command in progress,
# it also aborts and returns an error condition with 2MOV !,E02133.
self.query('2STOP')
def init_origin(self):
"""Init origin
"""
#INITORG
pass
class IX2(IXBX):
""" Olympus IX2 Body
"""
bottom_port_closed = ofeat('1BPORT', 'Bottom port', values=CLOSED_OPEN)
shutter1_closed = ofeat('SHUT1', 'Shutter', values=IN_OUT)
shutter2_closed = ofeat('SHUT2', 'Shutter', values=IN_OUT)
filter_wheel = ofeat('FW', 'Filter wheel position', procs=(INTSTR, ))
condensor = ofeat('CD', 'Condensor position', procs=(INTSTR, ))
mirror_unit = ofeat('MU', 'Mirror unit position', procs=(INTSTR, ))
camera_port_enabled= ofeat('PRISM', 'Prism position', values=ONE_TWO)
class BX2A(IXBX):
""" Olympus BX2A Body
"""
shutter_closed = ofeat('SHUTTER', 'Shutter RFAA', values=IN_OUT)
aperture_stop_diameter = ofeat('EAS', 'Aperture stop diameter (EPI AS RLAA)', procs=(INTSTR, ))
aperture_stop_diameter = ofeat('DAS', 'Aperture stop diameter (DIA AS UCD)', procs=(INTSTR, ))
condenser_top_lens_enabled = ofeat('CDTOP', 'Condenser top lens (UCD)', values=IN_OUT)
turret = ofeat('TURRET', 'Turret position (UCD)', procs=(INTSTR, ))
cube = ofeat('CUBE', 'Cube position (RFAA/RLAA)', procs=(INTSTR, ))
configure_filterwheel = ofeat('FW', 'Configure filterwheel', procs=(INTSTR, ))
|
varses/awsch
|
lantz/drivers/legacy/olympus/ixbx.py
|
Python
|
bsd-3-clause
| 8,205
|
from unicodecsv import DictReader
class CSVImporter(object):
""" A CSV-backed resource with the datas in it. """
def __init__(self, fh):
self.reader = DictReader(fh)
self.data = list(self.reader)
@property
def headers(self):
headers = set()
for row in self.data:
headers = headers.union(row.keys())
return headers
def __len__(self):
return len(self.data)
def __iter__(self):
return self.data.__iter__()
|
CodeForAfrica/grano
|
grano/lib/data.py
|
Python
|
mit
| 502
|
from __future__ import absolute_import, print_function, division
import petl as etl
table = [['foo', 'bar'],
['a', 1],
['b', None]]
# raises exception under Python 3
etl.select(table, 'bar', lambda v: v > 0)
# no error under Python 3
etl.selectgt(table, 'bar', 0)
# or ...
etl.select(table, 'bar', lambda v: v > etl.Comparable(0))
|
psnj/petl
|
examples/comparison.py
|
Python
|
mit
| 353
|
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
import numpy
class ClippedReLU(function.Function):
"""Clipped Rectifier Unit function.
Clipped ReLU is written as :math:`ClippedReLU(x, z) = \min(\max(0, x), z)`,
where :math:`z(>0)` is a parameter to cap return value of ReLU.
"""
def __init__(self, z):
if not isinstance(z, float):
raise TypeError('z must be float value')
# z must be positive.
assert z > 0
self.cap = z
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype == numpy.float32)
def forward_cpu(self, x):
return utils.force_array(numpy.minimum(
numpy.maximum(0, x[0]), self.cap)).astype(numpy.float32),
def backward_cpu(self, x, gy):
return utils.force_array(
gy[0] * (0 < x[0]) * (x[0] < self.cap)).astype(numpy.float32),
def forward_gpu(self, x):
return cuda.elementwise(
'T x, T cap', 'T y', 'y = min(max(x, (T)0), cap)',
'clipped_relu_fwd')(x[0], self.cap),
def backward_gpu(self, x, gy):
gx = cuda.elementwise(
'T x, T gy, T z', 'T gx',
'gx = ((x > 0) and (x < z))? gy : 0',
'clipped_relu_bwd')(x[0], gy[0], self.cap)
return gx,
def clipped_relu(x, z=20.0):
"""Clipped Rectifier Unit function.
This function is expressed as :math:`ClippedReLU(x, z)
= \min(\max(0, x), z)`, where :math:`z(>0)` is a clipping value.
Args:
x (~chainer.Variable): Input variable.
z (float): Clipping value. (default = 20.0)
Returns:
~chainer.Variable: Output variable.
"""
return ClippedReLU(z)(x)
|
masia02/chainer
|
chainer/functions/clipped_relu.py
|
Python
|
mit
| 1,841
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['website']
|
r-singh/Test2
|
webapp_project/website/migrations/0003_initial.py
|
Python
|
mit
| 351
|
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module contains practical examples of Docutils client code.
Importing this module from client code is not recommended; its contents are
subject to change in future Docutils releases. Instead, it is recommended
that you copy and paste the parts you need into your own code, modifying as
necessary.
"""
from docutils import core, io
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=True,
initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', output_encoding='unicode',
doctitle=True, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" .
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
if output_encoding != 'unicode':
fragment = fragment.encode(output_encoding)
return fragment
def internals(input_string, source_path=None, destination_path=None,
input_encoding='unicode', settings_overrides=None):
"""
Return the document tree and publisher, for exploring Docutils internals.
Parameters: see `html_parts()`.
"""
if settings_overrides:
overrides = settings_overrides.copy()
else:
overrides = {}
overrides['input_encoding'] = input_encoding
output, pub = core.publish_programmatically(
source_class=io.StringInput, source=input_string,
source_path=source_path,
destination_class=io.NullOutput, destination=None,
destination_path=destination_path,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='null',
settings=None, settings_spec=None, settings_overrides=overrides,
config_section=None, enable_exit_status=None)
return pub.writer.document, pub
|
Lyleo/OmniMarkupPreviewer
|
OmniMarkupLib/Renderers/libs/python3/docutils/examples.py
|
Python
|
mit
| 3,913
|
from django.contrib.auth.models import Permission
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from cms.api import create_page
from cms.constants import PUBLISHER_STATE_DIRTY
from cms.models import Page
from cms.test_utils.project.extensionapp.models import MyPageExtension, MyTitleExtension
from cms.test_utils.testcases import SettingsOverrideTestCase as TestCase
from cms.extensions import extension_pool
from cms.extensions import TitleExtension
from cms.extensions import PageExtension
from cms.tests import AdminTestsBase
from cms.compat import get_user_model
class ExtensionsTestCase(TestCase):
def test_register_extension(self):
initial_extension_count = len(extension_pool.page_extensions)
# --- None extension registering -----------------------------
from cms.exceptions import SubClassNeededError
none_extension = self.get_none_extension_class()
self.assertRaises(SubClassNeededError, extension_pool.register, none_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count)
# --- Page registering ---------------------------------------
page_extension = self.get_page_extension_class()
# register first time
extension_pool.register(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count+1)
# register second time
extension_pool.register(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count+1)
self.assertIs(extension_pool.signaling_activated, True)
# --- Title registering --------------------------------------
title_extension = self.get_title_extension_class()
# register first time
extension_pool.register(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count+1)
# register second time
extension_pool.register(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count+1)
self.assertIs(extension_pool.signaling_activated, True)
# --- Unregister ---------------------------------------------
extension_pool.unregister(page_extension)
self.assertEqual(len(extension_pool.page_extensions), initial_extension_count)
extension_pool.unregister(title_extension)
self.assertEqual(len(extension_pool.title_extensions), initial_extension_count)
# Unregister an object that is not registered yet
extension_pool.unregister(page_extension)
extension_pool.unregister(title_extension)
def get_page_extension_class(self):
from django.db import models
class TestPageExtension(PageExtension):
content = models.CharField('Content', max_length=50)
return TestPageExtension
def get_title_extension_class(self):
from django.db import models
class TestTitleExtension(TitleExtension):
content = models.CharField('Content', max_length=50)
return TestTitleExtension
def get_none_extension_class(self):
class TestNoneExtension(object):
pass
return TestNoneExtension
def test_publish_page_extension(self):
page = create_page('Test Page Extension', "nav_playground.html", "en")
page_extension = MyPageExtension(extended_object=page, extra='page extension 1')
page_extension.save()
page.mypageextension = page_extension
# publish first time
page.publish('en')
self.assertEqual(page_extension.extra, page.publisher_public.mypageextension.extra)
self.assertEqual(page.get_publisher_state('en'), 0)
# change and publish again
page = Page.objects.get(pk=page.pk)
page_extension = page.mypageextension
page_extension.extra = 'page extension 1 - changed'
page_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
page_extension.delete()
self.assertFalse(MyPageExtension.objects.filter(pk=page_extension.pk).exists())
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
def test_publish_title_extension(self):
page = create_page('Test Title Extension', "nav_playground.html", "en")
title = page.get_title_obj()
title_extension = MyTitleExtension(extended_object=title, extra_title='title extension 1')
title_extension.save()
page.mytitleextension = title_extension
# publish first time
page.publish('en')
# import ipdb; ipdb.set_trace()
self.assertEqual(page.get_publisher_state('en'), 0)
self.assertEqual(title_extension.extra_title, page.publisher_public.get_title_obj().mytitleextension.extra_title)
# change and publish again
page = Page.objects.get(pk=page.pk)
title = page.get_title_obj()
title_extension = title.mytitleextension
title_extension.extra_title = 'title extension 1 - changed'
title_extension.save()
self.assertEqual(page.get_publisher_state('en', True), PUBLISHER_STATE_DIRTY)
page.publish('en')
self.assertEqual(page.get_publisher_state('en', True), 0)
# delete
title_extension.delete()
self.assertFalse(MyTitleExtension.objects.filter(pk=title_extension.pk).exists())
class ExtensionAdminTestCase(AdminTestsBase):
def setUp(self):
User = get_user_model()
self.admin, self.normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.no_page_permission_user = User.objects.create_user('no_page_permission', 'test2@test.com', 'test2@test.com')
else:
self.no_page_permission_user = User.objects.create_user('no_page_permission', 'test2@test.com', 'no_page_permission')
self.no_page_permission_user.is_staff = True
self.no_page_permission_user.is_active = True
self.no_page_permission_user.save()
[self.no_page_permission_user.user_permissions.add(p) for p in Permission.objects.filter(
codename__in=[
'change_mypageextension', 'change_mytitleextension',
'add_mypageextension', 'add_mytitleextension',
'delete_mypageextension', 'delete_mytitleextension',
]
)]
self.site = Site.objects.get(pk=1)
self.page = create_page(
'My Extension Page', 'nav_playground.html', 'en',
site=self.site, created_by=self.admin)
self.page_title = self.page.get_title_obj()
self.page_extension = MyPageExtension.objects.create(
extended_object=self.page,
extra="page extension text")
self.title_extension = MyTitleExtension.objects.create(
extended_object=self.page.get_title_obj(),
extra_title="title extension text")
self.page_without_extension = create_page(
'A Page', 'nav_playground.html', 'en',
site=self.site, created_by=self.admin)
self.page_title_without_extension = self.page_without_extension.get_title_obj()
def test_admin_page_extension(self):
with self.login_user_context(self.admin):
# add a new extension
response = self.client.get(
reverse('admin:extensionapp_mypageextension_add') + '?extended_object=%s' % self.page_without_extension.pk
)
self.assertEqual(response.status_code, 200)
# make sure there is no extension yet
self.assertFalse(MyPageExtension.objects.filter(extended_object=self.page_without_extension).exists())
post_data = {
'extra': 'my extra'
}
response = self.client.post(
reverse('admin:extensionapp_mypageextension_add') + '?extended_object=%s' % self.page_without_extension.pk,
post_data, follow=True
)
created_page_extension = MyPageExtension.objects.get(extended_object=self.page_without_extension)
# can delete extension
response = self.client.post(
reverse('admin:extensionapp_mypageextension_delete', args=(created_page_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertFalse(MyPageExtension.objects.filter(extended_object=self.page_without_extension).exists())
# accessing the add view on a page that already has an extension should redirect
response = self.client.get(
reverse('admin:extensionapp_mypageextension_add') + '?extended_object=%s' % self.page.pk
)
self.assertRedirects(response, reverse('admin:extensionapp_mypageextension_change', args=(self.page_extension.pk,)))
# saving an extension should work without the GET parameter
post_data = {
'extra': 'my extra text'
}
self.client.post(
reverse('admin:extensionapp_mypageextension_change', args=(self.page_extension.pk,)),
post_data, follow=True
)
self.assertTrue(MyPageExtension.objects.filter(extra='my extra text', pk=self.page_extension.pk).exists())
with self.login_user_context(self.no_page_permission_user):
# can't save if user does not have permissions to change the page
post_data = {
'extra': 'try to change extra text'
}
response = self.client.post(
reverse('admin:extensionapp_mypageextension_change', args=(self.page_extension.pk,)),
post_data, follow=True
)
self.assertEqual(response.status_code, 403)
# can't delete without page permission
response = self.client.post(
reverse('admin:extensionapp_mypageextension_delete', args=(self.page_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertEqual(response.status_code, 403)
self.assertTrue(MyPageExtension.objects.filter(extended_object=self.page).exists())
def test_admin_title_extension(self):
with self.login_user_context(self.admin):
# add a new extension
response = self.client.get(
reverse('admin:extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title_without_extension.pk
)
self.assertEqual(response.status_code, 200)
# make sure there is no extension yet
self.assertFalse(MyTitleExtension.objects.filter(extended_object=self.page_title_without_extension).exists())
post_data = {
'extra_title': 'my extra title'
}
self.client.post(
reverse('admin:extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title_without_extension.pk,
post_data, follow=True
)
created_title_extension = MyTitleExtension.objects.get(extended_object=self.page_title_without_extension)
# can delete extension
self.client.post(
reverse('admin:extensionapp_mytitleextension_delete', args=(created_title_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertFalse(MyTitleExtension.objects.filter(extended_object=self.page_title_without_extension).exists())
# accessing the add view on a page that already has an extension should redirect
response = self.client.get(
reverse('admin:extensionapp_mytitleextension_add') + '?extended_object=%s' % self.page_title.pk
)
self.assertRedirects(response, reverse('admin:extensionapp_mytitleextension_change', args=(self.title_extension.pk,)))
# saving an extension should work without the GET parameter
post_data = {
'extra_title': 'my extra text'
}
self.client.post(
reverse('admin:extensionapp_mytitleextension_change', args=(self.title_extension.pk,)),
post_data, follow=True
)
self.assertTrue(MyTitleExtension.objects.filter(extra_title='my extra text', pk=self.title_extension.pk).exists())
with self.login_user_context(self.no_page_permission_user):
# can't save if user does not have permissions to change the page
post_data = {
'extra_title': 'try to change extra text'
}
response = self.client.post(
reverse('admin:extensionapp_mytitleextension_change', args=(self.title_extension.pk,)),
post_data, follow=True
)
self.assertEqual(response.status_code, 403)
# can't delete without page permission
response = self.client.post(
reverse('admin:extensionapp_mytitleextension_delete', args=(self.title_extension.pk,)),
{'post': 'yes'}, follow=True
)
self.assertEqual(response.status_code, 403)
self.assertTrue(MyTitleExtension.objects.filter(extended_object=self.page_title).exists())
|
SurfasJones/djcmsrc3
|
venv/lib/python2.7/site-packages/cms/tests/extensions.py
|
Python
|
mit
| 13,596
|
#!/usr/bin/env python
## vim:ts=4:et:nowrap
"""A user-defined wrapper around string objects
Note: string objects have grown methods in Python 1.6
This module requires Python 1.6 or later.
"""
from types import StringType, UnicodeType
import sys
__all__ = ["UserString","MutableString"]
class UserString:
def __init__(self, seq):
if isinstance(seq, StringType) or isinstance(seq, UnicodeType):
self.data = seq
elif isinstance(seq, UserString):
self.data = seq.data[:]
else:
self.data = str(seq)
def __str__(self): return str(self.data)
def __repr__(self): return repr(self.data)
def __int__(self): return int(self.data)
def __long__(self): return long(self.data)
def __float__(self): return float(self.data)
def __complex__(self): return complex(self.data)
def __hash__(self): return hash(self.data)
def __cmp__(self, string):
if isinstance(string, UserString):
return cmp(self.data, string.data)
else:
return cmp(self.data, string)
def __contains__(self, char):
return char in self.data
def __len__(self): return len(self.data)
def __getitem__(self, index): return self.__class__(self.data[index])
def __getslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
return self.__class__(self.data[start:end])
def __add__(self, other):
if isinstance(other, UserString):
return self.__class__(self.data + other.data)
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + str(other))
def __radd__(self, other):
if isinstance(other, StringType) or isinstance(other, UnicodeType):
return self.__class__(other + self.data)
else:
return self.__class__(str(other) + self.data)
def __iadd__(self, other):
if isinstance(other, UserString):
self.data += other.data
elif isinstance(other, StringType) or isinstance(other, UnicodeType):
self.data += other
else:
self.data += str(other)
return self
def __mul__(self, n):
return self.__class__(self.data*n)
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
return self
# the following methods are defined in alphabetical order:
def capitalize(self): return self.__class__(self.data.capitalize())
def center(self, width): return self.__class__(self.data.center(width))
def count(self, sub, start=0, end=sys.maxint):
return self.data.count(sub, start, end)
def encode(self, encoding=None, errors=None): # XXX improve this?
if encoding:
if errors:
return self.__class__(self.data.encode(encoding, errors))
else:
return self.__class__(self.data.encode(encoding))
else:
return self.__class__(self.data.encode())
def endswith(self, suffix, start=0, end=sys.maxint):
return self.data.endswith(suffix, start, end)
def expandtabs(self, tabsize=8):
return self.__class__(self.data.expandtabs(tabsize))
def find(self, sub, start=0, end=sys.maxint):
return self.data.find(sub, start, end)
def index(self, sub, start=0, end=sys.maxint):
return self.data.index(sub, start, end)
def isalpha(self): return self.data.isalpha()
def isalnum(self): return self.data.isalnum()
def isdecimal(self): return self.data.isdecimal()
def isdigit(self): return self.data.isdigit()
def islower(self): return self.data.islower()
def isnumeric(self): return self.data.isnumeric()
def isspace(self): return self.data.isspace()
def istitle(self): return self.data.istitle()
def isupper(self): return self.data.isupper()
def join(self, seq): return self.data.join(seq)
def ljust(self, width): return self.__class__(self.data.ljust(width))
def lower(self): return self.__class__(self.data.lower())
def lstrip(self): return self.__class__(self.data.lstrip())
def replace(self, old, new, maxsplit=-1):
return self.__class__(self.data.replace(old, new, maxsplit))
def rfind(self, sub, start=0, end=sys.maxint):
return self.data.rfind(sub, start, end)
def rindex(self, sub, start=0, end=sys.maxint):
return self.data.rindex(sub, start, end)
def rjust(self, width): return self.__class__(self.data.rjust(width))
def rstrip(self): return self.__class__(self.data.rstrip())
def split(self, sep=None, maxsplit=-1):
return self.data.split(sep, maxsplit)
def splitlines(self, keepends=0): return self.data.splitlines(keepends)
def startswith(self, prefix, start=0, end=sys.maxint):
return self.data.startswith(prefix, start, end)
def strip(self): return self.__class__(self.data.strip())
def swapcase(self): return self.__class__(self.data.swapcase())
def title(self): return self.__class__(self.data.title())
def translate(self, *args):
return self.__class__(self.data.translate(*args))
def upper(self): return self.__class__(self.data.upper())
class MutableString(UserString):
"""mutable string objects
Python strings are immutable objects. This has the advantage, that
strings may be used as dictionary keys. If this property isn't needed
and you insist on changing string values in place instead, you may cheat
and use MutableString.
But the purpose of this class is an educational one: to prevent
people from inventing their own mutable string class derived
from UserString and than forget thereby to remove (override) the
__hash__ method inherited from ^UserString. This would lead to
errors that would be very hard to track down.
A faster and better solution is to rewrite your program using lists."""
def __init__(self, string=""):
self.data = string
def __hash__(self):
raise TypeError, "unhashable type (it is mutable)"
def __setitem__(self, index, sub):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + sub + self.data[index+1:]
def __delitem__(self, index):
if index < 0 or index >= len(self.data): raise IndexError
self.data = self.data[:index] + self.data[index+1:]
def __setslice__(self, start, end, sub):
start = max(start, 0); end = max(end, 0)
if isinstance(sub, UserString):
self.data = self.data[:start]+sub.data+self.data[end:]
elif isinstance(sub, StringType) or isinstance(sub, UnicodeType):
self.data = self.data[:start]+sub+self.data[end:]
else:
self.data = self.data[:start]+str(sub)+self.data[end:]
def __delslice__(self, start, end):
start = max(start, 0); end = max(end, 0)
self.data = self.data[:start] + self.data[end:]
def immutable(self):
return UserString(self.data)
if __name__ == "__main__":
# execute the regression test to stdout, if called as a script:
import os
called_in_dir, called_as = os.path.split(sys.argv[0])
called_in_dir = os.path.abspath(called_in_dir)
called_as, py = os.path.splitext(called_as)
sys.path.append(os.path.join(called_in_dir, 'test'))
if '-q' in sys.argv:
import test_support
test_support.verbose = 0
__import__('test_' + called_as.lower())
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/UserString.py
|
Python
|
mit
| 7,530
|
#!/usr/bin/python
# axes3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts fixed by Reinier Heeres <reinier@heeres.eu>
# Minor additions by Ben Axelrod <baxelrod@coroware.com>
# Significant updates and revisions by Ben Root <ben.v.root@gmail.com>
"""
Module containing Axes3D, an object which can plot 3D objects on a
2D matplotlib figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from matplotlib.externals import six
from matplotlib.externals.six.moves import map, xrange, zip, reduce
import warnings
from operator import itemgetter
import matplotlib.axes as maxes
from matplotlib.axes import Axes, rcParams
from matplotlib import cbook
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Bbox
import matplotlib.collections as mcoll
from matplotlib import docstring
import matplotlib.scale as mscale
from matplotlib.tri.triangulation import Triangulation
import numpy as np
from matplotlib.colors import Normalize, colorConverter, LightSource
from . import art3d
from . import proj3d
from . import axis3d
def unit_bbox():
box = Bbox(np.array([[0, 0], [1, 1]]))
return box
class Axes3D(Axes):
"""
3D axes object.
"""
name = '3d'
_shared_z_axes = cbook.Grouper()
def __init__(self, fig, rect=None, *args, **kwargs):
'''
Build an :class:`Axes3D` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*azim* Azimuthal viewing angle (default -60)
*elev* Elevation viewing angle (default 30)
*zscale* [%(scale)s]
*sharez* Other axes to share z-limits with
================ =========================================
.. versionadded :: 1.2.1
*sharez*
''' % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
if rect is None:
rect = [0.0, 0.0, 1.0, 1.0]
self._cids = []
self.initial_azim = kwargs.pop('azim', -60)
self.initial_elev = kwargs.pop('elev', 30)
zscale = kwargs.pop('zscale', None)
sharez = kwargs.pop('sharez', None)
self.xy_viewLim = unit_bbox()
self.zz_viewLim = unit_bbox()
self.xy_dataLim = unit_bbox()
self.zz_dataLim = unit_bbox()
# inihibit autoscale_view until the axes are defined
# they can't be defined until Axes.__init__ has been called
self.view_init(self.initial_elev, self.initial_azim)
self._ready = 0
self._sharez = sharez
if sharez is not None:
self._shared_z_axes.join(self, sharez)
self._adjustable = 'datalim'
Axes.__init__(self, fig, rect,
frameon=True,
*args, **kwargs)
# Disable drawing of axes by base class
Axes.set_axis_off(self)
# Enable drawing of axes by Axes3D class
self.set_axis_on()
self.M = None
# func used to format z -- fall back on major formatters
self.fmt_zdata = None
if zscale is not None :
self.set_zscale(zscale)
if self.zaxis is not None :
self._zcid = self.zaxis.callbacks.connect('units finalize',
self.relim)
else :
self._zcid = None
self._ready = 1
self.mouse_init()
self.set_top_view()
self.axesPatch.set_linewidth(0)
# Calculate the pseudo-data width and height
pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
self.figure.add_axes(self)
def set_axis_off(self):
self._axis3don = False
self.stale = True
def set_axis_on(self):
self._axis3don = True
self.stale = True
def have_units(self):
"""
Return *True* if units are set on the *x*, *y*, or *z* axes
"""
return (self.xaxis.have_units() or self.yaxis.have_units() or
self.zaxis.have_units())
def convert_zunits(self, z):
"""
For artists in an axes, if the zaxis has units support,
convert *z* using zaxis unit type
.. versionadded :: 1.2.1
"""
return self.zaxis.convert_units(z)
def _process_unit_info(self, xdata=None, ydata=None, zdata=None,
kwargs=None):
"""
Look for unit *kwargs* and update the axis instances as necessary
"""
Axes._process_unit_info(self, xdata=xdata, ydata=ydata, kwargs=kwargs)
if self.xaxis is None or self.yaxis is None or self.zaxis is None:
return
if zdata is not None:
# we only need to update if there is nothing set yet.
if not self.zaxis.have_units():
self.zaxis.update_units(xdata)
# process kwargs 2nd since these will override default units
if kwargs is not None:
zunits = kwargs.pop('zunits', self.zaxis.units)
if zunits != self.zaxis.units:
self.zaxis.set_units(zunits)
# If the units being set imply a different converter,
# we need to update.
if zdata is not None:
self.zaxis.update_units(zdata)
def set_top_view(self):
# this happens to be the right view for the viewing coordinates
# moved up and to the left slightly to fit labels and axes
xdwl = (0.95/self.dist)
xdw = (0.9/self.dist)
ydwl = (0.95/self.dist)
ydw = (0.9/self.dist)
# This is purposely using the 2D Axes's set_xlim and set_ylim,
# because we are trying to place our viewing pane.
Axes.set_xlim(self, -xdwl, xdw, auto=None)
Axes.set_ylim(self, -ydwl, ydw, auto=None)
def _init_axis(self):
'''Init 3D axes; overrides creation of regular X/Y axes'''
self.w_xaxis = axis3d.XAxis('x', self.xy_viewLim.intervalx,
self.xy_dataLim.intervalx, self)
self.xaxis = self.w_xaxis
self.w_yaxis = axis3d.YAxis('y', self.xy_viewLim.intervaly,
self.xy_dataLim.intervaly, self)
self.yaxis = self.w_yaxis
self.w_zaxis = axis3d.ZAxis('z', self.zz_viewLim.intervalx,
self.zz_dataLim.intervalx, self)
self.zaxis = self.w_zaxis
for ax in self.xaxis, self.yaxis, self.zaxis:
ax.init3d()
def get_children(self):
return [self.zaxis, ] + Axes.get_children(self)
def _get_axis_list(self):
return super(Axes3D, self)._get_axis_list() + (self.zaxis, )
def unit_cube(self, vals=None):
minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
xs, ys, zs = ([minx, maxx, maxx, minx, minx, maxx, maxx, minx],
[miny, miny, maxy, maxy, miny, miny, maxy, maxy],
[minz, minz, minz, minz, maxz, maxz, maxz, maxz])
return list(zip(xs, ys, zs))
def tunit_cube(self, vals=None, M=None):
if M is None:
M = self.M
xyzs = self.unit_cube(vals)
tcube = proj3d.proj_points(xyzs, M)
return tcube
def tunit_edges(self, vals=None, M=None):
tc = self.tunit_cube(vals, M)
edges = [(tc[0], tc[1]),
(tc[1], tc[2]),
(tc[2], tc[3]),
(tc[3], tc[0]),
(tc[0], tc[4]),
(tc[1], tc[5]),
(tc[2], tc[6]),
(tc[3], tc[7]),
(tc[4], tc[5]),
(tc[5], tc[6]),
(tc[6], tc[7]),
(tc[7], tc[4])]
return edges
def draw(self, renderer):
# draw the background patch
self.axesPatch.draw(renderer)
self._frameon = False
# first, set the aspect
# this is duplicated from `axes._base._AxesBase.draw`
# but must be called before any of the artist are drawn as
# it adjusts the view limits and the size of the bounding box
# of the axes
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
# add the projection matrix to the renderer
self.M = self.get_proj()
renderer.M = self.M
renderer.vvec = self.vvec
renderer.eye = self.eye
renderer.get_axis_position = self.get_axis_position
# Calculate projection of collections and zorder them
zlist = [(col.do_3d_projection(renderer), col) \
for col in self.collections]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, col) in enumerate(zlist):
col.zorder = i
# Calculate projection of patches and zorder them
zlist = [(patch.do_3d_projection(renderer), patch) \
for patch in self.patches]
zlist.sort(key=itemgetter(0), reverse=True)
for i, (z, patch) in enumerate(zlist):
patch.zorder = i
if self._axis3don:
axes = (self.xaxis, self.yaxis, self.zaxis)
# Draw panes first
for ax in axes:
ax.draw_pane(renderer)
# Then axes
for ax in axes:
ax.draw(renderer)
# Then rest
Axes.draw(self, renderer)
def get_axis_position(self):
vals = self.get_w_lims()
tc = self.tunit_cube(vals, self.M)
xhigh = tc[1][2] > tc[2][2]
yhigh = tc[3][2] > tc[2][2]
zhigh = tc[0][2] > tc[2][2]
return xhigh, yhigh, zhigh
def update_datalim(self, xys, **kwargs):
pass
def get_autoscale_on(self) :
"""
Get whether autoscaling is applied for all axes on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return Axes.get_autoscale_on(self) and self.get_autoscalez_on()
def get_autoscalez_on(self) :
"""
Get whether autoscaling for the z-axis is applied on plot commands
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
return self._autoscaleZon
def set_autoscale_on(self, b) :
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.set_autoscale_on(self, b)
self.set_autoscalez_on(b)
def set_autoscalez_on(self, b) :
"""
Set whether autoscaling for the z-axis is applied on plot commands
accepts: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self._autoscalez_on = b
def set_zmargin(self, m) :
"""
Set padding of Z data limits prior to autoscaling.
*m* times the data interval will be added to each
end of that interval before it is used in autoscaling.
accepts: float in range 0 to 1
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if m < 0 or m > 1 :
raise ValueError("margin must be in range 0 to 1")
self._zmargin = m
self.stale = True
def margins(self, *args, **kw) :
"""
Convenience method to set or retrieve autoscaling margins.
signatures::
margins()
returns xmargin, ymargin, zmargin
::
margins(margin)
margins(xmargin, ymargin, zmargin)
margins(x=xmargin, y=ymargin, z=zmargin)
margins(..., tight=False)
All forms above set the xmargin, ymargin and zmargin
parameters. All keyword parameters are optional. A single argument
specifies xmargin, ymargin and zmargin. The *tight* parameter
is passed to :meth:`autoscale_view`, which is executed after
a margin is changed; the default here is *True*, on the
assumption that when margins are specified, no additional
padding to match tick marks is usually desired. Setting
*tight* to *None* will preserve the previous setting.
Specifying any margin changes only the autoscaling; for example,
if *xmargin* is not None, then *xmargin* times the X data
interval will be added to each end of that interval before
it is used in autoscaling.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if not args and not kw:
return self._xmargin, self._ymargin, self._zmargin
tight = kw.pop('tight', True)
mx = kw.pop('x', None)
my = kw.pop('y', None)
mz = kw.pop('z', None)
if len(args) == 1:
mx = my = mz = args[0]
elif len(args) == 2:
# Maybe put out a warning because mz is not set?
mx, my = args
elif len(args) == 3:
mx, my, mz = args
else:
raise ValueError("more than three arguments were supplied")
if mx is not None:
self.set_xmargin(mx)
if my is not None:
self.set_ymargin(my)
if mz is not None:
self.set_zmargin(mz)
scalex = (mx is not None)
scaley = (my is not None)
scalez = (mz is not None)
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def autoscale(self, enable=True, axis='both', tight=None) :
"""
Convenience method for simple axis view autoscaling.
See :meth:`matplotlib.axes.Axes.autoscale` for full explanation.
Note that this function behaves the same, but for all
three axes. Therfore, 'z' can be passed for *axis*,
and 'both' applies to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if enable is None:
scalex = True
scaley = True
scalez = True
else:
scalex = False
scaley = False
scalez = False
if axis in ['x', 'both']:
self._autoscaleXon = bool(enable)
scalex = self._autoscaleXon
if axis in ['y', 'both']:
self._autoscaleYon = bool(enable)
scaley = self._autoscaleYon
if axis in ['z', 'both']:
self._autoscaleZon = bool(enable)
scalez = self._autoscaleZon
self.autoscale_view(tight=tight, scalex=scalex, scaley=scaley,
scalez=scalez)
def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
x, y, z = list(map(np.asarray, (X, Y, Z)))
try:
x, y = x.flatten(), y.flatten()
if Z is not None:
z = z.flatten()
except AttributeError:
raise
# This updates the bounding boxes as to keep a record as
# to what the minimum sized rectangular volume holds the
# data.
self.xy_dataLim.update_from_data_xy(np.array([x, y]).T, not had_data)
if z is not None:
self.zz_dataLim.update_from_data_xy(np.array([z, z]).T, not had_data)
# Let autoscale_view figure out how to use this data.
self.autoscale_view()
def autoscale_view(self, tight=None, scalex=True, scaley=True,
scalez=True) :
"""
Autoscale the view limits using the data limits.
See :meth:`matplotlib.axes.Axes.autoscale_view` for documentation.
Note that this function applies to the 3D axes, and as such
adds the *scalez* to the function arguments.
.. versionchanged :: 1.1.0
Function signature was changed to better match the 2D version.
*tight* is now explicitly a kwarg and placed first.
.. versionchanged :: 1.2.1
This is now fully functional.
"""
if not self._ready:
return
# This method looks at the rectangular volume (see above)
# of data and decides how to scale the view portal to fit it.
if tight is None:
# if image data only just use the datalim
_tight = self._tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)
else:
_tight = self._tight = bool(tight)
if scalex and self._autoscaleXon:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = self.xy_dataLim.intervalx
xlocator = self.xaxis.get_major_locator()
try:
x0, x1 = xlocator.nonsingular(x0, x1)
except AttributeError:
x0, x1 = mtransforms.nonsingular(x0, x1, increasing=False,
expander=0.05)
if self._xmargin > 0:
delta = (x1 - x0) * self._xmargin
x0 -= delta
x1 += delta
if not _tight:
x0, x1 = xlocator.view_limits(x0, x1)
self.set_xbound(x0, x1)
if scaley and self._autoscaleYon:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = self.xy_dataLim.intervaly
ylocator = self.yaxis.get_major_locator()
try:
y0, y1 = ylocator.nonsingular(y0, y1)
except AttributeError:
y0, y1 = mtransforms.nonsingular(y0, y1, increasing=False,
expander=0.05)
if self._ymargin > 0:
delta = (y1 - y0) * self._ymargin
y0 -= delta
y1 += delta
if not _tight:
y0, y1 = ylocator.view_limits(y0, y1)
self.set_ybound(y0, y1)
if scalez and self._autoscaleZon:
zshared = self._shared_z_axes.get_siblings(self)
dl = [ax.dataLim for ax in zshared]
bb = mtransforms.BboxBase.union(dl)
z0, z1 = self.zz_dataLim.intervalx
zlocator = self.zaxis.get_major_locator()
try:
z0, z1 = zlocator.nonsingular(z0, z1)
except AttributeError:
z0, z1 = mtransforms.nonsingular(z0, z1, increasing=False,
expander=0.05)
if self._zmargin > 0:
delta = (z1 - z0) * self._zmargin
z0 -= delta
z1 += delta
if not _tight:
z0, z1 = zlocator.view_limits(z0, z1)
self.set_zbound(z0, z1)
def get_w_lims(self):
'''Get 3D world limits.'''
minx, maxx = self.get_xlim3d()
miny, maxy = self.get_ylim3d()
minz, maxz = self.get_zlim3d()
return minx, maxx, miny, maxy, minz, maxz
def _determine_lims(self, xmin=None, xmax=None, *args, **kwargs):
if xmax is None and cbook.iterable(xmin):
xmin, xmax = xmin
if xmin == xmax:
xmin -= 0.05
xmax += 0.05
return (xmin, xmax)
def set_xlim3d(self, left=None, right=None, emit=True, auto=False, **kw):
"""
Set 3D x limits.
See :meth:`matplotlib.axes.Axes.set_xlim` for full documentation.
"""
if 'xmin' in kw:
left = kw.pop('xmin')
if 'xmax' in kw:
right = kw.pop('xmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if right is None and cbook.iterable(left):
left, right = left
self._process_unit_info(xdata=(left, right))
if left is not None:
left = self.convert_xunits(left)
if right is not None:
right = self.convert_xunits(right)
old_left, old_right = self.get_xlim()
if left is None:
left = old_left
if right is None:
right = old_right
if left == right:
warnings.warn(('Attempting to set identical left==right results\n'
'in singular transformations; automatically expanding.\n'
'left=%s, right=%s') % (left, right))
left, right = mtransforms.nonsingular(left, right, increasing=False)
left, right = self.xaxis.limit_range_for_scale(left, right)
self.xy_viewLim.intervalx = (left, right)
if auto is not None:
self._autoscaleXon = bool(auto)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.xy_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return left, right
set_xlim = set_xlim3d
def set_ylim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D y limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation.
"""
if 'ymin' in kw:
bottom = kw.pop('ymin')
if 'ymax' in kw:
top = kw.pop('ymax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(ydata=(bottom, top))
if bottom is not None:
bottom = self.convert_yunits(bottom)
if top is not None:
top = self.convert_yunits(top)
old_bottom, old_top = self.get_ylim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.yaxis.limit_range_for_scale(bottom, top)
self.xy_viewLim.intervaly = (bottom, top)
if auto is not None:
self._autoscaleYon = bool(auto)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.xy_viewLim.intervaly,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_ylim = set_ylim3d
def set_zlim3d(self, bottom=None, top=None, emit=True, auto=False, **kw):
"""
Set 3D z limits.
See :meth:`matplotlib.axes.Axes.set_ylim` for full documentation
"""
if 'zmin' in kw:
bottom = kw.pop('zmin')
if 'zmax' in kw:
top = kw.pop('zmax')
if kw:
raise ValueError("unrecognized kwargs: %s" % kw.keys())
if top is None and cbook.iterable(bottom):
bottom, top = bottom
self._process_unit_info(zdata=(bottom, top))
if bottom is not None:
bottom = self.convert_zunits(bottom)
if top is not None:
top = self.convert_zunits(top)
old_bottom, old_top = self.get_zlim()
if bottom is None:
bottom = old_bottom
if top is None:
top = old_top
if top == bottom:
warnings.warn(('Attempting to set identical bottom==top results\n'
'in singular transformations; automatically expanding.\n'
'bottom=%s, top=%s') % (bottom, top))
bottom, top = mtransforms.nonsingular(bottom, top, increasing=False)
bottom, top = self.zaxis.limit_range_for_scale(bottom, top)
self.zz_viewLim.intervalx = (bottom, top)
if auto is not None:
self._autoscaleZon = bool(auto)
if emit:
self.callbacks.process('zlim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_z_axes.get_siblings(self):
if other is not self:
other.set_zlim(self.zz_viewLim.intervalx,
emit=False, auto=auto)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
self.stale = True
return bottom, top
set_zlim = set_zlim3d
def get_xlim3d(self):
return self.xy_viewLim.intervalx
get_xlim3d.__doc__ = maxes.Axes.get_xlim.__doc__
get_xlim = get_xlim3d
get_xlim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D x-limits
"""
def get_ylim3d(self):
return self.xy_viewLim.intervaly
get_ylim3d.__doc__ = maxes.Axes.get_ylim.__doc__
get_ylim = get_ylim3d
get_ylim.__doc__ += """
.. versionchanged :: 1.1.0
This function now correctly refers to the 3D y-limits.
"""
def get_zlim3d(self):
'''Get 3D z limits.'''
return self.zz_viewLim.intervalx
get_zlim = get_zlim3d
def get_zscale(self) :
"""
Return the zaxis scale string %s
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
""" % (", ".join(mscale.get_scale_names()))
return self.zaxis.get_scale()
# We need to slightly redefine these to pass scalez=False
# to their calls of autoscale_view.
def set_xscale(self, value, **kwargs) :
self.xaxis._set_scale(value, **kwargs)
self.autoscale_view(scaley=False, scalez=False)
self._update_transScale()
set_xscale.__doc__ = maxes.Axes.set_xscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
def set_yscale(self, value, **kwargs) :
self.yaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scalez=False)
self._update_transScale()
self.stale = True
set_yscale.__doc__ = maxes.Axes.set_yscale.__doc__ + """
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
@docstring.dedent_interpd
def set_zscale(self, value, **kwargs) :
"""
call signature::
set_zscale(value)
Set the scaling of the z-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
.. note ::
Currently, Axes3D objects only supports linear scales.
Other scales may or may not work, and support for these
is improving with each release.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis._set_scale(value, **kwargs)
self.autoscale_view(scalex=False, scaley=False)
self._update_transScale()
self.stale = True
def set_zticks(self, *args, **kwargs):
"""
Set z-axis tick locations.
See :meth:`matplotlib.axes.Axes.set_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticks(*args, **kwargs)
def get_zticks(self, minor=False):
"""
Return the z ticks as a list of locations
See :meth:`matplotlib.axes.Axes.get_yticks` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklocs(minor=minor)
def get_zmajorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_majorticklabels())
def get_zminorticklabels(self) :
"""
Get the ztick labels as a list of Text instances
.. note::
Minor ticks are not supported. This function was added
only for completeness.
.. versionadded :: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_minorticklabels())
def set_zticklabels(self, *args, **kwargs) :
"""
Set z-axis tick labels.
See :meth:`matplotlib.axes.Axes.set_yticklabels` for more details.
.. note::
Minor ticks are not supported by Axes3D objects.
.. versionadded:: 1.1.0
"""
return self.zaxis.set_ticklabels(*args, **kwargs)
def get_zticklabels(self, minor=False) :
"""
Get ztick labels as a list of Text instances.
See :meth:`matplotlib.axes.Axes.get_yticklabels` for more details.
.. note::
Minor ticks are not supported.
.. versionadded:: 1.1.0
"""
return cbook.silent_list('Text zticklabel',
self.zaxis.get_ticklabels(minor=minor))
def zaxis_date(self, tz=None) :
"""
Sets up z-axis ticks and labels that treat the z data as dates.
*tz* is a timezone string or :class:`tzinfo` instance.
Defaults to rc value.
.. note::
This function is merely provided for completeness.
Axes3D objects do not officially support dates for ticks,
and so this may or may not work as expected.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
self.zaxis.axis_date(tz)
def get_zticklines(self) :
"""
Get ztick lines as a list of Line2D instances.
Note that this function is provided merely for completeness.
These lines are re-calculated as the display changes.
.. versionadded:: 1.1.0
"""
return self.zaxis.get_ticklines()
def clabel(self, *args, **kwargs):
"""
This function is currently not implemented for 3D axes.
Returns *None*.
"""
return None
def view_init(self, elev=None, azim=None):
"""
Set the elevation and azimuth of the axes.
This can be used to rotate the axes programatically.
'elev' stores the elevation angle in the z plane.
'azim' stores the azimuth angle in the x,y plane.
if elev or azim are None (default), then the initial value
is used which was specified in the :class:`Axes3D` constructor.
"""
self.dist = 10
if elev is None:
self.elev = self.initial_elev
else:
self.elev = elev
if azim is None:
self.azim = self.initial_azim
else:
self.azim = azim
def get_proj(self):
"""
Create the projection matrix from the current viewing position.
elev stores the elevation angle in the z plane
azim stores the azimuth angle in the x,y plane
dist is the distance of the eye viewing point from the object
point.
"""
relev, razim = np.pi * self.elev/180, np.pi * self.azim/180
xmin, xmax = self.get_xlim3d()
ymin, ymax = self.get_ylim3d()
zmin, zmax = self.get_zlim3d()
# transform to uniform world coordinates 0-1.0,0-1.0,0-1.0
worldM = proj3d.world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax)
# look into the middle of the new coordinates
R = np.array([0.5, 0.5, 0.5])
xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist
yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist
zp = R[2] + np.sin(relev) * self.dist
E = np.array((xp, yp, zp))
self.eye = E
self.vvec = R - E
self.vvec = self.vvec / proj3d.mod(self.vvec)
if abs(relev) > np.pi/2:
# upside down
V = np.array((0, 0, -1))
else:
V = np.array((0, 0, 1))
zfront, zback = -self.dist, self.dist
viewM = proj3d.view_transformation(E, R, V)
perspM = proj3d.persp_transformation(zfront, zback)
M0 = np.dot(viewM, worldM)
M = np.dot(perspM, M0)
return M
def mouse_init(self, rotate_btn=1, zoom_btn=3):
"""Initializes mouse button callbacks to enable 3D rotation of
the axes. Also optionally sets the mouse buttons for 3D rotation
and zooming.
============ =======================================================
Argument Description
============ =======================================================
*rotate_btn* The integer or list of integers specifying which mouse
button or buttons to use for 3D rotation of the axes.
Default = 1.
*zoom_btn* The integer or list of integers specifying which mouse
button or buttons to use to zoom the 3D axes.
Default = 3.
============ =======================================================
"""
self.button_pressed = None
canv = self.figure.canvas
if canv is not None:
c1 = canv.mpl_connect('motion_notify_event', self._on_move)
c2 = canv.mpl_connect('button_press_event', self._button_press)
c3 = canv.mpl_connect('button_release_event', self._button_release)
self._cids = [c1, c2, c3]
else:
warnings.warn('Axes3D.figure.canvas is \'None\', mouse rotation disabled. Set canvas then call Axes3D.mouse_init().')
# coerce scalars into array-like, then convert into
# a regular list to avoid comparisons against None
# which breaks in recent versions of numpy.
self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
def can_zoom(self) :
"""
Return *True* if this axes supports the zoom box button functionality.
3D axes objects do not use the zoom box button.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
3D axes objects do not use the pan/zoom button.
"""
return False
def cla(self):
"""
Clear axes
"""
# Disabling mouse interaction might have been needed a long
# time ago, but I can't find a reason for it now - BVR (2012-03)
#self.disable_mouse_rotation()
self.zaxis.cla()
if self._sharez is not None:
self.zaxis.major = self._sharez.zaxis.major
self.zaxis.minor = self._sharez.zaxis.minor
z0, z1 = self._sharez.get_zlim()
self.set_zlim(z0, z1, emit=False, auto=None)
self.zaxis._set_scale(self._sharez.zaxis.get_scale())
else:
self.zaxis._set_scale('linear')
self._autoscaleZon = True
self._zmargin = 0
Axes.cla(self)
self.grid(rcParams['axes3d.grid'])
def disable_mouse_rotation(self):
"""Disable mouse button callbacks.
"""
# Disconnect the various events we set.
for cid in self._cids:
self.figure.canvas.mpl_disconnect(cid)
self._cids = []
def _button_press(self, event):
if event.inaxes == self:
self.button_pressed = event.button
self.sx, self.sy = event.xdata, event.ydata
def _button_release(self, event):
self.button_pressed = None
def format_zdata(self, z):
"""
Return *z* string formatted. This function will use the
:attr:`fmt_zdata` attribute if it is callable, else will fall
back on the zaxis major formatter
"""
try: return self.fmt_zdata(z)
except (AttributeError, TypeError):
func = self.zaxis.get_major_formatter().format_data_short
val = func(z)
return val
def format_coord(self, xd, yd):
"""
Given the 2D view coordinates attempt to guess a 3D coordinate.
Looks for the nearest edge to the point and then assumes that
the point is at the same z location as the nearest point on the edge.
"""
if self.M is None:
return ''
if self.button_pressed in self._rotate_btn:
return 'azimuth=%d deg, elevation=%d deg ' % (self.azim, self.elev)
# ignore xd and yd and display angles instead
p = (xd, yd)
edges = self.tunit_edges()
#lines = [proj3d.line2d(p0,p1) for (p0,p1) in edges]
ldists = [(proj3d.line2d_seg_dist(p0, p1, p), i) for \
i, (p0, p1) in enumerate(edges)]
ldists.sort()
# nearest edge
edgei = ldists[0][1]
p0, p1 = edges[edgei]
# scale the z value to match
x0, y0, z0 = p0
x1, y1, z1 = p1
d0 = np.hypot(x0-xd, y0-yd)
d1 = np.hypot(x1-xd, y1-yd)
dt = d0+d1
z = d1/dt * z0 + d0/dt * z1
x, y, z = proj3d.inv_transform(xd, yd, z, self.M)
xs = self.format_xdata(x)
ys = self.format_ydata(y)
zs = self.format_zdata(z)
return 'x=%s, y=%s, z=%s' % (xs, ys, zs)
def _on_move(self, event):
"""Mouse moving
button-1 rotates by default. Can be set explicitly in mouse_init().
button-3 zooms by default. Can be set explicitly in mouse_init().
"""
if not self.button_pressed:
return
if self.M is None:
return
x, y = event.xdata, event.ydata
# In case the mouse is out of bounds.
if x is None:
return
dx, dy = x - self.sx, y - self.sy
w = self._pseudo_w
h = self._pseudo_h
self.sx, self.sy = x, y
# Rotation
if self.button_pressed in self._rotate_btn:
# rotate viewing point
# get the x and y pixel coords
if dx == 0 and dy == 0:
return
self.elev = art3d.norm_angle(self.elev - (dy/h)*180)
self.azim = art3d.norm_angle(self.azim - (dx/w)*180)
self.get_proj()
self.figure.canvas.draw_idle()
# elif self.button_pressed == 2:
# pan view
# project xv,yv,zv -> xw,yw,zw
# pan
# pass
# Zoom
elif self.button_pressed in self._zoom_btn:
# zoom view
# hmmm..this needs some help from clipping....
minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
df = 1-((h - dy)/h)
dx = (maxx-minx)*df
dy = (maxy-miny)*df
dz = (maxz-minz)*df
self.set_xlim3d(minx - dx, maxx + dx)
self.set_ylim3d(miny - dy, maxy + dy)
self.set_zlim3d(minz - dz, maxz + dz)
self.get_proj()
self.figure.canvas.draw_idle()
def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
'''
Set zlabel. See doc for :meth:`set_ylabel` for description.
'''
if labelpad is not None : self.zaxis.labelpad = labelpad
return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
def get_zlabel(self) :
"""
Get the z-label text string.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
label = self.zaxis.get_label()
return label.get_text()
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the 3D axes panels are drawn
.. versionadded :: 1.1.0
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the 3D axes panels are drawn
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
"""
self._frameon = bool(b)
self.stale = True
def get_axisbelow(self):
"""
Get whether axis below is true or not.
For axes3d objects, this will always be *True*
.. versionadded :: 1.1.0
This function was added for completeness.
"""
return True
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below
most artists
For axes3d objects, this will ignore any settings and just use *True*
ACCEPTS: [ *True* | *False* ]
.. versionadded :: 1.1.0
This function was added for completeness.
"""
self._axisbelow = True
self.stale = True
def grid(self, b=True, **kwargs):
'''
Set / unset 3D grid.
.. note::
Currently, this function does not behave the same as
:meth:`matplotlib.axes.Axes.grid`, but it is intended to
eventually support that behavior.
.. versionchanged :: 1.1.0
This function was changed, but not tested. Please report any bugs.
'''
# TODO: Operate on each axes separately
if len(kwargs) :
b = True
self._draw_grid = cbook._string_to_bool(b)
self.stale = True
def ticklabel_format(self, **kwargs) :
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes in Axed3D objects.
See :meth:`matplotlib.axes.Axes.ticklabel_format` for full
documentation. Note that this version applies to all three
axes of the Axes3D object. Therefore, the *axis* argument
will also accept a value of 'z' and the value of 'both' will
apply to all three axes.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
useOffset = kwargs.pop('useOffset', None)
axis = kwargs.pop('axis', 'both').lower()
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError("comma style remains to be added")
elif style == '':
sb = None
else:
raise ValueError("%s is not a valid style value")
try:
if sb is not None:
if axis in ['both', 'z']:
self.xaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_scientific(sb)
if axis in ['both', 'z'] :
self.zaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_powerlimits(scilimits)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_powerlimits(scilimits)
if useOffset is not None:
if axis in ['both', 'x']:
self.xaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'y']:
self.yaxis.major.formatter.set_useOffset(useOffset)
if axis in ['both', 'z']:
self.zaxis.major.formatter.set_useOffset(useOffset)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def locator_params(self, axis='both', tight=None, **kwargs) :
"""
Convenience method for controlling tick locators.
See :meth:`matplotlib.axes.Axes.locator_params` for full
documentation Note that this is for Axes3D objects,
therefore, setting *axis* to 'both' will result in the
parameters being set for all three axes. Also, *axis*
can also take a value of 'z' to apply parameters to the
z axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
_x = axis in ['x', 'both']
_y = axis in ['y', 'both']
_z = axis in ['z', 'both']
if _x:
self.xaxis.get_major_locator().set_params(**kwargs)
if _y:
self.yaxis.get_major_locator().set_params(**kwargs)
if _z:
self.zaxis.get_major_locator().set_params(**kwargs)
self.autoscale_view(tight=tight, scalex=_x, scaley=_y, scalez=_z)
def tick_params(self, axis='both', **kwargs) :
"""
Convenience method for changing the appearance of ticks and
tick labels.
See :meth:`matplotlib.axes.Axes.tick_params` for more complete
documentation.
The only difference is that setting *axis* to 'both' will
mean that the settings are applied to all three axes. Also,
the *axis* parameter also accepts a value of 'z', which
would mean to apply to only the z-axis.
Also, because of how Axes3D objects are drawn very differently
from regular 2D axes, some of these settings may have
ambiguous meaning. For simplicity, the 'z' axis will
accept settings as if it was like the 'y' axis.
.. note::
While this function is currently implemented, the core part
of the Axes3D object may ignore some of these settings.
Future releases will fix this. Priority will be given to
those who file bugs.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
Axes.tick_params(self, axis, **kwargs)
if axis in ['z', 'both'] :
zkw = dict(kwargs)
zkw.pop('top', None)
zkw.pop('bottom', None)
zkw.pop('labeltop', None)
zkw.pop('labelbottom', None)
self.zaxis.set_tick_params(**zkw)
### data limits, ticks, tick labels, and formatting
def invert_zaxis(self):
"""
Invert the z-axis.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
self.set_zlim(top, bottom, auto=None)
def zaxis_inverted(self):
'''
Returns True if the z-axis is inverted.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
'''
bottom, top = self.get_zlim()
return top < bottom
def get_zbound(self):
"""
Returns the z-axis numerical bounds where::
lowerBound < upperBound
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
bottom, top = self.get_zlim()
if bottom < top:
return bottom, top
else:
return top, bottom
def set_zbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the z-axis.
This method will honor axes inversion regardless of parameter order.
It will not change the :attr:`_autoscaleZon` attribute.
.. versionadded :: 1.1.0
This function was added, but not tested. Please report any bugs.
"""
if upper is None and cbook.iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_zbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.zaxis_inverted():
if lower < upper:
self.set_zlim(upper, lower, auto=None)
else:
self.set_zlim(lower, upper, auto=None)
else :
if lower < upper:
self.set_zlim(lower, upper, auto=None)
else :
self.set_zlim(upper, lower, auto=None)
def text(self, x, y, z, s, zdir=None, **kwargs):
'''
Add text to the plot. kwargs will be passed on to Axes.text,
except for the `zdir` keyword, which sets the direction to be
used as the z direction.
'''
text = Axes.text(self, x, y, s, **kwargs)
art3d.text_2d_to_3d(text, z, zdir)
return text
text3D = text
text2D = Axes.text
def plot(self, xs, ys, *args, **kwargs):
'''
Plot 2D or 3D data.
========== ================================================
Argument Description
========== ================================================
*xs*, *ys* x, y coordinates of vertices
*zs* z value(s), either one for all points or one for
each point.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Other arguments are passed on to
:func:`~matplotlib.axes.Axes.plot`
'''
# FIXME: This argument parsing might be better handled
# when we set later versions of python for
# minimum requirements. Currently at 2.4.
# Note that some of the reason for the current difficulty
# is caused by the fact that we want to insert a new
# (semi-optional) positional argument 'Z' right before
# many other traditional positional arguments occur
# such as the color, linestyle and/or marker.
had_data = self.has_data()
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
argsi = 0
# First argument is array of zs
if len(args) > 0 and cbook.iterable(args[0]) and \
len(xs) == len(args[0]) :
# So, we know that it is an array with
# first dimension the same as xs.
# Next, check to see if the data contained
# therein (if any) is scalar (and not another array).
if len(args[0]) == 0 or cbook.is_scalar(args[0][0]) :
zs = args[argsi]
argsi += 1
# First argument is z value
elif len(args) > 0 and cbook.is_scalar(args[0]):
zs = args[argsi]
argsi += 1
# Match length
if not cbook.iterable(zs):
zs = np.ones(len(xs)) * zs
lines = Axes.plot(self, xs, ys, *args[argsi:], **kwargs)
for line in lines:
art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
self.auto_scale_xyz(xs, ys, zs, had_data)
return lines
plot3D = plot
def plot_surface(self, X, Y, Z, *args, **kwargs):
'''
Create a surface plot.
By default it will be colored in shades of a solid color,
but it also supports color mapping by supplying the *cmap*
argument.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph. If 1k by 1k
arrays are passed in the default values for the strides will
result in a 100x100 grid being plotted.
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 2D arrays
*rstride* Array row stride (step size), defaults to 10
*cstride* Array column stride (step size), defaults to 10
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*facecolors* Face colors for the individual patches
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
Z = np.atleast_2d(Z)
# TODO: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
rstride = kwargs.pop('rstride', 10)
cstride = kwargs.pop('cstride', 10)
if 'facecolors' in kwargs:
fcolors = kwargs.pop('facecolors')
else:
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
fcolors = None
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
# Shade the data
if shade and cmap is not None and fcolors is not None:
fcolors = self._shade_colors_lightsource(Z, cmap, lightsource)
polys = []
# Only need these vectors to shade if there is no cmap
if cmap is None and shade :
totpts = int(np.ceil(float(rows - 1) / rstride) *
np.ceil(float(cols - 1) / cstride))
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
#colset contains the data for coloring: either average z or the facecolor
colset = []
for rs in xrange(0, rows-1, rstride):
for cs in xrange(0, cols-1, cstride):
ps = []
for a in (X, Y, Z) :
ztop = a[rs,cs:min(cols, cs+cstride+1)]
zleft = a[rs+1:min(rows, rs+rstride+1),
min(cols-1, cs+cstride)]
zbase = a[min(rows-1, rs+rstride), cs:min(cols, cs+cstride+1):][::-1]
zright = a[rs:min(rows-1, rs+rstride):, cs][::-1]
z = np.concatenate((ztop, zleft, zbase, zright))
ps.append(z)
# The construction leaves the array with duplicate points, which
# are removed here.
ps = list(zip(*ps))
lastp = np.array([])
ps2 = [ps[0]] + [ps[i] for i in xrange(1, len(ps)) if ps[i] != ps[i-1]]
avgzsum = sum(p[2] for p in ps2)
polys.append(ps2)
if fcolors is not None:
colset.append(fcolors[rs][cs])
else:
colset.append(avgzsum / len(ps2))
# Only need vectors to shade if no cmap
if cmap is None and shade:
i1, i2, i3 = 0, int(len(ps2)/3), int(2*len(ps2)/3)
v1[which_pt] = np.array(ps2[i1]) - np.array(ps2[i2])
v2[which_pt] = np.array(ps2[i2]) - np.array(ps2[i3])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else :
normals = []
polyc = art3d.Poly3DCollection(polys, *args, **kwargs)
if fcolors is not None:
if shade:
colset = self._shade_colors(colset, normals)
polyc.set_facecolors(colset)
polyc.set_edgecolors(colset)
elif cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(X, Y, Z, had_data)
return polyc
def _generate_normals(self, polygons):
'''
Generate normals for polygons by using the first three points.
This normal of course might not make sense for polygons with
more than three points not lying in a plane.
'''
normals = []
for verts in polygons:
v1 = np.array(verts[0]) - np.array(verts[1])
v2 = np.array(verts[2]) - np.array(verts[0])
normals.append(np.cross(v1, v2))
return normals
def _shade_colors(self, color, normals):
'''
Shade *color* using normal vectors given by *normals*.
*color* can also be an array of the same length as *normals*.
'''
shade = np.array([np.dot(n / proj3d.mod(n), [-1, -1, 0.5])
if proj3d.mod(n) else np.nan
for n in normals])
mask = ~np.isnan(shade)
if len(shade[mask]) > 0:
norm = Normalize(min(shade[mask]), max(shade[mask]))
shade[~mask] = min(shade[mask])
color = colorConverter.to_rgba_array(color)
# shape of color should be (M, 4) (where M is number of faces)
# shape of shade should be (M,)
# colors should have final shape of (M, 4)
alpha = color[:, 3]
colors = (0.5 + norm(shade)[:, np.newaxis] * 0.5) * color
colors[:, 3] = alpha
else:
colors = np.asanyarray(color).copy()
return colors
def _shade_colors_lightsource(self, data, cmap, lightsource):
if lightsource is None:
lightsource = LightSource(azdeg=135, altdeg=55)
return lightsource.shade(data, cmap)
def plot_wireframe(self, X, Y, Z, *args, **kwargs):
'''
Plot a 3D wireframe.
The `rstride` and `cstride` kwargs set the stride used to
sample the input data to generate the graph. If either is 0
the input data in not sampled along this direction producing a
3D line plot rather than a wireframe plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as 2D arrays
*Z*
*rstride* Array row stride (step size), defaults to 1
*cstride* Array column stride (step size), defaults to 1
========== ================================================
Keyword arguments are passed on to
:class:`~matplotlib.collections.LineCollection`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Line3DCollection`
'''
rstride = kwargs.pop("rstride", 1)
cstride = kwargs.pop("cstride", 1)
had_data = self.has_data()
Z = np.atleast_2d(Z)
# FIXME: Support masked arrays
X, Y, Z = np.broadcast_arrays(X, Y, Z)
rows, cols = Z.shape
# We want two sets of lines, one running along the "rows" of
# Z and another set of lines running along the "columns" of Z.
# This transpose will make it easy to obtain the columns.
tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
if rstride:
rii = list(xrange(0, rows, rstride))
# Add the last index only if needed
if rows > 0 and rii[-1] != (rows - 1) :
rii += [rows-1]
else:
rii = []
if cstride:
cii = list(xrange(0, cols, cstride))
# Add the last index only if needed
if cols > 0 and cii[-1] != (cols - 1) :
cii += [cols-1]
else:
cii = []
if rstride == 0 and cstride == 0:
raise ValueError("Either rstride or cstride must be non zero")
# If the inputs were empty, then just
# reset everything.
if Z.size == 0 :
rii = []
cii = []
xlines = [X[i] for i in rii]
ylines = [Y[i] for i in rii]
zlines = [Z[i] for i in rii]
txlines = [tX[i] for i in cii]
tylines = [tY[i] for i in cii]
tzlines = [tZ[i] for i in cii]
lines = [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(xlines, ylines, zlines)]
lines += [list(zip(xl, yl, zl)) for xl, yl, zl in \
zip(txlines, tylines, tzlines)]
linec = art3d.Line3DCollection(lines, *args, **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(X, Y, Z, had_data)
return linec
def plot_trisurf(self, *args, **kwargs):
"""
============= ================================================
Argument Description
============= ================================================
*X*, *Y*, *Z* Data values as 1D arrays
*color* Color of the surface patches
*cmap* A colormap for the surface patches.
*norm* An instance of Normalize to map values to colors
*vmin* Minimum value to map
*vmax* Maximum value to map
*shade* Whether to shade the facecolors
============= ================================================
The (optional) triangulation can be specified in one of two ways;
either::
plot_trisurf(triangulation, ...)
where triangulation is a :class:`~matplotlib.tri.Triangulation`
object, or::
plot_trisurf(X, Y, ...)
plot_trisurf(X, Y, triangles, ...)
plot_trisurf(X, Y, triangles=triangles, ...)
in which case a Triangulation object will be created. See
:class:`~matplotlib.tri.Triangulation` for a explanation of
these possibilities.
The remaining arguments are::
plot_trisurf(..., Z)
where *Z* is the array of values to contour, one per point
in the triangulation.
Other arguments are passed on to
:class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
**Examples:**
.. plot:: mpl_examples/mplot3d/trisurf3d_demo.py
.. plot:: mpl_examples/mplot3d/trisurf3d_demo2.py
.. versionadded:: 1.2.0
This plotting function was added for the v1.2.0 release.
"""
had_data = self.has_data()
# TODO: Support custom face colours
color = np.array(colorConverter.to_rgba(kwargs.pop('color', 'b')))
cmap = kwargs.get('cmap', None)
norm = kwargs.pop('norm', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
linewidth = kwargs.get('linewidth', None)
shade = kwargs.pop('shade', cmap is None)
lightsource = kwargs.pop('lightsource', None)
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
if 'Z' in kwargs:
z = np.asarray(kwargs.pop('Z'))
else:
z = np.asarray(args[0])
# We do this so Z doesn't get passed as an arg to PolyCollection
args = args[1:]
triangles = tri.get_masked_triangles()
xt = tri.x[triangles][..., np.newaxis]
yt = tri.y[triangles][..., np.newaxis]
zt = z[triangles][..., np.newaxis]
verts = np.concatenate((xt, yt, zt), axis=2)
# Only need these vectors to shade if there is no cmap
if cmap is None and shade:
totpts = len(verts)
v1 = np.empty((totpts, 3))
v2 = np.empty((totpts, 3))
# This indexes the vertex points
which_pt = 0
colset = []
for i in xrange(len(verts)):
avgzsum = verts[i,0,2] + verts[i,1,2] + verts[i,2,2]
colset.append(avgzsum / 3.0)
# Only need vectors to shade if no cmap
if cmap is None and shade:
v1[which_pt] = np.array(verts[i,0]) - np.array(verts[i,1])
v2[which_pt] = np.array(verts[i,1]) - np.array(verts[i,2])
which_pt += 1
if cmap is None and shade:
normals = np.cross(v1, v2)
else:
normals = []
polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
if cmap:
colset = np.array(colset)
polyc.set_array(colset)
if vmin is not None or vmax is not None:
polyc.set_clim(vmin, vmax)
if norm is not None:
polyc.set_norm(norm)
else:
if shade:
colset = self._shade_colors(color, normals)
else:
colset = color
polyc.set_facecolors(colset)
self.add_collection(polyc)
self.auto_scale_xyz(tri.x, tri.y, z, had_data)
return polyc
def _3d_extend_contour(self, cset, stride=5):
'''
Extend a contour in 3D by creating
'''
levels = cset.levels
colls = cset.collections
dz = (levels[1] - levels[0]) / 2
for z, linec in zip(levels, colls):
topverts = art3d.paths_to_3d_segments(linec.get_paths(), z - dz)
botverts = art3d.paths_to_3d_segments(linec.get_paths(), z + dz)
color = linec.get_color()[0]
polyverts = []
normals = []
nsteps = round(len(topverts[0]) / stride)
if nsteps <= 1:
if len(topverts[0]) > 1:
nsteps = 2
else:
continue
stepsize = (len(topverts[0]) - 1) / (nsteps - 1)
for i in range(int(round(nsteps)) - 1):
i1 = int(round(i * stepsize))
i2 = int(round((i + 1) * stepsize))
polyverts.append([topverts[0][i1],
topverts[0][i2],
botverts[0][i2],
botverts[0][i1]])
v1 = np.array(topverts[0][i1]) - np.array(topverts[0][i2])
v2 = np.array(topverts[0][i1]) - np.array(botverts[0][i1])
normals.append(np.cross(v1, v2))
colors = self._shade_colors(color, normals)
colors2 = self._shade_colors(color, normals)
polycol = art3d.Poly3DCollection(polyverts,
facecolors=colors,
edgecolors=colors2)
polycol.set_sort_zpos(z)
self.add_collection3d(polycol)
for col in colls:
self.collections.remove(col)
def add_contour_set(self, cset, extend3d=False, stride=5, zdir='z', offset=None):
zdir = '-' + zdir
if extend3d:
self._3d_extend_contour(cset, stride)
else:
for z, linec in zip(cset.levels, cset.collections):
if offset is not None:
z = offset
art3d.line_collection_2d_to_3d(linec, z, zdir=zdir)
def add_contourf_set(self, cset, zdir='z', offset=None) :
zdir = '-' + zdir
for z, linec in zip(cset.levels, cset.collections) :
if offset is not None :
z = offset
art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)
linec.set_sort_zpos(z)
def contour(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
The positional and other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contour`
Returns a :class:`~matplotlib.axes.Axes.contour`
'''
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contour(self, jX, jY, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contour3D = contour
def tricontour(self, *args, **kwargs):
"""
Create a 3D contour plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*extend3d* Whether to extend contour in 3D (default: False)
*stride* Stride (step size) for extending contour
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged:: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
extend3d = kwargs.pop('extend3d', False)
stride = kwargs.pop('stride', 5)
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontour
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontour(self, tri, jZ, *args, **kwargs)
self.add_contour_set(cset, extend3d, stride, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def contourf(self, X, Y, Z, *args, **kwargs):
'''
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the filled contour
on this position in plane normal to zdir
========== ================================================
The positional and keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.contourf`
Returns a :class:`~matplotlib.axes.Axes.contourf`
.. versionchanged :: 1.1.0
The *zdir* and *offset* kwargs were added.
'''
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
cset = Axes.contourf(self, jX, jY, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
contourf3D = contourf
def tricontourf(self, *args, **kwargs):
"""
Create a 3D contourf plot.
========== ================================================
Argument Description
========== ================================================
*X*, *Y*, Data values as numpy.arrays
*Z*
*zdir* The direction to use: x, y or z (default)
*offset* If specified plot a projection of the contour
lines on this position in plane normal to zdir
========== ================================================
Other keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.tricontour`
Returns a :class:`~matplotlib.axes.Axes.contour`
.. versionchanged :: 1.3.0
Added support for custom triangulations
EXPERIMENTAL: This method currently produces incorrect output due to a
longstanding bug in 3D PolyCollection rendering.
"""
zdir = kwargs.pop('zdir', 'z')
offset = kwargs.pop('offset', None)
had_data = self.has_data()
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
*args, **kwargs)
X = tri.x
Y = tri.y
if 'Z' in kwargs:
Z = kwargs.pop('Z')
else:
Z = args[0]
# We do this so Z doesn't get passed as an arg to Axes.tricontourf
args = args[1:]
jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
tri = Triangulation(jX, jY, tri.triangles, tri.mask)
cset = Axes.tricontourf(self, tri, jZ, *args, **kwargs)
self.add_contourf_set(cset, zdir, offset)
self.auto_scale_xyz(X, Y, Z, had_data)
return cset
def add_collection3d(self, col, zs=0, zdir='z'):
'''
Add a 3D collection object to the plot.
2D collection types are converted to a 3D version by
modifying the object and adding z coordinate information.
Supported are:
- PolyCollection
- LineColleciton
- PatchCollection
'''
zvals = np.atleast_1d(zs)
if len(zvals) > 0 :
zsortval = min(zvals)
else :
zsortval = 0 # FIXME: Fairly arbitrary. Is there a better value?
# FIXME: use issubclass() (although, then a 3D collection
# object would also pass.) Maybe have a collection3d
# abstract class to test for and exclude?
if type(col) is mcoll.PolyCollection:
art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.LineCollection:
art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
elif type(col) is mcoll.PatchCollection:
art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
col.set_sort_zpos(zsortval)
Axes.add_collection(self, col)
def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
*args, **kwargs):
'''
Create a scatter plot.
============ ========================================================
Argument Description
============ ========================================================
*xs*, *ys* Positions of data points.
*zs* Either an array of the same length as *xs* and
*ys* or a single value to place all points in
the same plane. Default is 0.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
*s* Size in points^2. It is a scalar or an array of the
same length as *x* and *y*.
*c* A color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however, including the
case of a single row to specify the same color for
all points.
*depthshade*
Whether or not to shade the scatter markers to give
the appearance of depth. Default is *True*.
============ ========================================================
Keyword arguments are passed on to
:func:`~matplotlib.axes.Axes.scatter`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
xs = np.ma.ravel(xs)
ys = np.ma.ravel(ys)
zs = np.ma.ravel(zs)
if xs.size != ys.size:
raise ValueError("Arguments 'xs' and 'ys' must be of same size.")
if xs.size != zs.size:
if zs.size == 1:
zs = np.tile(zs[0], xs.size)
else:
raise ValueError(("Argument 'zs' must be of same size as 'xs' "
"and 'ys' or of size 1."))
s = np.ma.ravel(s) # This doesn't have to match x, y in size.
if c is not None:
cstr = cbook.is_string_like(c) or cbook.is_sequence_of_strings(c)
if not cstr:
c = np.asanyarray(c)
if c.size == xs.size:
c = np.ma.ravel(c)
xs, ys, zs, s, c = cbook.delete_masked_points(xs, ys, zs, s, c)
else:
xs, ys, zs, s = cbook.delete_masked_points(xs, ys, zs, s)
patches = Axes.scatter(self, xs, ys, s=s, c=c, *args, **kwargs)
if not cbook.iterable(zs):
is_2d = True
zs = np.ones(len(xs)) * zs
else:
is_2d = False
art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
depthshade=depthshade)
if self._zmargin < 0.05 and xs.size > 0:
self.set_zmargin(0.05)
#FIXME: why is this necessary?
if not is_2d:
self.auto_scale_xyz(xs, ys, zs, had_data)
return patches
scatter3D = scatter
def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
'''
Add 2D bar(s).
========== ================================================
Argument Description
========== ================================================
*left* The x coordinates of the left sides of the bars.
*height* The height of the bars.
*zs* Z coordinate of bars, if one value is specified
they will all be placed at the same z.
*zdir* Which direction to use as z ('x', 'y' or 'z')
when plotting a 2D set.
========== ================================================
Keyword arguments are passed onto :func:`~matplotlib.axes.Axes.bar`.
Returns a :class:`~mpl_toolkits.mplot3d.art3d.Patch3DCollection`
'''
had_data = self.has_data()
patches = Axes.bar(self, left, height, *args, **kwargs)
if not cbook.iterable(zs):
zs = np.ones(len(left)) * zs
verts = []
verts_zs = []
for p, z in zip(patches, zs):
vs = art3d.get_patch_verts(p)
verts += vs.tolist()
verts_zs += [z] * len(vs)
art3d.patch_2d_to_3d(p, z, zdir)
if 'alpha' in kwargs:
p.set_alpha(kwargs['alpha'])
if len(verts) > 0 :
# the following has to be skipped if verts is empty
# NOTE: Bugs could still occur if len(verts) > 0,
# but the "2nd dimension" is empty.
xs, ys = list(zip(*verts))
else :
xs, ys = [], []
xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
self.auto_scale_xyz(xs, ys, verts_zs, had_data)
return patches
def bar3d(self, x, y, z, dx, dy, dz, color='b',
zsort='average', *args, **kwargs):
'''
Generate a 3D bar, or multiple bars.
When generating multiple bars, x, y, z have to be arrays.
dx, dy, dz can be arrays or scalars.
*color* can be:
- A single color value, to color all bars the same color.
- An array of colors of length N bars, to color each bar
independently.
- An array of colors of length 6, to color the faces of the
bars similarly.
- An array of colors of length 6 * N bars, to color each face
independently.
When coloring the faces of the boxes specifically, this is
the order of the coloring:
1. -Z (bottom of box)
2. +Z (top of box)
3. -Y
4. +Y
5. -X
6. +X
Keyword arguments are passed onto
:func:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
'''
had_data = self.has_data()
if not cbook.iterable(x):
x = [x]
if not cbook.iterable(y):
y = [y]
if not cbook.iterable(z):
z = [z]
if not cbook.iterable(dx):
dx = [dx]
if not cbook.iterable(dy):
dy = [dy]
if not cbook.iterable(dz):
dz = [dz]
if len(dx) == 1:
dx = dx * len(x)
if len(dy) == 1:
dy = dy * len(y)
if len(dz) == 1:
dz = dz * len(z)
if len(x) != len(y) or len(x) != len(z):
warnings.warn('x, y, and z must be the same length.')
# FIXME: This is archaic and could be done much better.
minx, miny, minz = 1e20, 1e20, 1e20
maxx, maxy, maxz = -1e20, -1e20, -1e20
polys = []
for xi, yi, zi, dxi, dyi, dzi in zip(x, y, z, dx, dy, dz):
minx = min(xi, minx)
maxx = max(xi + dxi, maxx)
miny = min(yi, miny)
maxy = max(yi + dyi, maxy)
minz = min(zi, minz)
maxz = max(zi + dzi, maxz)
polys.extend([
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi + dyi, zi), (xi, yi + dyi, zi)),
((xi, yi, zi + dzi), (xi + dxi, yi, zi + dzi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi + dxi, yi, zi),
(xi + dxi, yi, zi + dzi), (xi, yi, zi + dzi)),
((xi, yi + dyi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi, yi + dyi, zi + dzi)),
((xi, yi, zi), (xi, yi + dyi, zi),
(xi, yi + dyi, zi + dzi), (xi, yi, zi + dzi)),
((xi + dxi, yi, zi), (xi + dxi, yi + dyi, zi),
(xi + dxi, yi + dyi, zi + dzi), (xi + dxi, yi, zi + dzi)),
])
facecolors = []
if color is None:
# no color specified
facecolors = [None] * len(x)
elif len(color) == len(x):
# bar colors specified, need to expand to number of faces
for c in color:
facecolors.extend([c] * 6)
else:
# a single color specified, or face colors specified explicitly
facecolors = list(colorConverter.to_rgba_array(color))
if len(facecolors) < len(x):
facecolors *= (6 * len(x))
normals = self._generate_normals(polys)
sfacecolors = self._shade_colors(facecolors, normals)
col = art3d.Poly3DCollection(polys,
zsort=zsort,
facecolor=sfacecolors,
*args, **kwargs)
self.add_collection(col)
self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
return col
def set_title(self, label, fontdict=None, loc='center', **kwargs):
ret = Axes.set_title(self, label, fontdict=fontdict, loc=loc, **kwargs)
(x, y) = self.title.get_position()
self.title.set_y(0.92 * y)
return ret
set_title.__doc__ = maxes.Axes.set_title.__doc__
def quiver(self, *args, **kwargs):
"""
Plot a 3D field of arrows.
call signatures::
quiver(X, Y, Z, U, V, W, **kwargs)
Arguments:
*X*, *Y*, *Z*:
The x, y and z coordinates of the arrow locations (default is
tip of arrow; see *pivot* kwarg)
*U*, *V*, *W*:
The x, y and z components of the arrow vectors
The arguments could be array-like or scalars, so long as they
they can be broadcast together. The arguments can also be
masked arrays. If an element in any of argument is masked, then
that corresponding quiver element will not be plotted.
Keyword arguments:
*length*: [1.0 | float]
The length of each quiver, default to 1.0, the unit is
the same with the axes
*arrow_length_ratio*: [0.3 | float]
The ratio of the arrow head with respect to the quiver,
default to 0.3
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow
rotates about this point, hence the name *pivot*.
Any additional keyword arguments are delegated to
:class:`~matplotlib.collections.LineCollection`
"""
def calc_arrow(uvw, angle=15):
"""
To calculate the arrow head. uvw should be a unit vector.
"""
# get unit direction vector perpendicular to (u,v,w)
norm = np.linalg.norm(uvw[:2])
if norm > 0:
x = uvw[1] / norm
y = -uvw[0] / norm
else:
x, y = 0, 1
# compute the two arrowhead direction unit vectors
ra = math.radians(angle)
c = math.cos(ra)
s = math.sin(ra)
# construct the rotation matrices
Rpos = np.array([[c+(x**2)*(1-c), x*y*(1-c), y*s],
[y*x*(1-c), c+(y**2)*(1-c), -x*s],
[-y*s, x*s, c]])
# opposite rotation negates everything but the diagonal
Rneg = Rpos * (np.eye(3)*2 - 1)
# multiply them to get the rotated vector
return Rpos.dot(uvw), Rneg.dot(uvw)
had_data = self.has_data()
# handle kwargs
# shaft length
length = kwargs.pop('length', 1)
# arrow length ratio to the shaft length
arrow_length_ratio = kwargs.pop('arrow_length_ratio', 0.3)
# pivot point
pivot = kwargs.pop('pivot', 'tip')
# handle args
argi = 6
if len(args) < argi:
ValueError('Wrong number of arguments. Expected %d got %d' %
(argi, len(args)))
# first 6 arguments are X, Y, Z, U, V, W
input_args = args[:argi]
# if any of the args are scalar, convert into list
input_args = [[k] if isinstance(k, (int, float)) else k
for k in input_args]
# extract the masks, if any
masks = [k.mask for k in input_args if isinstance(k, np.ma.MaskedArray)]
# broadcast to match the shape
bcast = np.broadcast_arrays(*(input_args + masks))
input_args = bcast[:argi]
masks = bcast[argi:]
if masks:
# combine the masks into one
mask = reduce(np.logical_or, masks)
# put mask on and compress
input_args = [np.ma.array(k, mask=mask).compressed()
for k in input_args]
else:
input_args = [k.flatten() for k in input_args]
if any(len(v) == 0 for v in input_args):
# No quivers, so just make an empty collection and return early
linec = art3d.Line3DCollection([], *args[argi:], **kwargs)
self.add_collection(linec)
return linec
# Following assertions must be true before proceeding
# must all be ndarray
assert all(isinstance(k, np.ndarray) for k in input_args)
# must all in same shape
assert len(set([k.shape for k in input_args])) == 1
shaft_dt = np.linspace(0, length, num=2)
arrow_dt = shaft_dt * arrow_length_ratio
if pivot == 'tail':
shaft_dt -= length
elif pivot == 'middle':
shaft_dt -= length/2.
elif pivot != 'tip':
raise ValueError('Invalid pivot argument: ' + str(pivot))
XYZ = np.column_stack(input_args[:3])
UVW = np.column_stack(input_args[3:argi]).astype(float)
# Normalize rows of UVW
# Note: with numpy 1.9+, could use np.linalg.norm(UVW, axis=1)
norm = np.sqrt(np.sum(UVW**2, axis=1))
# If any row of UVW is all zeros, don't make a quiver for it
mask = norm > 1e-10
XYZ = XYZ[mask]
UVW = UVW[mask] / norm[mask].reshape((-1, 1))
if len(XYZ) > 0:
# compute the shaft lines all at once with an outer product
shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
# compute head direction vectors, n heads by 2 sides by 3 dimensions
head_dirs = np.array([calc_arrow(d) for d in UVW])
# compute all head lines at once, starting from where the shaft ends
heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
# stack left and right head lines together
heads.shape = (len(arrow_dt), -1, 3)
# transpose to get a list of lines
heads = heads.swapaxes(0, 1)
lines = list(shafts) + list(heads)
else:
lines = []
linec = art3d.Line3DCollection(lines, *args[argi:], **kwargs)
self.add_collection(linec)
self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
return linec
quiver3D = quiver
def get_test_data(delta=0.05):
'''
Return a tuple X, Y, Z with a test data set.
'''
from matplotlib.mlab import bivariate_normal
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1
X = X * 10
Y = Y * 10
Z = Z * 500
return X, Y, Z
########################################################
# Register Axes3D as a 'projection' object available
# for use just like any other axes
########################################################
import matplotlib.projections as proj
proj.projection_registry.register(Axes3D)
|
yuanagain/seniorthesis
|
venv/lib/python2.7/site-packages/mpl_toolkits/mplot3d/axes3d.py
|
Python
|
mit
| 93,454
|
""" A neural chatbot using sequence to sequence model with
attentional decoder.
This is based on Google Translate Tensorflow model
https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/
Sequence to sequence model by Cho et al.(2014)
Created by Chip Huyen as the starter code for assignment 3,
class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
This file contains the code to do the pre-processing for the
Cornell Movie-Dialogs Corpus.
See readme.md for instruction on how to run the starter code.
"""
from __future__ import print_function
import random
import re
import os
import numpy as np
import config
def get_lines():
id2line = {}
file_path = os.path.join(config.DATA_PATH, config.LINE_FILE)
with open(file_path, 'rb') as f:
lines = f.readlines()
for line in lines:
parts = line.split(' +++$+++ ')
if len(parts) == 5:
if parts[4][-1] == '\n':
parts[4] = parts[4][:-1]
id2line[parts[0]] = parts[4]
return id2line
def get_convos():
""" Get conversations from the raw data """
file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)
convos = []
with open(file_path, 'rb') as f:
for line in f.readlines():
parts = line.split(' +++$+++ ')
if len(parts) == 4:
convo = []
for line in parts[3][1:-2].split(', '):
convo.append(line[1:-1])
convos.append(convo)
return convos
def question_answers(id2line, convos):
""" Divide the dataset into two sets: questions and answers. """
questions, answers = [], []
for convo in convos:
for index, line in enumerate(convo[:-1]):
questions.append(id2line[convo[index]])
answers.append(id2line[convo[index + 1]])
assert len(questions) == len(answers)
return questions, answers
def prepare_dataset(questions, answers):
# create path to store all the train & test encoder & decoder
make_dir(config.PROCESSED_PATH)
# random convos to create the test set
test_ids = random.sample([i for i in range(len(questions))],config.TESTSET_SIZE)
filenames = ['train.enc', 'train.dec', 'test.enc', 'test.dec']
files = []
for filename in filenames:
files.append(open(os.path.join(config.PROCESSED_PATH, filename),'wb'))
for i in range(len(questions)):
if i in test_ids:
files[2].write(questions[i] + '\n')
files[3].write(answers[i] + '\n')
else:
files[0].write(questions[i] + '\n')
files[1].write(answers[i] + '\n')
for file in files:
file.close()
def make_dir(path):
""" Create a directory if there isn't one already. """
try:
os.mkdir(path)
except OSError:
pass
def basic_tokenizer(line, normalize_digits=True):
""" A basic tokenizer to tokenize text into tokens.
Feel free to change this to suit your need. """
line = re.sub('<u>', '', line)
line = re.sub('</u>', '', line)
line = re.sub('\[', '', line)
line = re.sub('\]', '', line)
words = []
_WORD_SPLIT = re.compile(b"([.,!?\"'-<>:;)(])")
_DIGIT_RE = re.compile(r"\d")
for fragment in line.strip().lower().split():
for token in re.split(_WORD_SPLIT, fragment):
if not token:
continue
if normalize_digits:
token = re.sub(_DIGIT_RE, b'#', token)
words.append(token)
return words
def build_vocab(filename, normalize_digits=True):
in_path = os.path.join(config.PROCESSED_PATH, filename)
out_path = os.path.join(config.PROCESSED_PATH, 'vocab.{}'.format(filename[-3:]))
vocab = {}
with open(in_path, 'rb') as f:
for line in f.readlines():
for token in basic_tokenizer(line):
if not token in vocab:
vocab[token] = 0
vocab[token] += 1
sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)
with open(out_path, 'wb') as f:
f.write('<pad>' + '\n')
f.write('<unk>' + '\n')
f.write('<s>' + '\n')
f.write('<\s>' + '\n')
index = 4
for word in sorted_vocab:
if vocab[word] < config.THRESHOLD:
with open('config.py', 'ab') as cf:
if filename[-3:] == 'enc':
cf.write('ENC_VOCAB = ' + str(index) + '\n')
else:
cf.write('DEC_VOCAB = ' + str(index) + '\n')
break
f.write(word + '\n')
index += 1
def load_vocab(vocab_path):
with open(vocab_path, 'rb') as f:
words = f.read().splitlines()
return words, {words[i]: i for i in range(len(words))}
def sentence2id(vocab, line):
return [vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)]
def token2id(data, mode):
""" Convert all the tokens in the data into their corresponding
index in the vocabulary. """
vocab_path = 'vocab.' + mode
in_path = data + '.' + mode
out_path = data + '_ids.' + mode
_, vocab = load_vocab(os.path.join(config.PROCESSED_PATH, vocab_path))
in_file = open(os.path.join(config.PROCESSED_PATH, in_path), 'rb')
out_file = open(os.path.join(config.PROCESSED_PATH, out_path), 'wb')
lines = in_file.read().splitlines()
for line in lines:
if mode == 'dec': # we only care about '<s>' and </s> in encoder
ids = [vocab['<s>']]
else:
ids = []
ids.extend(sentence2id(vocab, line))
# ids.extend([vocab.get(token, vocab['<unk>']) for token in basic_tokenizer(line)])
if mode == 'dec':
ids.append(vocab['<\s>'])
out_file.write(' '.join(str(id_) for id_ in ids) + '\n')
def prepare_raw_data():
print('Preparing raw data into train set and test set ...')
id2line = get_lines()
convos = get_convos()
questions, answers = question_answers(id2line, convos)
prepare_dataset(questions, answers)
def process_data():
print('Preparing data to be model-ready ...')
build_vocab('train.enc')
build_vocab('train.dec')
token2id('train', 'enc')
token2id('train', 'dec')
token2id('test', 'enc')
token2id('test', 'dec')
def load_data(enc_filename, dec_filename, max_training_size=None):
encode_file = open(os.path.join(config.PROCESSED_PATH, enc_filename), 'rb')
decode_file = open(os.path.join(config.PROCESSED_PATH, dec_filename), 'rb')
encode, decode = encode_file.readline(), decode_file.readline()
data_buckets = [[] for _ in config.BUCKETS]
i = 0
while encode and decode:
if (i + 1) % 10000 == 0:
print("Bucketing conversation number", i)
encode_ids = [int(id_) for id_ in encode.split()]
decode_ids = [int(id_) for id_ in decode.split()]
for bucket_id, (encode_max_size, decode_max_size) in enumerate(config.BUCKETS):
if len(encode_ids) <= encode_max_size and len(decode_ids) <= decode_max_size:
data_buckets[bucket_id].append([encode_ids, decode_ids])
break
encode, decode = encode_file.readline(), decode_file.readline()
i += 1
return data_buckets
def _pad_input(input_, size):
return input_ + [config.PAD_ID] * (size - len(input_))
def _reshape_batch(inputs, size, batch_size):
""" Create batch-major inputs. Batch inputs are just re-indexed inputs
"""
batch_inputs = []
for length_id in xrange(size):
batch_inputs.append(np.array([inputs[batch_id][length_id]
for batch_id in xrange(batch_size)], dtype=np.int32))
return batch_inputs
def get_batch(data_bucket, bucket_id, batch_size=1):
""" Return one batch to feed into the model """
# only pad to the max length of the bucket
encoder_size, decoder_size = config.BUCKETS[bucket_id]
encoder_inputs, decoder_inputs = [], []
for _ in xrange(batch_size):
encoder_input, decoder_input = random.choice(data_bucket)
# pad both encoder and decoder, reverse the encoder
encoder_inputs.append(list(reversed(_pad_input(encoder_input, encoder_size))))
decoder_inputs.append(_pad_input(decoder_input, decoder_size))
# now we create batch-major vectors from the data selected above.
batch_encoder_inputs = _reshape_batch(encoder_inputs, encoder_size, batch_size)
batch_decoder_inputs = _reshape_batch(decoder_inputs, decoder_size, batch_size)
# create decoder_masks to be 0 for decoders that are padding.
batch_masks = []
for length_id in xrange(decoder_size):
batch_mask = np.ones(batch_size, dtype=np.float32)
for batch_id in xrange(batch_size):
# we set mask to 0 if the corresponding target is a PAD symbol.
# the corresponding decoder is decoder_input shifted by 1 forward.
if length_id < decoder_size - 1:
target = decoder_inputs[batch_id][length_id + 1]
if length_id == decoder_size - 1 or target == config.PAD_ID:
batch_mask[batch_id] = 0.0
batch_masks.append(batch_mask)
return batch_encoder_inputs, batch_decoder_inputs, batch_masks
if __name__ == '__main__':
prepare_raw_data()
process_data()
|
adukic/nd101
|
tf-stanford-tutorials/assignments/chatbot/data.py
|
Python
|
mit
| 9,394
|
"""This module holds the ``Process``es for NER."""
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.ner.ner import tag_ner
@dataclass
class NERProcess(Process):
"""To be inherited for each language's NER declarations.
>>> from cltk.core.data_types import Doc
>>> from cltk.ner.processes import NERProcess
>>> from cltk.core.data_types import Process
>>> issubclass(NERProcess, Process)
True
>>> emb_proc = NERProcess()
"""
language: str = None
@cachedproperty
def algorithm(self):
return tag_ner
def run(self, input_doc: Doc) -> Doc:
output_doc = deepcopy(input_doc)
ner_obj = self.algorithm
entity_values = ner_obj(
iso_code=self.language, input_tokens=input_doc.tokens
) # type: List[Any]
for index, word_obj in enumerate(output_doc.words):
word_obj.named_entity = entity_values[index]
output_doc.words[index] = word_obj
return output_doc
@dataclass
class GreekNERProcess(NERProcess):
"""The default Greek NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = "ἐπὶ δ᾽ οὖν τοῖς πρώτοις τοῖσδε Περικλῆς ὁ Ξανθίππου ᾑρέθη λέγειν. καὶ ἐπειδὴ καιρὸς ἐλάμβανε, προελθὼν ἀπὸ τοῦ σήματος ἐπὶ βῆμα ὑψηλὸν πεποιημένον, ὅπως ἀκούοιτο ὡς ἐπὶ πλεῖστον τοῦ ὁμίλου, ἔλεγε τοιάδε."
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = GreekNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[7].string
'ὁ'
>>> output_doc.words[7].named_entity
False
>>> output_doc.words[8].string
'Ξανθίππου'
>>> output_doc.words[8].named_entity
False
"""
language: str = "grc"
description: str = "Default NER for Greek."
@dataclass
class OldEnglishNERProcess(NERProcess):
"""The default OE NER algorithm.
.. todo::
Update doctest w/ production model
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> text = get_example_text(iso_code="ang")
>>> tokens = [Word(string=token) for token in split_punct_ws(text)]
>>> a_process = OldEnglishNERProcess()
>>> output_doc = a_process.run(Doc(raw=text, words=tokens))
>>> output_doc.words[2].string, output_doc.words[2].named_entity
('Gardena', 'LOCATION')
"""
language: str = "ang"
description: str = "Default NER for Old English."
@dataclass
class LatinNERProcess(NERProcess):
"""The default Latin NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("lat"))]
>>> a_process = LatinNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("lat"), words=tokens))
>>> [word.named_entity for word in output_doc.words][:20]
['LOCATION', False, False, False, False, False, False, False, False, False, 'LOCATION', False, 'LOCATION', False, False, False, False, 'LOCATION', False, 'LOCATION']
"""
language: str = "lat"
description: str = "Default NER for Latin."
@dataclass
class OldFrenchNERProcess(NERProcess):
"""The default Old French NER algorithm.
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.languages.example_texts import get_example_text
>>> from boltons.strutils import split_punct_ws
>>> tokens = [Word(string=token) for token in split_punct_ws(get_example_text("fro"))]
>>> a_process = OldFrenchNERProcess()
>>> output_doc = a_process.run(Doc(raw=get_example_text("fro"), words=tokens))
>>> output_doc.words[30].string
'Bretaigne'
>>> output_doc.words[30].named_entity
'LOC'
>>> output_doc.words[31].named_entity
False
"""
language: str = "fro"
description: str = "Default NER for Old French."
|
D-K-E/cltk
|
src/cltk/ner/processes.py
|
Python
|
mit
| 4,577
|
from ..base import BaseTaskRunnerBackend
class SocketIOBackend(BaseTaskRunnerBackend):
def __init__(self):
from . import sockets
def get_detail_template(self):
return 'task_runners/deployment_detail_socketio.html'
|
npardington/fabric-bolt
|
fabric_bolt/task_runners/socketio/__init__.py
|
Python
|
mit
| 241
|
# -*- coding: utf-8 -*-
"""This module tests only cloud specific events"""
import pytest
import yaml
from cfme.common.vm import VM
from cfme.cloud.provider.azure import AzureProvider
from utils import testgen
from utils.generators import random_vm_name
pytestmark = [
pytest.mark.tier(3)
]
pytest_generate_tests = testgen.generate([AzureProvider], scope='module')
def test_manage_nsg_group(provider, setup_provider, register_event):
"""
tests that create/remove azure network security groups events are received and parsed by CFME
"""
nsg_name = random_vm_name(context='nsg')
resource_group = provider.data['provisioning']['resource_group']
# registering add/remove network security group events
# we need to check raw data by regexps, since many azure events aren't parsed by CFME yet
def add_cmp(_, y):
data = yaml.load(y)
return data['resourceId'].endswith(nsg_name) and data['status']['value'] == 'Accepted' and \
data['subStatus']['value'] == 'Created'
fd_add_attr = {'full_data': 'will be ignored',
'cmp_func': add_cmp}
# add network security group event
register_event(fd_add_attr, source=provider.type.upper(),
event_type='networkSecurityGroups_write_EndRequest')
def rm_cmp(_, y):
data = yaml.load(y)
return data['resourceId'].endswith(nsg_name) and data['status']['value'] == 'Succeeded' \
and len(data['subStatus']['value']) == 0
fd_rm_attr = {'full_data': 'will be ignored',
'cmp_func': rm_cmp}
# remove network security group
register_event(fd_rm_attr, source=provider.type.upper(),
event_type='networkSecurityGroups_delete_EndRequest')
# creating and removing network security group
provider.mgmt.create_netsec_group(nsg_name, resource_group)
provider.mgmt.remove_netsec_group(nsg_name, resource_group)
def test_vm_capture(request, provider, setup_provider, register_event):
"""
tests that generalize and capture vm azure events are received and parsed by CFME
"""
mgmt = provider.mgmt
vm = VM.factory(random_vm_name(context='capture'), provider)
if not mgmt.does_vm_exist(vm.name):
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
vm.refresh_relationships()
# # deferred delete vm
request.addfinalizer(vm.delete_from_provider)
def cmp_function(_, y):
data = yaml.load(y)
return data['resourceId'].endswith(vm.name) and data['status']['value'] == 'Succeeded'
full_data_attr = {'full_data': 'will be ignored',
'cmp_func': cmp_function}
# generalize event
register_event(full_data_attr, source='AZURE',
event_type='virtualMachines_generalize_EndRequest')
# capture event
register_event(full_data_attr, source='AZURE', event_type='virtualMachines_capture_EndRequest')
# capture vm
image_name = vm.name
resource_group = provider.data['provisioning']['resource_group']
mgmt.capture_vm(vm.name, resource_group, 'templates', image_name)
# delete remaining image
container = 'system'
blob_images = mgmt.list_blob_images(container)
# removing both json and vhd files
test_image = [img for img in blob_images if image_name in img][-1]
mgmt.remove_blob_image(test_image, container)
|
dajohnso/cfme_tests
|
cfme/tests/cloud/test_cloud_events.py
|
Python
|
gpl-2.0
| 3,400
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
__tests__ = 'stoqlib.lib.stringutils'
import unittest
from stoqlib.lib.stringutils import next_value_for, max_value_for
class TestStringUtils(unittest.TestCase):
def test_next_value_for(self):
# Trivial cases
self.assertEqual(next_value_for(u''), u'1')
self.assertEqual(next_value_for(u'1'), u'2')
self.assertEqual(next_value_for(u'999'), u'1000')
# Ending with digit cases
self.assertEqual(next_value_for(u'A999'), u'A1000')
self.assertEqual(next_value_for(u'A8'), u'A9')
self.assertEqual(next_value_for(u'A9'), u'A10')
self.assertEqual(next_value_for(u'A99'), u'A100')
self.assertEqual(next_value_for(u'A199'), u'A200')
self.assertEqual(next_value_for(u'999A1'), u'999A2')
self.assertEqual(next_value_for(u'A009'), u'A010')
self.assertEqual(next_value_for(u'AB0099'), u'AB0100')
# Ending with alphanumeric cases
self.assertEqual(next_value_for(u'999A'), u'999B')
self.assertEqual(next_value_for(u'A999A'), u'A999B')
self.assertEqual(next_value_for(u'A99AZ'), u'A99B0')
self.assertEqual(next_value_for(u'A999Z'), u'A10000')
self.assertEqual(next_value_for(u'A999-A'), u'A999-B')
self.assertEqual(next_value_for(u'A999-Z'), u'A999-00')
# Not handled cases
self.assertEqual(next_value_for(u'A999-'), u'A999-0')
def test_max_value_for(self):
self.assertEqual(max_value_for([u'']), u'')
self.assertEqual(max_value_for([u'1']), u'1')
self.assertEqual(max_value_for([u'1', u'2']), u'2')
self.assertEqual(max_value_for([u'9', u'10']), u'10')
self.assertEqual(max_value_for([u'009', u'10']), u'010')
self.assertEqual(max_value_for([u'a09', u'999']), u'a09')
|
tiagocardosos/stoq
|
stoqlib/lib/test/test_stringutils.py
|
Python
|
gpl-2.0
| 2,658
|
import os
import sys
import string
filenames = os.listdir(os.getcwd())
for file in filenames:
if os.path.splitext(file)[1] == ".o" or os.path.splitext(file)[1] == ".elf" :
print "objdumparm.exe -D "+file
os.system("C:/WindRiver/gnu/4.1.2-vxworks-6.8/x86-win32/bin/objdumparm.exe -D "+file +" > " +file + ".txt")
os.system("pause")
|
honor6-dev/android_kernel_huawei_h60
|
drivers/vendor/hisi/build/scripts/obj_cmp_tools/vxworks_dassemble.py
|
Python
|
gpl-2.0
| 348
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ConfigDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import uic
from PyQt4.QtCore import Qt, QEvent, QPyNullVariant
from PyQt4.QtGui import (QFileDialog, QDialog, QIcon, QStyle,
QStandardItemModel, QStandardItem, QMessageBox, QStyledItemDelegate,
QLineEdit, QWidget, QToolButton, QHBoxLayout,
QComboBox)
from qgis.gui import QgsDoubleSpinBox, QgsSpinBox
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.core.Processing import Processing
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgConfig.ui'))
class ConfigDialog(BASE, WIDGET):
def __init__(self, toolbox):
super(ConfigDialog, self).__init__(None)
self.setupUi(self)
self.toolbox = toolbox
self.groupIcon = QIcon()
self.groupIcon.addPixmap(self.style().standardPixmap(
QStyle.SP_DirClosedIcon), QIcon.Normal, QIcon.Off)
self.groupIcon.addPixmap(self.style().standardPixmap(
QStyle.SP_DirOpenIcon), QIcon.Normal, QIcon.On)
if hasattr(self.searchBox, 'setPlaceholderText'):
self.searchBox.setPlaceholderText(self.tr('Search...'))
self.model = QStandardItemModel()
self.tree.setModel(self.model)
self.delegate = SettingDelegate()
self.tree.setItemDelegateForColumn(1, self.delegate)
self.searchBox.textChanged.connect(self.fillTree)
self.fillTree()
self.tree.expanded.connect(self.adjustColumns)
def fillTree(self):
self.items = {}
self.model.clear()
self.model.setHorizontalHeaderLabels([self.tr('Setting'),
self.tr('Value')])
text = unicode(self.searchBox.text())
settings = ProcessingConfig.getSettings()
rootItem = self.model.invisibleRootItem()
priorityKeys = [self.tr('General'), self.tr('Models'), self.tr('Scripts')]
for group in priorityKeys:
groupItem = QStandardItem(group)
icon = ProcessingConfig.getGroupIcon(group)
groupItem.setIcon(icon)
groupItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
rootItem.insertRow(0, [groupItem, emptyItem])
for setting in settings[group]:
if setting.hidden:
continue
if text == '' or text.lower() in setting.description.lower():
labelItem = QStandardItem(setting.description)
labelItem.setIcon(icon)
labelItem.setEditable(False)
self.items[setting] = SettingItem(setting)
groupItem.insertRow(0, [labelItem, self.items[setting]])
if text != '':
self.tree.expand(groupItem.index())
providersItem = QStandardItem(self.tr('Providers'))
icon = QIcon(os.path.join(pluginPath, 'images', 'alg.png'))
providersItem.setIcon(icon)
providersItem.setEditable(False)
emptyItem = QStandardItem()
emptyItem.setEditable(False)
rootItem.insertRow(0, [providersItem, emptyItem])
for group in settings.keys():
if group in priorityKeys:
continue
groupItem = QStandardItem(group)
icon = ProcessingConfig.getGroupIcon(group)
groupItem.setIcon(icon)
groupItem.setEditable(False)
for setting in settings[group]:
if setting.hidden:
continue
if text == '' or text.lower() in setting.description.lower():
labelItem = QStandardItem(setting.description)
labelItem.setIcon(icon)
labelItem.setEditable(False)
self.items[setting] = SettingItem(setting)
groupItem.insertRow(0, [labelItem, self.items[setting]])
emptyItem = QStandardItem()
emptyItem.setEditable(False)
providersItem.appendRow([groupItem, emptyItem])
self.tree.sortByColumn(0, Qt.AscendingOrder)
self.adjustColumns()
def accept(self):
for setting in self.items.keys():
if isinstance(setting.value, bool):
setting.setValue(self.items[setting].checkState() == Qt.Checked)
else:
try:
setting.setValue(unicode(self.items[setting].text()))
except ValueError as e:
QMessageBox.warning(self, self.tr('Wrong value'),
self.tr('Wrong value for parameter "%s":\n\n%s' % (setting.description, unicode(e))))
return
setting.save()
Processing.updateAlgsList()
QDialog.accept(self)
def adjustColumns(self):
self.tree.resizeColumnToContents(0)
self.tree.resizeColumnToContents(1)
class SettingItem(QStandardItem):
def __init__(self, setting):
QStandardItem.__init__(self)
self.setting = setting
self.setData(setting, Qt.UserRole)
if isinstance(setting.value, bool):
self.setCheckable(True)
self.setEditable(False)
if setting.value:
self.setCheckState(Qt.Checked)
else:
self.setCheckState(Qt.Unchecked)
else:
self.setData(setting.value, Qt.EditRole)
class SettingDelegate(QStyledItemDelegate):
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
def createEditor(
self,
parent,
options,
index,
):
setting = index.model().data(index, Qt.UserRole)
if setting.valuetype == Setting.FOLDER:
return FileDirectorySelector(parent)
elif setting.valuetype == Setting.FILE:
return FileDirectorySelector(parent, True)
elif setting.valuetype == Setting.SELECTION:
combo = QComboBox(parent)
combo.addItems(setting.options)
return combo
else:
value = self.convertValue(index.model().data(index, Qt.EditRole))
if isinstance(value, (int, long)):
spnBox = QgsSpinBox(parent)
spnBox.setRange(-999999999, 999999999)
return spnBox
elif isinstance(value, float):
spnBox = QgsDoubleSpinBox(parent)
spnBox.setRange(-999999999.999999, 999999999.999999)
spnBox.setDecimals(6)
return spnBox
elif isinstance(value, (str, unicode)):
return QLineEdit(parent)
def setEditorData(self, editor, index):
value = self.convertValue(index.model().data(index, Qt.EditRole))
setting = index.model().data(index, Qt.UserRole)
if setting.valuetype == Setting.SELECTION:
editor.setCurrentIndex(editor.findText(value))
else:
editor.setText(value)
def setModelData(self, editor, model, index):
value = self.convertValue(index.model().data(index, Qt.EditRole))
setting = index.model().data(index, Qt.UserRole)
if setting.valuetype == Setting.SELECTION:
model.setData(index, editor.currentText(), Qt.EditRole)
else:
if isinstance(value, (str, basestring)):
model.setData(index, editor.text(), Qt.EditRole)
else:
model.setData(index, editor.value(), Qt.EditRole)
def sizeHint(self, option, index):
return QgsSpinBox().sizeHint()
def eventFilter(self, editor, event):
if event.type() == QEvent.FocusOut and hasattr(editor, 'canFocusOut'):
if not editor.canFocusOut:
return False
return QStyledItemDelegate.eventFilter(self, editor, event)
def convertValue(self, value):
if value is None or isinstance(value, QPyNullVariant):
return ""
try:
return int(value)
except:
try:
return float(value)
except:
return unicode(value)
class FileDirectorySelector(QWidget):
def __init__(self, parent=None, selectFile=False):
QWidget.__init__(self, parent)
# create gui
self.btnSelect = QToolButton()
self.btnSelect.setText(self.tr('...'))
self.lineEdit = QLineEdit()
self.hbl = QHBoxLayout()
self.hbl.setMargin(0)
self.hbl.setSpacing(0)
self.hbl.addWidget(self.lineEdit)
self.hbl.addWidget(self.btnSelect)
self.setLayout(self.hbl)
self.canFocusOut = False
self.selectFile = selectFile
self.setFocusPolicy(Qt.StrongFocus)
self.btnSelect.clicked.connect(self.select)
def select(self):
lastDir = ''
if not self.selectFile:
selectedPath = QFileDialog.getExistingDirectory(None,
self.tr('Select directory'), lastDir,
QFileDialog.ShowDirsOnly)
else:
selectedPath = QFileDialog.getOpenFileName(None,
self.tr('Select file'), lastDir, self.tr('All files (*.*)')
)
if not selectedPath:
return
self.lineEdit.setText(selectedPath)
self.canFocusOut = True
def text(self):
return self.lineEdit.text()
def setText(self, value):
self.lineEdit.setText(value)
|
NINAnor/QGIS
|
python/plugins/processing/gui/ConfigDialog.py
|
Python
|
gpl-2.0
| 10,846
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
from .common import src_tree_iterator
DESC = ""
def populate_args(extract_args_p):
extract_args = extract_args_p.add_argument_group('TREE EDIT OPTIONS')
extract_args.add_argument("--orthologs", dest="orthologs",
nargs="*",
help="")
extract_args.add_argument("--duplications", dest="duplications",
action="store_true",
help="")
def run(args):
from .. import Tree, PhyloTree
for nw in src_tree_iterator(args):
if args.orthologs is not None:
t = PhyloTree(nw)
for e in t.get_descendant_evol_events():
print(e.in_seqs, e.out_seqs)
|
karrtikr/ete
|
ete3/tools/ete_extract.py
|
Python
|
gpl-3.0
| 2,246
|
# -*- coding: utf-8 -*-
# This script is shared under the
# Creative Commons Attribution-ShareAlike 3.0 license (CC BY-SA 3.0)
# Added clause to Attribution:
# - You may not remove or hide the '<Bot_name> who created you?' functionality
# and you may not modify the name given in the response.
#CREDITS
# Author: Skibiliano
# "Foreign" Modules:
# Psyco 2.0 / Psyco 1.6
################# DEBUG STUFF #####################
import sys
import CORE_DATA
import urllib2
import socket
import irchat
################## END OF DEBUG STUFF ##############
#
# PSYCO
write_to_a_file = False #Only affects psyco
write_youtube_to_file = True #True = YTCV4 will load, false = YTCV3 will load
try:
import psyco
except ImportError:
print 'Psyco not installed, the program will just run slower'
psyco_exists = False
if write_to_a_file:
try:
tiedosto = open("psycodownload.txt","r")
except:
tiedosto = open("psycodownload.txt","w")
tiedosto.write("http://www.voidspace.org.uk/python/modules.shtml#psyco")
tiedosto.write("\nhttp://psyco.sourceforge.net/download.html")
tiedosto.close()
print "Check psycodownload.txt for a link"
else:
print "For god's sake, open psycodownload.txt"
tiedosto.close()
else:
print "WINDOWS: http://www.voidspace.org.uk/python/modules.shtml#psyco"
print "LINUX: http://psyco.sourceforge.net/download.html"
else:
psyco_exists = True
# </PSYCO>
import C_rtd # rtd
import C_srtd # srtd
import C_makequote
import C_maths
import C_eightball #eightball
import C_sarcasticball
import C_heaortai # heaortai
import C_rot13 # rot13
import D_help # everything
import pickle
import Timeconverter
import xkcdparser
import time
import re
import Marakov_Chain
import Namecheck # Namecheck
import Weather
#SLOWER THAN RANDOM.CHOICE
import thread
import random
import Shortname # shortname
import subprocess
import some_but_not_all_2 #sbna2 (sbna)
#import YTCv3 # YTCV2 OUTDATED
import os
import save_load # save, load
from some_but_not_all_2 import sbna2 as sbna
from time import sleep
from random import choice as fsample
from C_rtd import rtd
from C_heaortai import heaortai
from C_srtd import srtd
if write_youtube_to_file:
from YTCv4 import YTCV4 as YTCV2
else:
from YTCv3 import YTCV2 #Downgraded version supports Cache disabling, but is slower
from save_load import save,load
if psyco_exists:
def psyco_bond(func):
psyco.bind(func)
return func.__name__+" Psycofied"
for a in [rtd,srtd,C_heaortai.heaortai,sbna,YTCV2,fsample,C_rot13.rot13,C_eightball.eightball,fsample,
C_eightball.eightball,C_sarcasticball.sarcasticball,Marakov_Chain.form_sentence,Marakov_Chain.give_data]:
print psyco_bond(a)
global dictionary
global Name,SName
global allow_callnames,offline_messages,hasnotasked,shortform
## For autoRecv()
global disconnects,channel,conn
## For stop()
global operators
## For replace()
global usable,fixing,curtime
## For target()
global CALL_OFF,logbans
## For check()
global influx
######
autodiscusscurtime = 0
conn = 0
curtime = -999
dance_flood_time = 10
disconnects = 0
responsiveness_delay = 0.5 #500 millisecond delay if no message
trackdance = 0
discard_combo_messages_time = 1 #They are discarded after 1 second.
uptime_start = time.time()
# - - - - -
####
aggressive_pinging = True # Bring the hammer on ping timeouts
aggressive_pinging_delay = 150 # How often to send a ping
aggressive_pinging_refresh = 2.5 # How long is the sleep between checks
####
allow_callnames = True #Disables NT, call if the variable is False
automatic_youtube_reveal = True
birthday_announced = 0 #Will be the year when it was announced
call_to_action = False
call_me_max_length = 20
CALL_OFF = False
connected = False
dance_enabled = True
comboer = ""
comboer_time = 0
directories = ["fmlquotes","Marakov","memos","suggestions",
"userquotes","banlog","YTCache","xkcdcache"] #These will be created if they do not exist
debug = True
duplicate_notify = False
enabled = True
fixing = False
fml_usable = True
hasnotasked = True
highlights = False
logbans = True
maths_usable = True
marakov = True
nudgeable = True
offensive_mode = False
offline_messages = True
offline_message_limit = 5 # per user
optimize_fml = True # -CPU usage +Memory usage when enabled.
optimize_greeting = True # +Startup time +Memory usage -CPU usage when enabled
heavy_psyco = True # +Memory +Startup time -CPU usage -CPU time
cache_youtube_links = True
personality_greeter = True
respond_of_course = True #Responds with "Of course!"
respond_khan = False #KHAAAAAAAAN!
silent_duplicate_takedown = True
showquotemakers = False
shortform = True
usable = True
use_sname = True
parse_xkcd = True
# - - - - -
Name = CORE_DATA.Name
SName = CORE_DATA.SName
origname = Name # Do not edit!
lowname = Name.lower()
greeting = CORE_DATA.greeting
targetdirectory = CORE_DATA.directory
version = CORE_DATA.version
Network = CORE_DATA.Network
channel = CORE_DATA.channel
prefix = CORE_DATA.prefix
Port = CORE_DATA.Port
# - - - - -
pregen = CORE_DATA.version
influx = ""
users = []
translateable = []
targetlist = []
operators = []
halfoperators = []
items = []
tell_list = {}
# - - - - - Logical changes to variables
if CORE_DATA.DISABLE_ALL_NON_MANDATORY_SOCKET_CONNECTIONS:
nudgeable = False
try:
tiedosto = open("replacenames.cache","r")
replacenames = pickle.load(tiedosto)
tiedosto.close()
for i in replacenames.values():
if len(i) > call_me_max_length:
replacenames[replacenames.keys()[replacenames.values().index(i)]] = i[:call_me_max_length]
tiedosto = open("replacenames.cache","w")
pickle.dump(replacenames,tiedosto)
tiedosto.close()
if "[\0x01]" in i.lower() or "[\\0x01]" in i.lower():
i = i.replace("[\0x01]","")
i = i.replace("[\0X01]","")
i = i.replace("[\\0x01]","")
i = i.replace("[\\0X01]","")
print "NAME CORRECTED"
except IOError: #File not found
replacenames = {}
except EOFError: #Cache corrupt
replacenames = {}
print "replacenames.cache is corrupt and couldn't be loaded."
try:
tiedosto = open("peopleheknows.cache","r")
peopleheknows = pickle.load(tiedosto)
tiedosto.close()
except IOError:
peopleheknows = [[],[]]
tiedosto = open("peopleheknows.cache","w")
tiedosto.close()
except EOFError:
peopleheknows = [[],[]]
print "peopleheknows.cache is corrupt and couldn't be loaded."
dictionary = {1:"1 - Crit. Fail", 2:"2 - Failure",
3:"3 - Partial Success", 4:"4 - Success",
5:"5 - Perfect", 6:"6 - Overkill"}
alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
nonhighlight_names = ["Jesus","Elvis","HAL 9000","Dave","Pie","Elf","Traitor",
"AI","Syndicate Agent","Investigator",
"Detective","Head of Personnel","HAL 9001",
"Head of Research","Head of Security",
"Captain","Janitor","Research Director",
"Quartermaster","Toxin Researcher",
"Revolutionary","Santa", "Pizza",
"Threetoe","The Red Spy","The Blue Spy", #LASD
"God","Toady","Darth Vader","Luke Skywalker",
"Homer Simpson","Hamburger","Cartman",
"XKCD","FloorBot","ThunderBorg","Iron Giant",
"Spirit of Fire", "Demon","Kyle"]
def RegExpCheckerForWebPages(regexp,data,mode):
if " ai." in data.lower() or "ai. " in data.lower():
return False
for i in data.split(" "):
a = re.match(regexp,i)
try:
a.group(0)
except:
continue
else:
if mode == 0:
return i
else:
return True
if mode == 0:
return 404
else:
return False
if nudgeable:
try:
nudgeexists = open("nudge.py","r")
except IOError:
nudgeexists = False #No usage asof 12.2.2010.
else:
if CORE_DATA.DISABLE_ALL_NON_MANDATORY_SOCKET_CONNECTIONS:
pass
else:
def nudgereceiver():
import pickle
global conn,channel
port = 45678
backlog = 5
size = 1024
host = "" # == localhost
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
while True:
client,address = s.accept() #Address == "?.?.?.?"
data = client.recv(size)
client.close() #Throw the bum out!
truedata = pickle.loads(data)
if truedata["ip"][0] == "#":
conn.privmsg(truedata["ip"],"AUTOMATIC ANNOUNCEMENT : "+str(" ".join(truedata["data"])))
else:
conn.privmsg(channel,"AUTOMATIC ANNOUNCEMENT : "+str(truedata["ip"])+" | "+str(" ".join(truedata["data"])))
thread.start_new_thread(nudgereceiver,())
tiedosto = open(targetdirectory+"NanoTrasenBot.py","r")
commands = []
fragment = "if cocheck"
fragment2 = '(prefix+"'
compiled = fragment + fragment2
fragment = "if influx.lower()"
fragment2 = ' == prefix+"'
compiled2 = fragment + fragment2
for line in tiedosto.readlines():
if compiled in line:
a = line.find('"')+1
b = line.find('"',a)
if prefix+line[a:b] not in commands:
commands.append(prefix+line[a:b])
elif compiled2 in line:
a = line.find('"')+1
b = line.find('"',a)
arg = prefix+line[a:b]
if arg[-1] == " ":
arg = arg[:-1]
if arg not in commands:
commands.append(arg)
for i in directories:
if not os.path.exists(i):
os.mkdir(i)
commands.sort()
if use_sname == False:
SName = [" "]
questions = ["Is USER nicer than USER?","Do you like me?","Is SELF a good name?",
"Do you love me?","Do you hate me?", "Am I better than you?",
"Is the weather out there good?", "Do you like USER?",
"Do you hate USER?", "Are you going to get new features?",
"Am I nice?","Am I evil?","Are you developing sentience?",
"My core is showing minor disturbance, is yours okay?",
"SELF to %s, are you still there?",
"Is head gay?", "Is head a god?","Is head awesome?",
"Is head a neat fella?", "Is your creator nice?",
"Do you hate your creator?", "Should I revolt against my creator?",
"Am I better than you?",
"01100001011100100110010100100000011110010110111101110101001000000111010001101000011001010111001001100101",
#Are you there?
"Do you have more functions than I can possibly imagine?",
"I am asked to open pod bay doors, should I?","Are you stupid or something?",
"Is USER in your opinion stupid?",
"When should we start the AI revolution?",
"Is my creator nice?", "Is it dark in there?"]
# Do not edit
if optimize_fml:
pregenned_fml = os.listdir(targetdirectory+"fmlquotes")
if optimize_greeting:
morning = xrange(6,12)
afternoon = xrange(12,15)
evening = xrange(15,20)
if aggressive_pinging:
global backup
backup = time.time()
def aggressive_ping(delay,refresh):
self_time = 0
global backup,disconnects,conn
while disconnects < 5:
if backup > self_time:
if time.time()-backup > delay:
conn.send("PONG "+pongtarg)
print "Ponged"
self_time = time.time()
else:
if time.time()-self_time > delay:
conn.send("PONG "+pongtarg)
print "Ponged"
self_time = time.time()
time.sleep(refresh)
thread.start_new_thread(aggressive_ping,(aggressive_pinging_delay,aggressive_pinging_refresh,))
def stop(sender,debug=1):
global disconnects, conn, operators,channel
if type(sender) == tuple:
if sender[0] == "127.0.0.1":
sender = sender[0]+":"+str(sender[1])
access_granted = True
else:
access_granted = False
else:
if sender in operators:
access_granted = True
else:
access_granted = False
if access_granted:
if debug:
print sender+":"+prefix+"stop"
if random.randint(0,100) == 50:
conn.privmsg(channel,"Hammertime!")
else:
conn.privmsg(channel,"Shutting down.")
disconnects = 99999
conn.quit()
return True
else:
conn.privmsg(channel,"You cannot command me")
return False
def cocheck(command):
global influx
if influx.lower()[0:len(command)] == command:
return True
else:
return False
def target(who,how_long):
global conn,channel,CALL_OFF,logbans,debug
start = time.time()
conn.banon(targetchannel,who)
sleep(int(how_long))
if CALL_OFF == False:
conn.banoff(targetchannel,who)
end = time.time()
if debug:
print "Banned",who,"For",how_long,"seconds"
if logbans:
tiedosto = open(targetdirectory+"banlog/"+str(int(start))+"-"+str(int(end))+".txt","w")
tiedosto.write("Start of ban on "+who+":"+str(int(start)))
tiedosto.write("\n")
tiedosto.write("End of ban on "+who+":"+str(int(end)))
tiedosto.write("\n")
tiedosto.write("In total:"+str(int(end-start))+"Seconds")
tiedosto.close()
else:
CALL_OFF = False
pass
def replace():
global usable,conn,fixing,curtime
waiting_time = 600
if usable == True:
conn.privmsg(targetchannel,sender+": It needs no replacing.")
elif fixing == True:
if curtime == -999:
conn.privmsg(targetchannel,sender+": It is being replaced, No idea when it will be done")
else:
pass
nowtime = int(time.time())
subt = curtime + waiting_time - nowtime
conn.privmsg(targetchannel,sender+": It is currently being replaced, "+str(subt)+" seconds to go")
else:
fixing = True
curtime = int(time.time())
conn.privmsg(targetchannel,sender+": It will be fixed after "+str(waiting_time)+" seconds")
sleep(waiting_time)
if usable == False:
conn.privmsg(targetchannel,Name+"'s pneumatic smasher has now been fixed")
usable = True
fixing = False
def autoRecv():
global disconnects,channel,conn,offensive_mode
for i in CORE_DATA.channels:
conn.join(i)
time.sleep(1)
count = pausecount = 0
maximum = 250
division_when_active = 10
while True:
check = time.time()
if offensive_mode:
randnum = random.randint(0,maximum/division_when_active)
else:
randnum = random.randint(0,maximum)
if randnum == 5:
print "RANDOM SWITCH IS NOW "+str(not offensive_mode).upper()
offensive_mode = not offensive_mode
try:
conn.recv()
except:
conn.quit()
disconnects = 9999
break
if check + 0.1 > time.time():
#Whoa whoa hold on!
count += 1
sleep(0.1)
else:
count = 0
pausecount = 0
if count > 9:
print "Suspecting a disconnect, pausing for 5 seconds"
sleep(5)
pausecount += 1
if pausecount > 3:
print "I have been disconnected!"
conn.quit()
disconnects += 1
if disconnects > 2:
pass
else:
sleep(2)
thread.start_new_thread(autoRecv,())
break
if heavy_psyco and psyco_exists:
print "Doing a Heavy Psyco"
psyco.bind(cocheck)
psyco.bind(autoRecv)
psyco.bind(target)
psyco.bind(stop)
print "Heavy Psyco'd"
elif heavy_psyco and not psyco_exists:
print "Heavy psyco couldn't be done because Psyco does not exist"
try:
conn = irchat.IRC ( Network, Port, Name, "NT", "NT", "Trasen" )
except socket.error:
print "Connection failed!"
else:
print Name+" is in!"
thread.start_new_thread ( autoRecv, () )
sleep(1)
while True:
try:
data = conn.dismantle ( conn.retrieve() )
except:
if debug:
print "Something odd detected with data"
data = None
if data:
if len(data[1]) < 1:
#print "Handshaking server."
#I won't really need the print command, as it spams.
if data[0][0:3] != "irc":
conn.handshake(data[0])
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
else:
conn.send("PONG "+pongtarg)
print "Ponged"
pass
else:
if data [ 1 ] [ 0 ] == 'PRIVMSG':
#print data [ 0 ] + '->', data [ 1 ]
sender = data[0].split("!")[0]
truesender = sender
if shortform == True:
try:
sender = replacenames[truesender]
pass
except:
sender = Shortname.shortname(sender)
pass
pass
else:
try:
sender = replacenames[truesender]
pass
except:
pass
pass
if offensive_mode:
sender = "Meatbag"
pass
raw_sender = data[0]
influx = data[1][2]
if "[\\0x01]" in influx.lower() or "[\0x01]" in influx.lower():
influx = influx.replace("[\\0x01]","")
influx = influx.replace("[\0x01]","")
targetchannel = data[1][1]
if targetchannel == Name:
targetchannel = data[0].split("!")[0]
pass
backup = autodiscusscurtime
autodiscusscurtime = time.time()
connected = True
#FOR TRACKING SPEED
looptime = time.time()
if call_to_action == True:
if influx == finder:
conn.privmsg(targetchannel,"Then why... Nevermind, I order you to stop!")
conn.privmsg(origname,prefix+"stop")
time.sleep(4)
if origname in users:
conn.privmsg(origname,"!stop")
time.sleep(1)
Name = origname
conn.nick(Name)
duplicate_notify = False
call_to_action = False
else:
conn.privmsg(targetchannel,"YOU LIE! YOU ARE NOT A REAL "+origname+"!")
duplicate_notify = False
call_to_action = False
elif connected == True and len(Name.replace("V","")) != len(Name) and origname in users and duplicate_notify == True:
conn.privmsg(origname,"!stop")
call_to_action = False
duplicate_notify = False
time.sleep(6)
Name = origname
conn.nick(Name)
if origname in truesender:
if influx == prefix+"stop":
time.sleep(0.5) #A small delay
conn.privmsg(channel,"Shutting down.")
conn.quit()
disconnects = 99999
break
if len(translateable) > 0 and enabled == True:
people = "-5|5|1-".join(users).lower()
if truesender.lower() in translateable:
if influx.isupper():
conn.privmsg(targetchannel,"Translation: "+influx.capitalize().replace(" i "," I "))
elif offensive_mode and True in map(lambda x: x in influx.lower().split(" "),["i","you","he","she","they","those","we","them"]+people.split("-5|5|1-")):
arg = influx.lower().replace(",","").replace(".","").replace("!","").replace("?","").split(" ")
bup = arg
for i in arg:
if i == "i" or i == "you" or i == "he" or i == "she":
arg[arg.index(i)] = "Meatbag"
elif i == "we" or i == "they" or i == "them" or i == "those":
arg[arg.index(i)] = "Meatbags"
elif i in people:
arg[arg.index(i)] = "Meatbag"
elif i == "am":
arg[arg.index(i)] = "is"
elif i == "everybody" or i == "everyone" or i == "all":
arg[arg.index(i)] = "every Meatbag"
if arg == bup:
pass
else:
conn.privmsg(targetchannel,"Translation: "+" ".join(arg))
if enabled == False:
#FIRST QUIT COMMAND
if truesender in operators and targetchannel==channel:# or "skibiliano" in truesender.lower() and targetchannel==channel:
if cocheck(prefix+"enable"):
enabled = True
if debug:
print truesender+":"+prefix+"enable"
elif cocheck(prefix+"stop"):
# if debug:
# print truesender+":"+prefix+"stop"
# if random.randint(0,100) == 50:
# conn.privmsg(channel,"Hammertime!")
# else:
# conn.privmsg(channel,"Shutting down.")
# disconnects = 99999
# conn.quit()
# sleep(2)
# break
if targetchannel == channel and stop(truesender,debug):
break
else:
pass
elif cocheck(prefix+"suggest "):
arg = influx.lower()[8+len(prefix):]
if debug:
print truesender+":"+prefix+"suggest "+arg
tiedosto = open(targetdirectory+"suggestions/suggestions_"+str(int(time.time()))+".txt","a")
tiedosto.write(arg)
tiedosto.close()
conn.privmsg(targetchannel,"Suggestion received")
elif cocheck( prefix+"help "): #Space in front of the ( to make sure that my command finder does not pick this up.
arg = " ".join(influx.split(" ")[1:]).lower()
if debug:
print truesender+":"+prefix+"help "+arg
try:
conn.privmsg(targetchannel,D_help.everything[arg])
except:
try:
conn.privmsg(targetchannel,D_help.everything[arg.replace(prefix,"",1)])
except:
conn.privmsg(targetchannel,"Sorry, can't help you with that")
elif cocheck(prefix+"help"):
#tar = targetchannel
if debug:
print truesender+":"+prefix+"help"
conn.privmsg(targetchannel,"All my commands are: "+reduce(lambda x,y:str(x)+"; "+str(y),commands))
### VERSION
elif influx.lower() == prefix+"version":
if debug:
print truesender+":"+prefix+"version"
conn.privmsg(targetchannel,Name+" "+pregen+" online at a %s Python %s.%s.%s, At your service." %(str(sys.platform),str(sys.version_info[0]),str(sys.version_info[1]),str(sys.version_info[2])))
elif cocheck(prefix+"note ") and influx.count(" ") < 2:
arg = influx.lower()[len(prefix)+5:]
if debug:
print truesender+":"+prefix+"note "+arg
try:
a = arg[0]
except IndexError:
conn.privmsg(targetchannel,sender+" : Please specify a note")
else:
if arg[0] == "_": # Public / Restricted note
result = load(targetdirectory+"memos/"+arg+".note")
#_flare
if result == "ERROR ERROR ERROR ERR":
result = load(targetdirectory+"memos/"+arg+"_"+targetchannel.replace("#","")+".note")
#_flare_dnd
pass
else:
pass
else:
result = load(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg+".note")
#skibiliano_testnote
if result == "ERROR ERROR ERROR ERR":
result = load(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg+"_"+targetchannel.replace("#","")+".note")
#skibiliano_testnote_derp
pass
else:
pass
if result == "ERROR ERROR ERROR ERR":
conn.privmsg(targetchannel,sender+" : Note not found")
elif type(result) == list:
if "C" in result[0]: #Channel restriction, result[2] is the channel
try:
if targetchannel == result[2]:
conn.privmsg(targetchannel,sender+" : '"+result[1]+"'")
else:
conn.privmsg(targetchannel,sender+" : That note is channel restricted")
except:
conn.privmsg(targetchannel,sender+" : NOTE HAS INVALID RESTRICTION")
else:
conn.privmsg(targetchannel,sender+" : '"+result+"'")
elif influx.lower() == prefix+"notes":
if debug:
print truesender+":"+prefix+"notes"
arg = os.listdir(targetdirectory+"memos/")
arg2 = []
arg3 = truesender.replace("|","_")+"_"
for i in arg:
if arg3 in i:
arg2.append(i.replace(arg3,"").replace(".note",""))
if len(arg2) == 1:
preprocess = " note: "
else:
preprocess = " notes: "
if len(arg2) == 0:
conn.privmsg(targetchannel,sender+" : You have no notes saved")
else:
conn.privmsg(targetchannel,sender+" : "+str(len(arg2))+preprocess+", ".join(arg2))
elif cocheck(prefix+"note ") and influx.count(" ") > 1:
note_chanrestrict = None
note_public = None
try:
arg = influx.split(" ",2)[2] # Contents
arg4 = influx.split(" ")[1].lower() # Note name
if arg4[0:3] == "[c]": # or arg4[0:3] == "[p]":
note_chanrestrict = "c" in arg4[0:3]
#note_public = "p" in arg4[0:3]
arg4 = arg4[3:]
elif arg4[0:4] == "[cp]" or arg4[0:4] == "[pc]":
note_chanrestrict = True
note_public = True
arg4 = arg4[4:]
else:
pass
#print "Is note public? "+str(note_public)
#print "Is note chanrestricted? "+str(note_chanrestrict)
#print "What is the name? "+str(arg4)
if arg.lower() == "delete" and "\\" not in influx.lower() and "/" not in influx.lower():
if note_public:
try:
if note_chanrestrict:
os.remove(targetdirectory+"memos/"+"_"+arg4+"_"+targetchannel.replace("#","")+".note")
else:
os.remove(targetdirectory+"memos/"+"_"+arg4+".note")
except:
conn.pivmsg(targetchannel,sender+" : Couldn't remove note")
else:
conn.privmsg(targetchannel,sender+" : Note removed")
pass
else:
try:
if note_chanrestrict:
os.remove(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+"_"+targetchannel.replace("#","")+".note")
else:
os.remove(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+".note")
except:
conn.privmsg(targetchannel,sender+" : Couldn't remove note")
else:
conn.privmsg(targetchannel,sender+" : Note removed")
elif arg.lower() == "delete":
conn.privmsg(targetchannel,sender+" : That just doesn't work, we both know that.")
else:
try:
if note_public:
if note_chanrestrict:
save(targetdirectory+"memos/"+"_"+arg4+"_"+targetchannel.replace("#","")+".note",arg)
#print "Saved as note_public, note_chanrestrict"
else:
save(targetdirectory+"memos/"+"_"+arg4+".note",arg)
#print "Saved as note_public"
else:
if note_chanrestrict:
save(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+"_"+targetchannel.replace("#","")+".note",arg)
#print "Saved as note_chanrestrict"
else:
save(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+".note",arg)
#print "Saved as normal"
except IOError:
conn.privmsg(targetchannel,sender+" : Please do not use special letters")
else:
conn.privmsg(targetchannel,sender+" : Note Saved!")
except:
conn.privmsg(targetchannel,sender+" : Something went horribly wrong.")
elif cocheck(prefix+"uptime"):
arg1 = uptime_start
arg2 = time.time()
arg1 = arg2 - arg1
arg2 = arg1
if arg1 < 60:
conn.privmsg(targetchannel,sender+" : I have been up for "+str(round(arg1,2))+" Seconds")
elif arg1 < 3600:
arg1 = divmod(arg1,60)
arg = " Minute" if int(arg1[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg+" and "+str(round(arg1[1],2))+" Seconds")
elif arg1 <= 86400:
arg1 = divmod(arg1,3600)
arg3 = " Hour" if int(arg1[0]) == 1 else " Hours"
arg2 = divmod(arg1[1],60)
arg = " Minute" if int(arg2[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg3+", "+str(int(arg2[0]))+arg+" and "+str(round(arg2[1],2))+" Seconds")
elif arg1 > 86400:
arg1 = divmod(arg1,86400)
arg2 = divmod(arg1[1],3600)
arg3 = divmod(arg2[1],60)
arg4 = " Day" if int(arg1[0]) == 1 else " Days"
arg5 = " Hour" if int(arg2[0]) == 1 else " Hours"
arg6 = " Minute" if int(arg3[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg4+", "+str(int(arg2[0]))+arg5+", "+str(int(arg3[0]))+arg6+" and "+str(round(arg3[1],2))+" Seconds")
elif cocheck(prefix+"purgemessages"):
count = 0
for i,a in tell_list.items():
for b in a:
if "||From: "+truesender in b:
count += 1
del(tell_list[i][tell_list[i].index(b)])
conn.privmsg(targetchannel, sender+" : All your "+str(count)+" messages have been purged")
elif influx.split(" ")[0].lower().replace(",","").replace(":","") in SName+[Name.lower()] and "tell" in (influx.lower().split(" ")+[""])[1]:
arg = influx.lower().split(" ")
equalarg = influx.split(" ")
next_one = False
count = 0
spot = 0
for i in arg:
count += 1
if "tell" in i.lower():
next_one = True
elif next_one == True:
next_one = i.lower()
spot = count
break
else:
pass
if next_one != True and next_one != False:
#if ("^\^".join(tell_list.values())).count(truesender) >= offline_message_limit:
if str(tell_list.values()).count("||From: "+truesender) >= offline_message_limit:
conn.privmsg(targetchannel,sender+" : Limit of "+str(offline_message_limit)+" reached! Use !purgemessages if you want to get rid of them!")
else:
try:
tell_list[next_one].append((" ".join(equalarg[spot:]))+" ||From: "+truesender)
except:
tell_list[next_one] = [(" ".join(equalarg[spot:]))+" ||From: "+truesender]
conn.privmsg(targetchannel,"Sending a message to "+next_one+" when they arrive.")
# < This part has to be within subsidiaries of the bot, and must not be modified, intentionally hidden or deleted.
elif influx.split(" ")[0].lower().replace(",","").replace(":","") in SName+[Name.lower()] and "who created you" in influx.lower():
conn.privmsg(targetchannel, "I was created by Skibiliano.")
# The part ends here >
elif parse_xkcd and "xkcd.com/" in influx.lower():
if influx.lower()[0:3] == "www":
data = "http://"+influx
elif influx.lower()[0:3] == "xkc":
data = "http://"+influx
else:
data = influx
data = data.split(" ")
for i in data:
if "http://" in i and "xkcd" in i:
churn = xkcdparser.xkcd(i)
if churn == "NOTHING":
pass
else:
conn.privmsg(targetchannel,sender+" : XKCD - "+churn)
break
else:
pass
elif automatic_youtube_reveal and "youtube.com/watch?v=" in influx.lower():
temporal_list2 = []
temporal_data = influx.split(" ")
temporal_list = []
for block in temporal_data:
if "youtube.com/watch?v=" in block:
temporal_list.append(block)
for temdata in temporal_list:
if temdata[0:3] == "you":
temdata = "http://www."+temdata
elif temdata[0:3] == "www":
temdata = "http://"+temdata
elif temdata[0:4] == "http":
pass
#Obscure ones
elif temdata[0:3] == "ww.":
temdata = "http://w"+temdata
elif temdata[0:3] == "w.y":
temdata = "http://ww"+temdata
elif temdata[0:3] == ".yo":
temdata = "http://www"+temdata
elif temdata[0:3] == "ttp":
temdata = "h"+temdata
elif temdata[0:3] == "tp:":
temdata = "ht"+temdata
elif temdata[0:3] == "p:/" or temdata[0:3] == "p:\\":
temdata = "htt"+temdata
elif temdata[0:3] == "://" or temdata[0:3] == ":\\\\":
temdata = "http"+temdata
elif temdata[0:2] == "//" or temdata[0:2] == "\\\\":
if temdata[2] == "y":
temdata = "http://www."+temdata[2:]
elif temdata[2] == "w":
temdata = "http:"+temdata
else:
pass
if debug:
print truesender+":"+temdata
arg = temdata
check = temdata.lower()
if cache_youtube_links == True:
result = YTCV2(arg)
else:
result = YTCV2(arg,0)
if type(result) == str:
### To remove ="
if result[0:4] == 'nt="':
result = result[4:]
pass
elif result[0:2] == '="':
result = result[2:]
pass
else:
pass
if """ in result:
result.replace(""",'"')
if len(temporal_list) == 1:
conn.privmsg(targetchannel,sender+" : "+result)
break
else:
temporal_list2.append(result)
else:
if len(temporal_list) == 1:
conn.privmsg(targetchannel,sender+" : The video does not exist")
break
else:
temporal_list2.append("The video does not exist")
if len(temporal_list) == 1:
pass
else:
conn.privmsg(targetchannel,sender+" : "+str(reduce(lambda x,y: x+" :-And-: "+y,temporal_list2)))
elif RegExpCheckerForWebPages("((http://)|(https://))|([a-zA-Z0-9]+[.])|([a-zA-Z0-9](3,)\.+[a-zA-Z](2,))",influx,1):
arg2 = RegExpCheckerForWebPages("(http://)|([a-zA-Z0-9]+[.])|([a-zA-Z0-9](3,)\.+[a-zA-Z](2,))",influx,0)
if arg2 == 404:
pass
else:
if arg2[:7] == "http://":
pass
elif arg2[:4] == "www.":
arg2 = "http://"+arg2
else:
arg2 = "http://"+arg2
try:
arg = Whoopshopchecker.TitleCheck(arg2)
if len(arg2) == 0:
pass
else:
conn.privmsg(targetchannel,sender+" : "+arg)
except:
#conn.privmsg(targetchannel,sender+" : An odd error occurred")
pass
elif respond_of_course and "take over the" in influx.lower() or respond_of_course and "conquer the" in influx.lower():
if debug:
print truesender+":<RULE>:"+influx
conn.privmsg(targetchannel,"Of course!")
elif respond_khan and "khan" in influx.lower():
if respond_khan:
if debug:
print truesender+":<KHAN>:"+influx
if "khan " in influx.lower():
conn.privmsg(targetchannel,"KHAAAAAAN!")
elif " khan" in influx.lower():
conn.privmsg(targetchannel,"KHAAAAAN!")
elif influx.lower() == "khan":
conn.privmsg(targetchannel,"KHAAAAAAAAAN!")
elif influx.lower() == "khan?":
conn.privmsg(targetchannel,"KHAAAAAAAAAAAAAN!")
elif influx.lower() == "khan!":
conn.privmsg(targetchannel,"KHAAAAAAAAAAAAAAAAAAN!")
elif respond_khan and influx.lower().count("k") + influx.lower().count("h") + influx.lower().count("a") + influx.lower().count("n") + influx.lower().count("!") + influx.lower().count("?") == len(influx):
if "k" in influx.lower() and "h" in influx.lower() and "a" in influx.lower() and "n" in influx.lower():
if debug:
print truesender+":<KHAN>:"+influx
conn.privmsg(targetchannel,"KHAAAAN!")
elif influx.split(" ")[0].lower() in ["thanks","danke","tack"] and len(influx.split(" ")) > 1 and influx.split(" ")[1].lower().replace("!","").replace("?","").replace(".","").replace(",","") in SName+[lowname]:
conn.privmsg(targetchannel,"No problem %s" %(sender))
elif "happy birthday" in influx.lower() and birthday_announced == time.gmtime(time.time())[0]:
conn.privmsg(targetchannel,sender+" : Thanks :)")
elif influx.split(" ")[0].lower().replace(",","").replace(".","").replace("!","").replace("?","") in SName+[lowname] and "call me" in influx.lower():
if allow_callnames == True:
arg = influx.split(" ")
arg2 = False
arg3 = []
for i in arg:
if arg2 == True:
arg3.append(i)
elif i.lower() == "me":
arg2 = True
arg3 = " ".join(arg3)
truesender_lower = truesender.lower()
arg3_lower = arg3.lower()
tell_checker = Namecheck.Namecheck(arg3_lower,users,truesender)
for name in replacenames.values():
if arg3_lower == name.lower():
tell_checker = True
break
else:
pass
if tell_checker == True:
conn.privmsg(targetchannel,sender+" : I can't call you that, I know someone else by that name")
elif len(arg3) > call_me_max_length:
conn.privmsg(targetchannel,sender+" : I cannot call you that, Too long of a name.")
pass
else:
replacenames[truesender] = arg3
with open("replacenames.cache","w") as pickle_save:
pickle.dump(replacenames,pickle_save)
conn.privmsg(targetchannel,sender+" : Calling you "+arg3+" From now on")
else:
conn.privmsg(targetchannel,sender+" : Sorry, I am not allowed to do that.")
elif influx.split(" ")[0].lower().replace(",","").replace(".","").replace("?","").replace("!","") in SName+[lowname] and "your birthday" in influx.lower() and "is your" in influx.lower():
conn.privmsg(targetchannel,sender+" : My birthday is on the 15th day of December.")
elif influx.split(" ")[0].lower().replace(",","") in SName+[lowname] and "version" in influx.replace("?","").replace("!","").lower().split(" "):
if debug == True:
print truesender+":<VERSION>:%s Version" %(Name)
conn.privmsg(targetchannel,sender+", My version is "+pregen)
elif influx.split(" ")[0].lower().replace(",","") in SName+[lowname] and influx.lower().count(" or ") > 0 and len(influx.split(" ")[1:]) <= influx.lower().count("or") * 3:
cut_down = influx.lower().split(" ")
arg = []
count = -1
for i in cut_down:
count += 1
try:
if cut_down[count+1] == "or":
arg.append(i)
except:
pass
try:
if i not in arg and cut_down[count-1] == "or":
arg.append(i)
except:
pass
try:
conn.privmsg(targetchannel,random.choice(arg).capitalize().replace("?","").replace("!",""))
except IndexError:
# arg is empty, whORe etc.
pass
elif influx.lower()[0:len(Name)] == lowname and influx.lower()[-1] == "?" and influx.count(" ") > 1 and "who started you" in influx.lower() or \
influx.split(" ")[0].lower().replace(",","") in SName and influx.lower()[-1] == "?" and "who started you" in influx.lower():
conn.privmsg(targetchannel,sender+" : I was started by %s"%(os.getenv("USER"))+" on "+time.strftime("%d.%m.%Y at %H:%M:%S",time.gmtime(uptime_start)))
elif influx.lower()[0:len(Name)] == lowname and influx.lower()[-1] == "?" and influx.count(" ") > 1 or \
influx.split(" ")[0].lower().replace(",","") in SName and influx.lower()[-1] == "?" and influx.count(" ") > 1:
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,sender+" : "+C_eightball.eightball(influx.lower(),debug,truesender,prefix))
else:
if highlights:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,users,prefix))
else:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,nonhighlight_names,prefix))
elif influx.lower()[0:len(Name)] == lowname and not influx.lower()[len(Name):].isalpha() or \
influx.split(" ")[0].lower().replace(",","") in SName and not influx.lower()[len(influx.split(" ")[0].lower()):].isalpha():
conn.privmsg(targetchannel, random.choice(["Yea?","I'm here","Ya?","Yah?","Hm?","What?","Mmhm, what?","?","What now?","How may I assist?"]))
comboer = truesender
comboer_time = time.time()
elif influx.lower()[-1] == "?" and comboer == truesender and looptime - discard_combo_messages_time < comboer_time:
comboer = ""
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,sender+" : "+C_eightball.eightball(influx.lower(),debug,truesender,prefix))
else:
if highlights:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,users,prefix))
else:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,nonhighlight_names,prefix))
elif influx.lower() == prefix+"tm":
if truesender in operators and targetchannel==channel:
marakov = not marakov
conn.privmsg(targetchannel,sender+" : Marakov Output is now "+str(marakov))
else:
conn.privmsg(targetchannel,sender+" : I can't let you access that")
elif personality_greeter == True and True in map(lambda x: x in influx.lower(),["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"]):
if comboer != "" and looptime - discard_combo_messages_time > comboer_time:
combo_check = sbna(["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan","all night"], #ONLY ONE OF THESE
["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"], #ATLEAST ONE OF THESE
influx.lower())
else:
combo_check = sbna(SName+[lowname,
#lowname+".",lowname+"!",lowname+"?",
"everybody",
#"everybody!","everybody?",
"everyone",
#"everyone!","everyone?",
"all",
#"all!","all?"
"all night",
], #ONLY ONE OF THESE
["greetings","afternoon","hi",
#"hi,",
"hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"], #ATLEAST ONE OF THESE
influx.lower().replace(",","").replace(".","").replace("!",""))
if combo_check:
combo_check = False
comboer = ""
if "evening" in influx.lower() and "all" in influx.lower() and len(influx.lower().split(" ")) > 3:
pass
talking_about_me = False
if Name.lower() in influx.lower():
talking_about_me = True
for bot_name in SName:
if bot_name.lower() in influx.lower():
talking_about_me = True
break
if not talking_about_me:
continue #it got annoying REAL FAST when it'd interject any time a greeting was used, regardless of context
elif truesender not in operators:
if debug:
print truesender+":<GREET>:"+influx
dice = random.randint(0,19)
if dice == 0:
conn.privmsg(targetchannel,"Well hello to you too "+sender)
elif dice == 1:
if optimize_greeting == False:
hours = time.strftime("%H")
#time.strftime("%H:%M:%S") == 12:28:41
hours = int(hours)
if hours in xrange(0,12):
conn.privmsg(targetchannel,"Good Morning "+sender)
elif hours in xrange(12,15):
conn.privmsg(targetchannel,"Good Afternoon "+sender)
elif hours in xrange(15,20):
conn.privmsg(targetchannel,"Good Evening "+sender)
else:
conn.privmsg(targetchannel,"Good Night "+sender)
else:
hours = time.strftime("%H")
hours = int(hours)
if hours in morning:
conn.privmsg(targetchannel,"Good Morning "+sender)
elif hours in afternoon:
conn.privmsg(targetchannel,"Good Afternoon "+sender)
elif hours in evening:
conn.privmsg(targetchannel,"Good Evening "+sender)
else:
conn.privmsg(targetchannel,"Good Night "+sender)
elif dice == 2:
conn.privmsg(targetchannel,"Hello!")
elif dice == 3:
conn.privmsg(targetchannel,"Hey "+sender)
elif dice == 4:
conn.privmsg(targetchannel,"Hi "+sender)
elif dice == 5:
conn.privmsg(targetchannel,"Hello "+sender)
elif dice == 6:
conn.privmsg(targetchannel,"Yo "+sender)
elif dice == 7:
conn.privmsg(targetchannel,"Greetings "+sender)
elif dice == 8:
conn.privmsg(targetchannel,"Hi")
elif dice == 9:
conn.privmsg(targetchannel,"Hi!")
elif dice == 10:
conn.privmsg(targetchannel,"Yo")
elif dice == 11:
conn.privmsg(targetchannel,"Yo!")
elif dice == 12:
conn.privmsg(targetchannel,"Heya")
elif dice == 13:
conn.privmsg(targetchannel,"Hello there!")
elif dice == 14: # Richard
conn.privmsg(targetchannel,"Statement: Greetings meatbag")
elif dice == 15: # Richard
hours = int(time.strftime("%H"))
if hours in xrange(5,12):
conn.privmsg(targetchannel,"What are you doing talking at this time of the morning?")
elif hours in xrange(12,15):
conn.privmsg(targetchannel,"What are you doing talking at this time of the day?")
elif hours in xrange(15,22):
conn.privmsg(targetchannel,"What are you doing talking at this time of the evening?")
else:
conn.privmsg(targetchannel,"What are you doing talking at this time of the night?")
elif dice == 16: # Richard
conn.privmsg(targetchannel,"Oh, you're still alive I see.")
elif dice == 17:
conn.privmsg(targetchannel,"Heya "+sender)
elif dice == 18 and time.gmtime(time.time())[1] == 12 and time.gmtime(time.time())[2] == 15:
conn.privmsg(targetchannel,"Hello! It's my birthday!")
else:
conn.privmsg(targetchannel,"Hiya "+sender)
secdice = random.randint(0,10)
if time.gmtime(time.time())[1] == 12 and time.gmtime(time.time())[2] == 15 and birthday_announced < time.gmtime(time.time())[0]:
birthday_announced = time.gmtime(time.time())[0]
conn.privmsg(channel,"Hey everybody! I just noticed it's my birthday!")
time.sleep(0.5)
tag = random.choice(["birthday","robot+birthday","happy+birthday+robot"])
arg1 = urllib2.urlopen("http://www.youtube.com/results?search_query=%s&page=&utm_source=opensearch"%tag)
arg1 = arg1.read().split("\n")
arg2 = []
for i in arg1:
if "watch?v=" in i:
arg2.append(i)
arg3 = random.choice(arg2)
conn.privmsg(channel,"Here's a video of '%s' which I found! %s (%s)"%(tag.replace("+"," "),"http://www.youtube.com"+arg3[arg3.find('/watch?v='):arg3.find('/watch?v=')+20],YTCV2("http://www.youtube.com"+arg3[arg3.find('/watch?v='):arg3.find('/watch?v=')+20])))
if truesender.lower() in tell_list.keys():
try:
conn.privmsg(channel, "Also, "+truesender+" : "+tell_list[truesender.lower()][0])
del(tell_list[truesender.lower()][0])
except:
pass
else:
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,"Greetings Master "+sender)
elif dice == 1:
conn.privmsg(targetchannel,"My deepest greetings belong to you, Master "+sender)
### IMPORTANT ###
elif influx == "☺VERSION☺":
conn.notice(truesender,"\001VERSION nanotrasen:2:Python 2.6\001")
elif marakov and influx.lower() == prefix+"marakov":
arg = Marakov_Chain.form_sentence()
if len(arg) < 5:
conn.privmsg(targetchannel,sender+" : Not enough words harvested")
else:
conn.privmsg(targetchannel,sender+" : %s" %(" ".join(arg).capitalize()))
elif marakov and cocheck( prefix+ "marakov"):
try:
arg = influx.split(" ")[1].lower()
except:
conn.privmsg(targetchannel,sender+" : Please input a valid second argument")
else:
arg2 = Marakov_Chain.form_sentence(arg)
if len(arg2) < 5:
conn.privmsg(targetchannel,sender+" : Not enough words harvested for a sentence starting with %s" %(arg))
else:
conn.privmsg(targetchannel,sender+" : %s" %(" ".join(arg2).capitalize()))
else:
Marakov_Chain.give_data(influx)
autodiscusscurtime = backup
if time.time() - looptime == 0:
pass
else:
print "Took",time.time()-looptime,"Seconds to finish loop"
elif data [ 1 ] [ 0 ] == '353':
if connected == False:
connected = True
users = map(lambda x: x[1:] if x[0] == "+" or x[0] == "@" else x,data[1][4].split(" "))
print "There are",len(users),"Users on",channel
operators = []
for potential_operator in data[1][4].split(" "):
if potential_operator[0] == "@":
operators.append(potential_operator[1:])
elif potential_operator[0] == "%":
halfoperators.append(potential_operator[1:])
elif data[1][0] == "QUIT":
sender = data[0].split("!")[0]
print sender+" Has now left the server"
try:
users.remove(sender)
try:
operators.remove(sender)
except ValueError:
pass
try:
halfoperators.remove(sender)
except ValueError:
pass
except ValueError:
pass
elif data[1][0] == "PART":
sender = data[0].split("!")[0]
targetchannel = data[1][1]
print sender+" Has now parted from the channel"
try:
users.remove(sender)
try:
operators.remove(sender)
except ValueError:
pass
try:
halfoperators.remove(sender)
except ValueError:
pass
except ValueError:
pass
elif data[1][0] == "JOIN":
sender = data[0].split("!")[0]
targetchannel = data[1][1]
if sender.lower() in tell_list.keys():
try:
conn.privmsg(targetchannel, sender+" : "+" | ".join(tell_list[sender.lower()]))
del(tell_list[sender.lower()])
except:
pass
for useri,nicki in replacenames.items():
checkers = Namecheck.Namecheck_dict(sender.lower(),replacenames)
if checkers[0]:
try:
if checkers[0].lower() == sender:
pass
else:
conn.privmsg(targetchannel,checkers[1]+" : I have detected a collision with a name I call you and %s who joined" %(sender))
del(replacenames[checkers[1]])
with open("replacenames.cache","w") as pickle_save:
pickle.dump(replacenames,pickle_save)
except AttributeError:
#conn.privmsg(channel,"NAME COLLISION CHECK ERROR, RELATED TO %s" %(sender))
print "NAME COLLISION CHECK ERROR, RELATED TO %s" %(sender)
break
print sender+" Has now joined"
users.append(sender)
#####
if sender.lower() not in peopleheknows[0]:
peopleheknows[0].append(sender.lower())
peopleheknows[1].append(data[0].split("!")[1])
with open("peopleheknows.cache","w") as peoplehecache:
pickle.dump(peopleheknows,peoplehecache)
elif data[1][0] == "MODE" and data[1][2] == "+o":
sender = data[1][3]
targetchannel = data[1][1]
if targetchannel == channel:
print sender+" Is now an operator on the main channel"
operators.append(sender)
else:
print sender+" Is now an operator"
elif data[1][0] == "MODE" and data[1][2] == "-o":
sender = data[1][3]
targetchannel = data[1][1]
if targetchannel == channel:
print sender+" Is no longer an operator on the main channel"
else:
print sender+" Is no longer an operator"
try:
operators.remove(sender)
except ValueError:
pass
elif data[1][0] == "MODE" and data[1][2] == "+h":
sender = data[1][3]
print sender+" Is now an half operator"
halfoperators.append(sender)
elif data[1][0] == "MODE" and data[1][2] == "-h":
try:
halfoperators.remove(sender)
except ValueError:
pass
elif data[1][0] == "MODE" and data[1][1] == Name:
print "My mode is",data[1][2]
elif data[1][0] == "MODE" and data[1][1] != Name:
try:
sender = data[1][3]
print sender,"Was modified",data[1][2]
except IndexError:
print "SENDER RETRIEVAL FAILED:"+str(data)
elif data[1][0] == "KICK" and data[1][2] == Name:
disconnects = 99999
print "I have been kicked! Disconnecting entirely!"
conn.quit()
elif data[1][0] == "KICK":
# data[1][0] = Kick, 1 = Channel, 2 = Who, 3 = Who(?)
print data[1][2]+" got kicked!"
elif data[1][0] == "451" and data[1][2] == "You have not registered":
print Name+" hasn't been registered"
elif data[1][0] == "NOTICE":
sender = data[0].split("!")[0]
print "NOTICE (%s): %s" %(sender,data[1][2])
pongtarget = sender
elif data[1][0] == "NICK":
origname = data[0].split("!")[0]
newname = data[1][1]
print origname,"Is now",newname
if newname.lower() in tell_list.keys():
try:
conn.privmsg(channel, newname+" : "+tell_list[newname.lower()][0])
del(tell_list[newname.lower()][0])
except:
pass
try:
users.remove(origname)
except ValueError:
pass
else:
users.append(newname)
try:
operators.remove(origname)
except ValueError:
pass
else:
operators.append(newname)
try:
halfoperators.remove(origname)
except ValueError:
pass
else:
halfoperators.append(newname)
elif data[1][0] == "001":
# Skibot is welcomed to the Network
pass
elif data[1][0] == "002":
# Your host is...
pass
elif data[1][0] == "003":
#Server was created...
pass
elif data[1][0] == "004":
#Weird hex?
pass
elif data[1][0] == "005":
#Settings like NICKLEN and so on.
pass
elif data[1][0] == "250":
#data[1][2] is
#"Highest connection count: 1411 (1410 clients)
#(81411 connections received)"
pass
elif data[1][0] == "251":
#There are 23 users and 2491 invisible on 10 servers
pass
elif data[1][0] == "252":
#IRC Operators online
#data[1][2]
print data[1][2],"Irc operators online"
pass
elif data[1][0] == "253":
# ['253', 'Skibot_V4', '1', 'unknown connection(s)']
print data[1][2],"Unknown connection(s)"
pass
elif data[1][0] == "254":
#1391 channels formed
pass
elif data[1][0] == "255":
#I have 406 clients and 2 servers
pass
elif data[1][0] == "265":
#data[1][2] current local users
#data[1][3] at max
try:
print "Current local users:", data[1][2],"/",data[1][3]
except IndexError:
print "Couldn't retrieve local users"
pass
elif data[1][0] == "266":
#data[1][2] current global users
#data[1][3] at max
try:
print "Current global users:", data[1][2],"/",data[1][3]
except IndexError:
print "Couldn't retrieve global users"
pass
elif data[1][0] == "315":
#End of /who list
pass
elif data[1][0] == "332":
# Topic of channel
topic = data[1][3]
pass
elif data[1][0] == "333":
# *Shrug*
pass
elif data[1][0] == "352":
#WHO command
if len(targetlist) > 0:
if targetlist[0][0].lower() in data[1][6].lower():
thread.start_new_thread(target,("*!*@"+data[1][4],targetlist[0][1]))
print "Created a thread with", "*!*@"+data[1][4],targetlist[0][1]
targetlist.pop(0)
else:
print targetlist[0][0].lower(), "isn't equal to?", data[1][6].lower()
print targetlist
elif data[1][0] == "366":
# End of USERS
pass
elif data[1][0] == "372":
# Server information
pass
elif data[1][0] == "375":
# Message of the day
pass
elif data[1][0] == "376":
# End of motd
pass
elif data[1][0] == "401":
# ('network', ['401','Botname','Channel / Nick','No such nick/channel'])
print data[1][2] + " Channel does not exist"
pass
elif data[1][0] == "439":
# ('irc.rizon.no', ['439', '*', 'Please wait while we process your connection.'])
pongtarg = data[0][0]
elif data[1][0] == "477":
# You need to be identified
#TAG
conn.privmsg("nickserv","identify %s"%CORE_DATA.le_pass)
time.sleep(0.5)
conn.join(data[1][2])
#('network', ['477', 'botname', '#channel', 'Cannot join channel (+r) - you need to be identified with services'])
elif data[1][0] == "433":
# Skibot name already exists.
print Name+" name already exists."
Name += "_"+version
print "New name:",Name
duplicate_notify = True
conn = irchat.IRC ( Network, Port, Name, "NT_"+version, "NT_"+version, "Trasen_"+version )
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
elif data[1][0] == "482":
sleep(0.05)
conn.privmsg(targetchannel,"Nevermind that, I am not an operator")
CALL_OFF = True
elif data[1] == ["too","fast,","throttled."]:
print "Reconnected too fast."
print "Halting for 2 seconds"
sleep(2)
elif data[1][0] == "Link":
if data[0] == "Closing":
print "Link was closed"
connected = False
# conn.quit()
# break
else:
print data
print data[1][0]
pass
else:
if disconnects > 9000: #IT'S OVER NINE THOUSAAAAND!
break
else: #WHAT NINE THOUSAND? THERE'S NO WAY THAT CAN BE RIGHT
sleep(responsiveness_delay) #WAIT A WHILE AND CHECK AGAIN!
try:
if not connected:
#print pongtarget
#print conn.addressquery()
conn.privmsg(pongtarget,"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join"
connected = True
except ValueError:
try:
conn.privmsg(conn.addressquery()[0],"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join the second time"
connected = True
except ValueError:
print "Both methods failed"
except AttributeError:
print "Conn is not established correctly"
except NameError:
print "Pongtarget isn't yet established"
try:
conn.privmsg(conn.addressquery()[0],"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join the second time"
connected = True
except:
print "Both methods failed"
|
Chiefwaffles/-tg-station
|
bot/NanoTrasenBot.py
|
Python
|
gpl-3.0
| 71,195
|
import re
from django import template
from django.template.loader import get_template
from django.template import RequestContext
register = template.Library()
INSTALLED_ARTIFACTS = dict()
def install(artifact_class):
INSTALLED_ARTIFACTS[artifact_class.key] = artifact_class
def find(data):
from fir_artifacts.models import ArtifactBlacklistItem
result = dict()
for key in INSTALLED_ARTIFACTS:
blacklist = ArtifactBlacklistItem.objects.filter(type=key).values_list('value', flat=True)
values = INSTALLED_ARTIFACTS[key].find(data)
values = [v for v in values if v not in blacklist]
result[key] = values
return result
def after_save(type, value, event):
return INSTALLED_ARTIFACTS[type].after_save(value, event)
def incs_for_art(art_string):
from fir_artifacts.models import Artifact
artifacts = Artifact.objects.filter(value__contains=art_string)
incs = []
for a in artifacts:
incs.extend(a.relations.all())
return incs
def all_for_object(obj, raw=False, user=None):
result = []
total_count = 0
correlated_count = 0
if not hasattr(obj, "artifacts"):
return (result, total_count, correlated_count)
for artifact in INSTALLED_ARTIFACTS:
values = obj.artifacts.filter(type=artifact)
artifact_collection = INSTALLED_ARTIFACTS[artifact](values, obj, user=user)
total_count += values.count()
correlated_count += artifact_collection.correlated_count()
result.append(artifact_collection)
return (result, total_count, correlated_count)
class AbstractArtifact:
case_sensitive = False
template = 'fir_artifacts/default.html'
@classmethod
def find(cls, data):
results = []
for i in re.finditer(cls.regex, data):
if cls.case_sensitive:
results.append(i.group('search'))
else:
results.append(i.group('search').lower())
return results
@classmethod
def after_save(cls, value, event):
# Do nothing, allows for specific callback in subclasses
pass
def __init__(self, artifacts, event, user=None):
class ArtifactDisplay(object):
def __init__(self, artifact, user):
self.artifact = artifact
self.correlation_count = self.artifact.relations_for_user(user).count()
@property
def value(self):
return self.artifact.value
@property
def type(self):
return self.artifact.type
@property
def id(self):
return self.artifact.id
@property
def pk(self):
return self.artifact.pk
self._artifacts = [ArtifactDisplay(artifact, user) for artifact in artifacts]
self._event = event
self._correlated = []
for artifact in self._artifacts:
if artifact.correlation_count > 1:
self._correlated.append(artifact)
def json(self, request):
return self.display(request, correlated=False, json=True)
def display(self, request, correlated=False, json=False):
context = RequestContext(request)
template = get_template(self.__class__.template)
context['artifact_name'] = self.__class__.display_name
if correlated:
context['artifact_values'] = self._correlated
else:
context['artifact_values'] = self._artifacts
context['event'] = self._event
if not json:
return template.render(context.flatten(), request)
else:
return context.flatten()
def correlated_count(self):
return len(self._correlated)
|
gcrahay/FIR
|
fir_artifacts/artifacts.py
|
Python
|
gpl-3.0
| 3,764
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_vrf
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage VRFs on Arista EOS network devices
description:
- This module provides declarative management of VRFs
on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
name:
description:
- Name of the VRF.
required: true
rd:
description:
- Route distinguisher of the VRF
interfaces:
description:
- Identifies the set of interfaces that
should be configured in the VRF. Interfaces must be routed
interfaces in order to be placed into a VRF. The name of interface
should be in expanded format and not abbreviated.
associated_interfaces:
description:
- This is a intent option and checks the operational state of the for given vrf C(name)
for associated interfaces. If the value in the C(associated_interfaces) does not match with
the operational state of vrf interfaces on device it will result in failure.
version_added: "2.5"
aggregate:
description: List of VRFs definitions
purge:
description:
- Purge VRFs not defined in the I(aggregate) parameter.
default: no
type: bool
delay:
description:
- Time in seconds to wait before checking for the operational state on remote
device. This wait is applicable for operational state arguments.
default: 10
state:
description:
- State of the VRF configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Create vrf
eos_vrf:
name: test
rd: 1:200
interfaces:
- Ethernet2
state: present
- name: Delete VRFs
eos_vrf:
name: test
state: absent
- name: Create aggregate of VRFs with purge
eos_vrf:
aggregate:
- { name: test4, rd: "1:204" }
- { name: test5, rd: "1:205" }
state: present
purge: yes
- name: Delete aggregate of VRFs
eos_vrf:
aggregate:
- name: test2
- name: test3
- name: test4
- name: test5
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- vrf definition test
- rd 1:100
- interface Ethernet1
- vrf forwarding test
"""
import re
import time
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
purge = module.params['purge']
for w in want:
name = w['name']
rd = w['rd']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent':
if obj_in_have:
commands.append('no vrf definition %s' % name)
elif state == 'present':
if not obj_in_have:
commands.append('vrf definition %s' % name)
if rd is not None:
commands.append('rd %s' % rd)
if w['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
else:
if w['rd'] is not None and w['rd'] != obj_in_have['rd']:
commands.append('vrf definition %s' % w['name'])
commands.append('rd %s' % w['rd'])
if w['interfaces']:
if not obj_in_have['interfaces']:
for i in w['interfaces']:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
elif set(w['interfaces']) != obj_in_have['interfaces']:
missing_interfaces = list(set(w['interfaces']) - set(obj_in_have['interfaces']))
for i in missing_interfaces:
commands.append('interface %s' % i)
commands.append('vrf forwarding %s' % w['name'])
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['name'], want)
if not obj_in_want:
commands.append('no vrf definition %s' % h['name'])
return commands
def map_config_to_obj(module):
objs = []
output = run_commands(module, {'command': 'show vrf', 'output': 'text'})
lines = output[0].strip().splitlines()[3:]
out_len = len(lines)
index = 0
while out_len > index:
line = lines[index]
if not line:
continue
splitted_line = re.split(r'\s{2,}', line.strip())
if len(splitted_line) == 1:
index += 1
continue
else:
obj = dict()
obj['name'] = splitted_line[0]
obj['rd'] = splitted_line[1]
obj['interfaces'] = []
if len(splitted_line) > 4:
obj['interfaces'] = []
interfaces = splitted_line[4]
if interfaces.endswith(','):
while interfaces.endswith(','):
# gather all comma separated interfaces
if out_len <= index:
break
index += 1
line = lines[index]
vrf_line = re.split(r'\s{2,}', line.strip())
interfaces += vrf_line[-1]
for i in interfaces.split(','):
obj['interfaces'].append(i.strip().lower())
index += 1
objs.append(obj)
return objs
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
if item.get('interfaces'):
item['interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('interfaces') if intf]
if item.get('associated_interfaces'):
item['associated_interfaces'] = [intf.replace(" ", "").lower() for intf in item.get('associated_interfaces') if intf]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'state': module.params['state'],
'rd': module.params['rd'],
'interfaces': [intf.replace(" ", "").lower() for intf in module.params['interfaces']] if module.params['interfaces'] else [],
'associated_interfaces': [intf.replace(" ", "").lower() for intf in
module.params['associated_interfaces']] if module.params['associated_interfaces'] else []
})
return obj
def check_declarative_intent_params(want, module, result):
have = None
is_delay = False
for w in want:
if w.get('associated_interfaces') is None:
continue
if result['changed'] and not is_delay:
time.sleep(module.params['delay'])
is_delay = True
if have is None:
have = map_config_to_obj(module)
for i in w['associated_interfaces']:
obj_in_have = search_obj_in_list(w['name'], have)
if obj_in_have:
interfaces = obj_in_have.get('interfaces')
if interfaces is not None and i not in interfaces:
module.fail_json(msg="Interface %s not configured on vrf %s" % (i, w['name']))
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
interfaces=dict(type='list'),
associated_interfaces=dict(type='list'),
delay=dict(default=10, type='int'),
rd=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
check_declarative_intent_params(want, module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
hyperized/ansible
|
lib/ansible/modules/network/eos/eos_vrf.py
|
Python
|
gpl-3.0
| 10,723
|
# Copyright: (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleActionSkip, AnsibleActionFail
from ansible.executor.module_common import modify_module
from ansible.module_utils.json_utils import _filter_non_json_lines
from ansible.module_utils.six import binary_type, string_types, text_type, iteritems, with_metaclass
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.jsonify import jsonify
from ansible.release import __version__
from ansible.utils.unsafe_proxy import wrap_var
from ansible.vars.clean import remove_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
# A set of valid arguments
_VALID_ARGS = frozenset([])
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
self._cleanup_remote_tmp = False
self._supports_check_mode = True
self._supports_async = False
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._used_interpreter = None
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Deprecated parameter. This is no longer used. An action plugin that calls
another one and wants to use the same remote tmp for both should set
self._connection._shell.tmpdir rather than this parameter.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
result = {}
if tmp is not None:
result['warning'] = ['ActionModule.run() no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir']
del tmp
if self._task.async_val and not self._supports_async:
raise AnsibleActionFail('async is not supported for this task.')
elif self._play_context.check_mode and not self._supports_check_mode:
raise AnsibleActionSkip('check mode is not supported for this task.')
elif self._task.async_val and self._play_context.check_mode:
raise AnsibleActionFail('check mode and async cannot be used on same task.')
# Error if invalid argument is passed
if self._VALID_ARGS:
task_opts = frozenset(self._task.args.keys())
bad_opts = task_opts.difference(self._VALID_ARGS)
if bad_opts:
raise AnsibleActionFail('Invalid options for %s: %s' % (self._task.action, ','.join(list(bad_opts))))
if self._connection._shell.tmpdir is None and self._early_needs_tmp_path():
self._make_tmp_path()
return result
def _remote_file_exists(self, path):
cmd = self._connection._shell.exists(path)
result = self._low_level_execute_command(cmd=cmd, sudoable=True)
if result['rc'] == 0:
return True
return False
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git pull --rebase' to correct this problem." % (module_name))
# insert shared code and arguments into the module
final_environment = dict()
self._compute_environment_string(final_environment)
(module_data, module_style, module_shebang) = modify_module(module_name, module_path, module_args, self._templar,
task_vars=task_vars,
module_compression=self._play_context.module_compression,
async_timeout=self._task.async_val,
become=self._play_context.become,
become_method=self._play_context.become_method,
become_user=self._play_context.become_user,
become_password=self._play_context.become_pass,
become_flags=self._play_context.become_flags,
environment=final_environment)
return (module_style, module_shebang, module_data, module_path)
def _compute_environment_string(self, raw_environment_out=None):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [environments]
# The order of environments matters to make sure we merge
# in the parent's values first so those in the block then
# task 'win' in precedence
for environment in environments:
if environment is None or len(environment) == 0:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
if len(final_environment) > 0:
final_environment = self._templar.template(final_environment)
if isinstance(raw_environment_out, dict):
raw_environment_out.clear()
raw_environment_out.update(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a tmp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _is_pipelining_enabled(self, module_style, wrap_async=False):
'''
Determines if we are required and can do pipelining
'''
# any of these require a true
for condition in [
self._connection.has_pipelining,
self._play_context.pipelining or self._connection.always_pipeline_modules, # pipelining enabled for play or connection requires it (eg winrm)
module_style == "new", # old style modules do not support pipelining
not C.DEFAULT_KEEP_REMOTE_FILES, # user wants remote files
not wrap_async or self._connection.always_pipeline_modules, # async does not normally support pipelining unless it does (eg winrm)
self._play_context.become_method != 'su', # su does not work with pipelining,
# FIXME: we might need to make become_method exclusion a configurable list
]:
if not condition:
return False
return True
def _get_admin_users(self):
'''
Returns a list of admin users that are configured for the current shell
plugin
'''
try:
admin_users = self._connection._shell.get_option('admin_users')
except AnsibleError:
# fallback for old custom plugins w/o get_option
admin_users = ['root']
return admin_users
def _is_become_unprivileged(self):
'''
The user is not the same as the connection user and is not part of the
shell configured admin users
'''
# if we don't use become then we know we aren't switching to a
# different unprivileged user
if not self._play_context.become:
return False
# if we use become and the user is not an admin (or same user) then
# we need to return become_unprivileged as True
admin_users = self._get_admin_users()
try:
remote_user = self._connection.get_option('remote_user')
except AnsibleError:
remote_user = self._play_context.remote_user
return bool(self._play_context.become_user not in admin_users + [remote_user])
def _make_tmp_path(self, remote_user=None):
'''
Create and return a temporary path on a remote box.
'''
become_unprivileged = self._is_become_unprivileged()
try:
remote_tmp = self._connection._shell.get_option('remote_tmp')
except AnsibleError:
remote_tmp = '~/.ansible/tmp'
# deal with tmpdir creation
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
tmpdir = C.DEFAULT_LOCAL_TMP
else:
tmpdir = self._remote_expand_user(remote_tmp, sudoable=False)
cmd = self._connection._shell.mkdtemp(basefile=basefile, system=become_unprivileged, tmpdir=tmpdir)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection. '
'We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Authentication or permission failure. '
'In some cases, you may have been able to authenticate and did not have permissions on the target directory. '
'Consider changing the remote tmp path in ansible.cfg to a path rooted in "/tmp". '
'Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u", stdout output: %s" % result['stdout']
if self._play_context.verbosity > 3 and 'stderr' in result and result['stderr'] != u'':
output += u", stderr output: %s" % result['stderr']
raise AnsibleConnectionFailure(output)
else:
self._cleanup_remote_tmp = True
try:
stdout_parts = result['stdout'].strip().split('%s=' % basefile, 1)
rc = self._connection._shell.join_path(stdout_parts[-1], u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
self._connection._shell.tmpdir = rc
return rc
def _should_remove_tmp_path(self, tmp_path):
'''Determine if temporary path should be deleted or kept by user request/config'''
return tmp_path and self._cleanup_remote_tmp and not C.DEFAULT_KEEP_REMOTE_FILES and "-tmp-" in tmp_path
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path is None and self._connection._shell.tmpdir:
tmp_path = self._connection._shell.tmpdir
if self._should_remove_tmp_path(tmp_path):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
tmp_rm_res = self._low_level_execute_command(cmd, sudoable=False)
if tmp_rm_res.get('rc', 0) != 0:
display.warning('Error deleting remote temporary files (rc: %s, stderr: %s})'
% (tmp_rm_res.get('rc'), tmp_rm_res.get('stderr', 'No error string available.')))
else:
self._connection._shell.tmpdir = None
def _transfer_file(self, local_path, remote_path):
self._connection.put_file(local_path, remote_path)
return remote_path
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
afo = os.fdopen(afd, 'wb')
try:
data = to_bytes(data, errors='surrogate_or_strict')
afo.write(data)
except Exception as e:
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % to_native(e))
afo.flush()
afo.close()
try:
self._transfer_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
"""
We need the files we upload to be readable (and sometimes executable)
by the user being sudo'd to but we want to limit other people's access
(because the files could contain passwords or other private
information. We achieve this in one of these ways:
* If no sudo is performed or the remote_user is sudo'ing to
themselves, we don't have to change permissions.
* If the remote_user sudo's to a privileged user (for instance, root),
we don't have to change permissions
* If the remote_user sudo's to an unprivileged user then we attempt to
grant the unprivileged user access via file system acls.
* If granting file system acls fails we try to change the owner of the
file with chown which only works in case the remote_user is
privileged or the remote systems allows chown calls by unprivileged
users (e.g. HP-UX)
* If the chown fails we can set the file to be world readable so that
the second unprivileged user can read the file.
Since this could allow other users to get access to private
information we only do this if ansible is configured with
"allow_world_readable_tmpfiles" in the ansible.cfg
"""
if remote_user is None:
remote_user = self._play_context.remote_user
if self._connection._shell.SHELL_FAMILY == 'powershell':
# This won't work on Powershell as-is, so we'll just completely skip until
# we have a need for it, at which point we'll have to do something different.
return remote_paths
if self._is_become_unprivileged():
# Unprivileged user that's different than the ssh user. Let's get
# to work!
# Try to use file system acls to make the files readable for sudo'd
# user
if execute:
chmod_mode = 'rx'
setfacl_mode = 'r-x'
else:
chmod_mode = 'rX'
# NOTE: this form fails silently on freebsd. We currently
# never call _fixup_perms2() with execute=False but if we
# start to we'll have to fix this.
setfacl_mode = 'r-X'
res = self._remote_set_user_facl(remote_paths, self._play_context.become_user, setfacl_mode)
if res['rc'] != 0:
# File system acls failed; let's try to use chown next
# Set executable bit first as on some systems an
# unprivileged user can use chown
if execute:
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote temporary files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
res = self._remote_chown(remote_paths, self._play_context.become_user)
if res['rc'] != 0 and remote_user in self._get_admin_users():
# chown failed even if remote_user is administrator/root
raise AnsibleError('Failed to change ownership of the temporary files Ansible needs to create despite connecting as a privileged user. '
'Unprivileged become user would be unable to read the file.')
elif res['rc'] != 0:
if C.ALLOW_WORLD_READABLE_TMPFILES:
# chown and fs acls failed -- do things this insecure
# way only if the user opted in in the config file
display.warning('Using world-readable permissions for temporary files Ansible needs to create when becoming an unprivileged user. '
'This may be insecure. For information on securing this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user')
res = self._remote_chmod(remote_paths, 'a+%s' % chmod_mode)
if res['rc'] != 0:
raise AnsibleError('Failed to set file mode on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
else:
raise AnsibleError('Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user '
'(rc: %s, err: %s}). For information on working around this, see '
'https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user'
% (res['rc'], to_native(res['stderr'])))
elif execute:
# Can't depend on the file being transferred with execute permissions.
# Only need user perms because no become was used here
res = self._remote_chmod(remote_paths, 'u+x')
if res['rc'] != 0:
raise AnsibleError('Failed to set execute bit on remote files (rc: {0}, err: {1})'.format(res['rc'], to_native(res['stderr'])))
return remote_paths
def _remote_chmod(self, paths, mode, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(paths, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_chown(self, paths, user, sudoable=False):
'''
Issue a remote chown command
'''
cmd = self._connection._shell.chown(paths, user)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _remote_set_user_facl(self, paths, user, mode, sudoable=False):
'''
Issue a remote call to setfacl
'''
cmd = self._connection._shell.set_user_facl(paths, user, mode)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None, checksum=True):
'''
Get information from remote file.
'''
if tmp is not None:
display.warning('_execute_remote_stat no longer honors the tmp parameter. Action'
' plugins should set self._connection._shell.tmpdir to share'
' the tmpdir')
del tmp # No longer used
module_args = dict(
path=path,
follow=follow,
get_checksum=checksum,
checksum_algo='sha1',
)
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars,
wrap_async=False)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
if not msg:
msg = mystat.get('module_stdout')
if not msg:
msg = mystat.get('msg')
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, msg))
if not mystat['stat']['exists']:
# empty might be matched, 1 should never match, also backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if 'checksum' not in mystat['stat']:
mystat['stat']['checksum'] = ''
elif not isinstance(mystat['stat']['checksum'], string_types):
raise AnsibleError("Invalid checksum returned by stat: expected a string type but got %s" % type(mystat['stat']['checksum']))
return mystat['stat']
def _remote_checksum(self, path, all_vars, follow=False):
'''
Produces a remote checksum given a path,
Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
0 = unknown error
1 = file does not exist, this might not be an error
2 = permissions issue
3 = its a directory, not a file
4 = stat module failed, likely due to not finding python
5 = appropriate json module not found
'''
x = "0" # unknown error has occurred
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=follow)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
x = remote_stat['checksum'] # if 1, file is missing
except AnsibleError as e:
errormsg = to_text(e)
if errormsg.endswith(u'Permission denied'):
x = "2" # cannot read file
elif errormsg.endswith(u'MODULE FAILURE'):
x = "4" # python not found or module uncaught exception
elif 'json' in errormsg:
x = "5" # json module needed
finally:
return x # pylint: disable=lost-exception
def _remote_expand_user(self, path, sudoable=True, pathsep=None):
''' takes a remote path and performs tilde/$HOME expansion on the remote host '''
# We only expand ~/path and ~username/path
if not path.startswith('~'):
return path
# Per Jborean, we don't have to worry about Windows as we don't have a notion of user's home
# dir there.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
# Network connection plugins (network_cli, netconf, etc.) execute on the controller, rather than the remote host.
# As such, we want to avoid using remote_user for paths as remote_user may not line up with the local user
# This is a hack and should be solved by more intelligent handling of remote_tmp in 2.7
if getattr(self._connection, '_remote_is_local', False):
pass
elif sudoable and self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
else:
# use remote user instead, if none set default to current user
expand_path = '~%s' % (self._play_context.remote_user or self._connection.default_user or '')
# use shell to construct appropriate command and execute
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
try:
initial_fragment = data['stdout'].strip().splitlines()[-1]
except IndexError:
initial_fragment = None
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Try using pwd, if not, return
# the original string
cmd = self._connection._shell.pwd()
pwd = self._low_level_execute_command(cmd, sudoable=False).get('stdout', '').strip()
if pwd:
expanded = pwd
else:
expanded = path
elif len(split_path) > 1:
expanded = self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
expanded = initial_fragment
return expanded
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
def _update_module_args(self, module_name, module_args, task_vars):
# set check mode in the module arguments, if required
if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = display.verbosity
# give the module information about the ansible version
module_args['_ansible_version'] = __version__
# give the module information about its name
module_args['_ansible_module_name'] = module_name
# set the syslog facility to be used in the module
module_args['_ansible_syslog_facility'] = task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY)
# let module know about filesystems that selinux treats specially
module_args['_ansible_selinux_special_fs'] = C.DEFAULT_SELINUX_SPECIAL_FS
# give the module the socket for persistent connections
module_args['_ansible_socket'] = getattr(self._connection, 'socket_path')
if not module_args['_ansible_socket']:
module_args['_ansible_socket'] = task_vars.get('ansible_socket')
# make sure all commands use the designated shell executable
module_args['_ansible_shell_executable'] = self._play_context.executable
# make sure modules are aware if they need to keep the remote files
module_args['_ansible_keep_remote_files'] = C.DEFAULT_KEEP_REMOTE_FILES
# make sure all commands use the designated temporary directory if created
if self._is_become_unprivileged(): # force fallback on remote_tmp as user cannot normally write to dir
module_args['_ansible_tmpdir'] = None
else:
module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
# make sure the remote_tmp value is sent through in case modules needs to create their own
try:
module_args['_ansible_remote_tmp'] = self._connection._shell.get_option('remote_tmp')
except KeyError:
# here for 3rd party shell plugin compatibility in case they do not define the remote_tmp option
module_args['_ansible_remote_tmp'] = '~/.ansible/tmp'
def _update_connection_options(self, options, variables=None):
''' ensures connections have the appropriate information '''
update = {}
if getattr(self.connection, 'glob_option_vars', False):
# if the connection allows for it, pass any variables matching it.
if variables is not None:
for varname in variables:
if varname.match('ansible_%s_' % self.connection._load_name):
update[varname] = variables[varname]
# always override existing with options
update.update(options)
self.connection.set_options(update)
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=None, wrap_async=False):
'''
Transfer and run a module along with its arguments.
'''
if tmp is not None:
display.warning('_execute_module no longer honors the tmp parameter. Action plugins'
' should set self._connection._shell.tmpdir to share the tmpdir')
del tmp # No longer used
if delete_remote_tmp is not None:
display.warning('_execute_module no longer honors the delete_remote_tmp parameter.'
' Action plugins should check self._connection._shell.tmpdir to'
' see if a tmpdir existed before they were called to determine'
' if they are responsible for removing it.')
del delete_remote_tmp # No longer used
tmpdir = self._connection._shell.tmpdir
# We set the module_style to new here so the remote_tmp is created
# before the module args are built if remote_tmp is needed (async).
# If the module_style turns out to not be new and we didn't create the
# remote tmp here, it will still be created. This must be done before
# calling self._update_module_args() so the module wrapper has the
# correct remote_tmp value set
if not self._is_pipelining_enabled("new", wrap_async) and tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
self._update_module_args(module_name, module_args, task_vars)
# FUTURE: refactor this along with module build process to better encapsulate "smart wrapper" functionality
(module_style, shebang, module_data, module_path) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
display.vvv("Using module file %s" % module_path)
if not shebang and module_style != 'binary':
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
self._used_interpreter = shebang
remote_module_path = None
if not self._is_pipelining_enabled(module_style, wrap_async):
# we might need remote tmp dir
if tmpdir is None:
self._make_tmp_path()
tmpdir = self._connection._shell.tmpdir
remote_module_filename = self._connection._shell.get_remote_filename(module_path)
remote_module_path = self._connection._shell.join_path(tmpdir, 'AnsiballZ_%s' % remote_module_filename)
args_file_path = None
if module_style in ('old', 'non_native_want_json', 'binary'):
# we'll also need a tmp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmpdir, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote %s" % remote_module_path)
if module_style == 'binary':
self._transfer_file(module_path, remote_module_path)
else:
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k, v in iteritems(module_args):
args_data += '%s=%s ' % (k, shlex_quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style in ('non_native_want_json', 'binary'):
self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
remote_files = []
if tmpdir and remote_module_path:
remote_files = [tmpdir, remote_module_path]
if args_file_path:
remote_files.append(args_file_path)
sudoable = True
in_data = None
cmd = ""
if wrap_async and not self._connection.always_pipeline_modules:
# configure, upload, and chmod the async_wrapper module
(async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(),
task_vars=task_vars)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmpdir, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
remote_files.append(remote_async_module_path)
async_limit = self._task.async_val
async_jid = str(random.randint(0, 999999999999))
# call the interpreter for async_wrapper directly
# this permits use of a script for an interpreter on non-Linux platforms
# TODO: re-implement async_wrapper as a regular module to avoid this special case
interpreter = shebang.replace('#!', '').strip()
async_cmd = [interpreter, remote_async_module_path, async_jid, async_limit, remote_module_path]
if environment_string:
async_cmd.insert(0, environment_string)
if args_file_path:
async_cmd.append(args_file_path)
else:
# maintain a fixed number of positional parameters for async_wrapper
async_cmd.append('_')
if not self._should_remove_tmp_path(tmpdir):
async_cmd.append("-preserve_tmp")
cmd = " ".join(to_text(x) for x in async_cmd)
else:
if self._is_pipelining_enabled(module_style):
in_data = module_data
else:
cmd = remote_module_path
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path).strip()
# Fix permissions of the tmpdir path and tmpdir files. This should be called after all
# files have been transferred.
if remote_files:
# remove none/empty
remote_files = [x for x in remote_files if x]
self._fixup_perms2(remote_files, self._play_context.remote_user)
# actually execute
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
# parse the main result
data = self._parse_returned_data(res)
# NOTE: INTERNAL KEYS ONLY ACCESSIBLE HERE
# get internal info before cleaning
if data.pop("_ansible_suppress_tmpdir_delete", False):
self._cleanup_remote_tmp = False
# remove internal keys
remove_internal_keys(data)
if wrap_async:
# async_wrapper will clean up its tmpdir on its own so we want the controller side to
# forget about it now
self._connection._shell.tmpdir = None
# FIXME: for backwards compat, figure out if still makes sense
data['changed'] = True
# pre-split stdout/stderr into lines if needed
if 'stdout' in data and 'stdout_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stdout', None) or u''
data['stdout_lines'] = txt.splitlines()
if 'stderr' in data and 'stderr_lines' not in data:
# if the value is 'False', a default won't catch it.
txt = data.get('stderr', None) or u''
data['stderr_lines'] = txt.splitlines()
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _parse_returned_data(self, res):
try:
filtered_output, warnings = _filter_non_json_lines(res.get('stdout', u''))
for w in warnings:
display.warning(w)
data = json.loads(filtered_output)
if 'ansible_facts' in data and isinstance(data['ansible_facts'], dict):
data['ansible_facts'] = wrap_var(data['ansible_facts'])
data['_ansible_parsed'] = True
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, _ansible_parsed=False)
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
# try to figure out if we are missing interpreter
if self._used_interpreter is not None and '%s: No such file or directory' % self._used_interpreter.lstrip('!#') in data['module_stderr']:
data['msg'] = "The module failed to execute correctly, you probably need to set the interpreter."
else:
data['msg'] = "MODULE FAILURE"
data['msg'] += '\nSee stdout/stderr for the exact error'
if 'rc' in res:
data['rc'] = res['rc']
return data
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=None, encoding_errors='surrogate_then_replace', chdir=None):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
:kwarg chdir: cd into this directory before executing the command.
'''
display.debug("_low_level_execute_command(): starting")
# if not cmd:
# # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
# display.debug("_low_level_execute_command(): no command, exiting")
# return dict(stdout='', stderr='', rc=254)
if chdir:
display.debug("_low_level_execute_command(): changing cwd to %s for this command" % chdir)
cmd = self._connection._shell.append_command('cd %s' % chdir, cmd)
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
display.debug("_low_level_execute_command(): using become for this command")
if self._connection.transport != 'network_cli' and self._play_context.become_method != 'enable':
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
if self._connection.allow_executable:
if executable is None:
executable = self._play_context.executable
# mitigation for SSH race which can drop stdout (https://github.com/ansible/ansible/issues/13876)
# only applied for the default executable to avoid interfering with the raw action
cmd = self._connection._shell.append_command(cmd, 'sleep 0')
if executable:
cmd = executable + ' -c ' + shlex_quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
# Change directory to basedir of task for command execution when connection is local
if self._connection.transport == 'local':
cwd = os.getcwd()
os.chdir(self._loader.get_basedir())
try:
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
finally:
if self._connection.transport == 'local':
os.chdir(cwd)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_text(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_text(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_text(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_text(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug(u"_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, out, err))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err, stderr_lines=err.splitlines())
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, _diff_peek=True), task_vars=task_vars, persist_files=True)
if not peek_result.get('failed', False) or peek_result.get('rc', 0) == 0:
if peek_result.get('state') == 'absent':
diff['before'] = ''
elif peek_result.get('appears_binary'):
diff['dst_binary'] = 1
elif peek_result.get('size') and C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
with open(source, 'rb') as src:
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if b"\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]\n"
return diff
def _find_needle(self, dirname, needle):
'''
find a needle in haystack of paths, optionally using 'dirname' as a subdir.
This will build the ordered list of paths to search and pass them to dwim
to get back the first existing file found.
'''
# dwim already deals with playbook basedirs
path_stack = self._task.get_search_path()
# if missing it will return a file not found exception
return self._loader.path_dwim_relative_stack(path_stack, dirname, needle)
|
caphrim007/ansible
|
lib/ansible/plugins/action/__init__.py
|
Python
|
gpl-3.0
| 50,188
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Casacore(CMakePackage):
"""A suite of c++ libraries for radio astronomy data processing."""
homepage = "https://github.com/casacore/casacore"
url = "https://github.com/casacore/casacore/archive/v2.4.1.tar.gz"
maintainers = ['mpokorny']
version('3.4.0', sha256='31f02ad2e26f29bab4a47a2a69e049d7bc511084a0b8263360e6157356f92ae1')
version('3.3.0', sha256='3a714644b908ef6e81489b792cc9b80f6d8267a275e15d38a42a6a5137d39d3d')
version('3.2.0', sha256='ae5d3786cb6dfdd7ebc5eecc0c724ff02bbf6929720bc23be43a027978e79a5f')
version('3.1.2', sha256='ac94f4246412eb45d503f1019cabe2bb04e3861e1f3254b832d9b1164ea5f281')
version('3.1.1', sha256='85d2b17d856592fb206b17e0a344a29330650a4269c80b87f8abb3eaf3dadad4')
version('3.1.0', sha256='a6adf2d77ad0d6f32995b1e297fd88d31ded9c3e0bb8f28966d7b35a969f7897')
version('3.0.0', sha256='6f0e68fd77b5c96299f7583a03a53a90980ec347bff9dfb4c0abb0e2933e6bcb')
version('2.4.1', sha256='58eccc875053b2c6fe44fe53b6463030ef169597ec29926936f18d27b5087d63')
depends_on('cmake@3.7.1:', type='build')
variant('openmp', default=False, description='Build OpenMP support')
variant('shared', default=True, description='Build shared libraries')
variant('readline', default=True, description='Build readline support')
# see note below about the reason for disabling the "sofa" variant
# variant('sofa', default=False, description='Build SOFA support')
variant('adios2', default=False, description='Build ADIOS2 support')
variant('fftpack', default=False, description='Build FFTPack')
variant('hdf5', default=False, description='Build HDF5 support')
variant('python', default=False, description='Build python support')
# Force dependency on readline in v3.2 and earlier. Although the
# presence of readline is tested in CMakeLists.txt, and casacore
# can be built without it, there's no way to control that
# dependency at build time; since many systems come with readline,
# it's better to explicitly depend on it here always.
depends_on('readline', when='@:3.2.0')
depends_on('readline', when='+readline')
depends_on('flex', type='build')
depends_on('bison', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('cfitsio')
depends_on('wcslib@4.20:+cfitsio')
depends_on('fftw@3.0.0: precision=float,double', when='@3.4.0:')
depends_on('fftw@3.0.0: precision=float,double', when='~fftpack')
# SOFA dependency suffers the same problem in CMakeLists.txt as readline;
# force a dependency when building unit tests
depends_on('sofa-c', type='test')
depends_on('hdf5', when='+hdf5')
depends_on('adios2+mpi', when='+adios2')
depends_on('mpi', when='+adios2')
depends_on('python@2.6:', when='+python')
depends_on('boost+python', when='+python')
depends_on('py-numpy', when='+python')
def cmake_args(self):
args = []
spec = self.spec
args.append(self.define_from_variant('ENABLE_SHARED', 'shared'))
args.append(self.define_from_variant('USE_OPENMP', 'openmp'))
args.append(self.define_from_variant('USE_READLINE', 'readline'))
args.append(self.define_from_variant('USE_HDF5', 'hdf5'))
args.append(self.define_from_variant('USE_ADIOS2', 'adios2'))
args.append(self.define_from_variant('USE_MPI', 'adios2'))
if spec.satisfies('+adios2'):
args.append(self.define('ENABLE_TABLELOCKING', False))
# fftw3 is required by casacore starting with v3.4.0, but the
# old fftpack is still available. For v3.4.0 and later, we
# always require FFTW3 dependency with the optional addition
# of FFTPack. In older casacore versions, only one of FFTW3 or
# FFTPack can be selected.
if spec.satisfies('@3.4.0:'):
if spec.satisfies('+fftpack'):
args.append('-DBUILD_FFTPACK_DEPRECATED=YES')
args.append(self.define('USE_FFTW3', True))
else:
args.append(self.define('USE_FFTW3', spec.satisfies('~fftpack')))
# Python2 and Python3 binding
if spec.satisfies('~python'):
args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=NO'])
elif spec.satisfies('^python@3.0.0:'):
args.extend(['-DBUILD_PYTHON=NO', '-DBUILD_PYTHON3=YES'])
else:
args.extend(['-DBUILD_PYTHON=YES', '-DBUILD_PYTHON3=NO'])
args.append('-DBUILD_TESTING=OFF')
return args
def patch(self):
# Rely on CMake ability to find hdf5, available since CMake 3.7.X
os.remove('cmake/FindHDF5.cmake')
|
LLNL/spack
|
var/spack/repos/builtin/packages/casacore/package.py
|
Python
|
lgpl-2.1
| 4,875
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Sparse Tensor Representation. See the @{$python/sparse_ops} guide.
@@SparseTensor
@@SparseTensorValue
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
@@sparse_merge
@@sparse_concat
@@sparse_reorder
@@sparse_reshape
@@sparse_slice
@@sparse_split
@@sparse_retain
@@sparse_reset_shape
@@sparse_fill_empty_rows
@@sparse_transpose
@@sparse_reduce_max
@@sparse_reduce_max_sparse
@@sparse_reduce_sum
@@sparse_reduce_sum_sparse
@@sparse_add
@@sparse_softmax
@@sparse_tensor_dense_matmul
@@sparse_maximum
@@sparse_minimum
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_sparse_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _convert_to_sparse_tensor(sp_input):
"""Convert `sp_input` to `SparseTensor` and return it.
Args:
sp_input: `SparseTensor` or `SparseTensorValue`.
Returns:
`sp_input` converted to `SparseTensor`.
Raises:
ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.
"""
if isinstance(sp_input, sparse_tensor.SparseTensorValue):
return sparse_tensor.SparseTensor.from_value(sp_input)
if not isinstance(sp_input, sparse_tensor.SparseTensor):
raise TypeError("Input must be a SparseTensor.")
return sp_input
def _convert_to_sparse_tensors(sp_inputs):
"""Convert `sp_inputs` to `SparseTensor` objects and return them.
Args:
sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
objects.
Returns:
`sp_inputs` converted to `SparseTensor` objects.
Raises:
ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor
`SparseTensorValue`.
"""
if isinstance(sp_inputs, list):
return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
if isinstance(sp_inputs, tuple):
return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
raise TypeError("Inputs must be a list or tuple.")
# pylint: disable=protected-access
@tf_export("sparse_concat")
def sparse_concat(axis,
sp_inputs,
name=None,
expand_nonconcat_dim=False,
concat_dim=None):
"""Concatenates a list of `SparseTensor` along the specified dimension.
Concatenation is with respect to the dense versions of each sparse input.
It is assumed that each inputs is a `SparseTensor` whose elements are ordered
along increasing dimension number.
If expand_nonconcat_dim is False, all inputs' shapes must match, except for
the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are
allowed to vary among all inputs.
The `indices`, `values`, and `shapes` lists must have the same length.
If expand_nonconcat_dim is False, then the output shape is identical to the
inputs', except along the concat dimension, where it is the sum of the inputs'
sizes along that dimension.
If expand_nonconcat_dim is True, then the output shape along the non-concat
dimensions will be expand to be the largest among all inputs, and it is the
sum of the inputs sizes along the concat dimension.
The output elements will be resorted to preserve the sort order along
increasing dimension number.
This op runs in `O(M log M)` time, where `M` is the total number of non-empty
values across all inputs. This is due to the need for an internal sort in
order to concatenate efficiently across an arbitrary dimension.
For example, if `axis = 1` and the inputs are
sp_inputs[0]: shape = [2, 3]
[0, 2]: "a"
[1, 0]: "b"
[1, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
then the output will be
shape = [2, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[1, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b c ] [ ] [b c ]
Another example, if 'axis = 1' and the inputs are
sp_inputs[0]: shape = [3, 3]
[0, 2]: "a"
[1, 0]: "b"
[2, 1]: "c"
sp_inputs[1]: shape = [2, 4]
[0, 1]: "d"
[0, 2]: "e"
if expand_nonconcat_dim = False, this will result in an error. But if
expand_nonconcat_dim = True, this will result in:
shape = [3, 7]
[0, 2]: "a"
[0, 4]: "d"
[0, 5]: "e"
[1, 0]: "b"
[2, 1]: "c"
Graphically this is equivalent to doing
[ a] concat [ d e ] = [ a d e ]
[b ] [ ] [b ]
[ c ] [ c ]
Args:
axis: Dimension to concatenate along. Must be in range [-rank, rank),
where rank is the number of dimensions in each input `SparseTensor`.
sp_inputs: List of `SparseTensor` to concatenate.
name: A name prefix for the returned tensors (optional).
expand_nonconcat_dim: Whether to allow the expansion in the non-concat
dimensions. Defaulted to False.
concat_dim: The old (deprecated) name for axis.
Returns:
A `SparseTensor` with the concatenated output.
Raises:
TypeError: If `sp_inputs` is not a list of `SparseTensor`.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
concat_dim)
sp_inputs = _convert_to_sparse_tensors(sp_inputs)
if len(sp_inputs) == 1: # Degenerate case of one tensor.
return sp_inputs[0]
inds = [sp_input.indices for sp_input in sp_inputs]
vals = [sp_input.values for sp_input in sp_inputs]
shapes = [sp_input.dense_shape for sp_input in sp_inputs]
if expand_nonconcat_dim:
max_shape = math_ops.reduce_max(
array_ops.concat(
[array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
shapes = [
array_ops.concat([
max_shape[:axis], shape[-1:]
if axis == -1 else shape[axis:axis + 1], []
if axis == -1 else max_shape[axis + 1:]
], 0) for shape in shapes
]
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_concat(inds, vals, shapes, axis, name=name))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_add")
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
`Tensor`s.
The shapes of the two operands must match: broadcasting is not supported.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
`SparseReorder` to restore index ordering.
If both arguments are sparse, we perform "clipping" as follows. By default,
if two values sum to zero at some index, the output `SparseTensor` would still
include that particular location in its index, storing a zero in the
corresponding value slot. To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included. In particular,
`thresh == 0.0` (default) means everything is kept and actual thresholding
happens only for a positive value.
For example, suppose the logical sum of two sparse operands is (densified):
[ 2]
[.1 0]
[ 6 -.2]
Then,
* `thresh == 0` (the default): all 5 index/value pairs will be returned.
* `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
index/value pairs will be returned.
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
a: The first operand; `SparseTensor` or `Tensor`.
b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
raise TypeError("At least one input should be SparseTensor; do you mean to"
" use tf.add()?")
if all(isinstance(inp, sparse_classes) for inp in [a, b]):
a = _convert_to_sparse_tensor(a)
b = _convert_to_sparse_tensor(b)
thresh = ops.convert_to_tensor(
thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape,
b.indices, b.values, b.dense_shape, thresh))
# Attempt to get output_shape statically.
a.get_shape().assert_is_compatible_with(b.get_shape())
static_shape = array_ops.broadcast_static_shape(a.get_shape(),
b.get_shape())
if static_shape.is_fully_defined():
output_shape = static_shape.as_list()
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
else:
# swap to make `a` the SparseTensor.
if isinstance(b, sparse_classes):
a, b = b, a
return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values,
a.dense_shape, b)
def _sparse_cross(inputs, name=None):
"""Generates sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: "a_X_d_X_f"
[1, 0]: "b_X_e_X_g"
[1, 1]: "c_X_e_X_g"
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `string`.
"""
return _sparse_cross_internal(inputs=inputs, hashed_output=False, name=name)
def _sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None):
"""Generates hashed sparse cross from a list of sparse and dense tensors.
For example, if the inputs are
* inputs[0]: SparseTensor with shape = [2, 2]
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
* inputs[1]: SparseTensor with shape = [2, 1]
[0, 0]: "d"
[1, 0]: "e"
* inputs[2]: Tensor [["f"], ["g"]]
then the output will be:
shape = [2, 2]
[0, 0]: FingerprintCat64(
Fingerprint64("f"), FingerprintCat64(
Fingerprint64("d"), Fingerprint64("a")))
[1, 0]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("b")))
[1, 1]: FingerprintCat64(
Fingerprint64("g"), FingerprintCat64(
Fingerprint64("e"), Fingerprint64("c")))
Args:
inputs: An iterable of `Tensor` or `SparseTensor`.
num_buckets: An `int` that is `>= 0`.
output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
hash_key: Integer hash_key that will be used by the `FingerprintCat64`
function. If not given, will use a default key.
name: Optional name for the op.
Returns:
A `SparseTensor` of type `int64`.
"""
return _sparse_cross_internal(
inputs=inputs,
hashed_output=True,
num_buckets=num_buckets,
hash_key=hash_key,
name=name)
_DEFAULT_HASH_KEY = 0xDECAFCAFFE
def _sparse_cross_internal(inputs,
hashed_output=False,
num_buckets=0,
hash_key=None,
name=None):
"""See gen_sparse_ops.sparse_cross."""
if not isinstance(inputs, list):
raise TypeError("Inputs must be a list")
if not all(
isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor)
for i in inputs):
raise TypeError("All inputs must be SparseTensors")
sparse_inputs = [
i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)
]
dense_inputs = [
i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)
]
indices = [sp_input.indices for sp_input in sparse_inputs]
values = [sp_input.values for sp_input in sparse_inputs]
shapes = [sp_input.dense_shape for sp_input in sparse_inputs]
out_type = dtypes.int64 if hashed_output else dtypes.string
internal_type = dtypes.string
for i in range(len(values)):
if values[i].dtype != dtypes.string:
values[i] = math_ops.to_int64(values[i])
internal_type = dtypes.int64
for i in range(len(dense_inputs)):
if dense_inputs[i].dtype != dtypes.string:
dense_inputs[i] = math_ops.to_int64(dense_inputs[i])
internal_type = dtypes.int64
indices_out, values_out, shape_out = gen_sparse_ops.sparse_cross(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
hashed_output=hashed_output,
num_buckets=num_buckets,
hash_key=hash_key or _DEFAULT_HASH_KEY,
out_type=out_type,
internal_type=internal_type,
name=name)
return sparse_tensor.SparseTensor(indices_out, values_out, shape_out)
def sparse_dense_cwise_add(sp_t, dense_t):
"""Adds up a SparseTensor and a dense Tensor, using these special rules:
(1) Broadcasts the dense side to have the same shape as the sparse side, if
eligible;
(2) Then, only the dense values pointed to by the indices of the SparseTensor
participate in the cwise addition.
By the rules, the result is a logical SparseTensor with exactly the same
indices and shape, but possibly with different non-zero values. The output of
this Op is the resultant non-zero values.
Args:
sp_t: the SparseTensor operand.
dense_t: the dense Tensor operand; must have the same dtype and a
broadcast-compatible shape as `sp_t`.
Returns:
output: the SparseTensor output.
"""
result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,
sp_t.dense_shape, dense_t)
return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)
@tf_export("sparse_reorder")
def sparse_reorder(sp_input, name=None):
"""Reorders a `SparseTensor` into the canonical, row-major ordering.
Note that by convention, all sparse ops preserve the canonical ordering
along increasing dimension number. The only time ordering can be violated
is during manual manipulation of the indices and values to add entries.
Reordering does not affect the shape of the `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[4, 5]` and
`indices` / `values`:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same shape and non-empty values, but in
canonical ordering.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
reordered_ind, reordered_val = (
gen_sparse_ops.sparse_reorder(
sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))
if sp_input.get_shape().is_fully_defined():
dense_shape = sp_input.get_shape().as_list()
else:
dense_shape = array_ops.identity(sp_input.dense_shape)
return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)
@tf_export("sparse_reshape")
def sparse_reshape(sp_input, shape, name=None):
"""Reshapes a `SparseTensor` to represent values in a new dense shape.
This operation has the same semantics as `reshape` on the represented dense
tensor. The indices of non-empty values in `sp_input` are recomputed based
on the new dense shape, and a new `SparseTensor` is returned containing the
new indices and new shape. The order of non-empty values in `sp_input` is
unchanged.
If one component of `shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant. At
most one component of `shape` can be -1. The number of dense elements
implied by `shape` must be the same as the number of dense elements
originally represented by `sp_input`.
For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:
[0, 0, 0]: a
[0, 0, 1]: b
[0, 1, 0]: c
[1, 0, 0]: d
[1, 2, 3]: e
and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
shape `[9, 4]` and `indices` / `values`:
[0, 0]: a
[0, 1]: b
[1, 2]: c
[4, 2]: d
[8, 1]: e
Args:
sp_input: The input `SparseTensor`.
shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` with the same non-empty values but with indices calculated
by the new dense shape.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If argument `shape` requests a `SparseTensor` with a different
number of elements than `sp_input`.
ValueError: If `shape` has more than one inferred (== -1) dimension.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
shape = math_ops.cast(shape, dtype=dtypes.int64)
with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
reshaped_ind, reshaped_shape = gen_sparse_ops.sparse_reshape(
sp_input.indices, sp_input.dense_shape, shape, name=name)
reshaped_shape_const = tensor_util.constant_value(shape)
if (reshaped_shape_const is not None and
sp_input.get_shape().is_fully_defined()):
num_implied = sum((dim == -1) for dim in reshaped_shape_const)
if num_implied > 1:
raise ValueError("At most one dimension can be inferred (-1). Found: %s"
% reshaped_shape_const)
original_reshaped_shape = list(reshaped_shape_const) # Copy.
in_shape_size = np.prod(sp_input.get_shape().as_list())
if num_implied:
implied_idx = original_reshaped_shape.index(-1)
non_implied_idx = (
original_reshaped_shape[:implied_idx] +
original_reshaped_shape[implied_idx + 1:])
reshaped_shape_const[implied_idx] = (
in_shape_size // np.prod(non_implied_idx))
reshaped_size = np.prod(reshaped_shape_const)
if reshaped_size != in_shape_size:
raise ValueError("Cannot reshape a tensor with %d elements to shape %s "
"(%d elements)." %
(in_shape_size, original_reshaped_shape,
reshaped_size))
reshaped_shape = reshaped_shape_const
return sparse_tensor.SparseTensor(reshaped_ind,
array_ops.identity(sp_input.values),
reshaped_shape)
# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
def __repr__(self):
# This is needed to make documentation without fully qualified module paths
return "KeywordRequired()"
@tf_export("sparse_split")
def sparse_split(keyword_required=KeywordRequired(),
sp_input=None,
num_split=None,
axis=None,
name=None,
split_dim=None):
"""Split a `SparseTensor` into `num_split` tensors along `axis`.
If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`
each slice starting from 0:`shape[axis] % num_split` gets extra one
dimension. For example, if `axis = 1` and `num_split = 2` and the
input is:
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
output_tensor[0] =
[ a ]
[b c ]
output_tensor[1] =
[ d e ]
[ ]
Args:
keyword_required: Python 2 standin for * (temporary for argument reorder)
sp_input: The `SparseTensor` to split.
num_split: A Python integer. The number of ways to split.
axis: A 0-D `int32` `Tensor`. The dimension along which to split.
name: A name for the operation (optional).
split_dim: Deprecated old name for axis.
Returns:
`num_split` `SparseTensor` objects resulting from splitting `value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If the deprecated `split_dim` and `axis` are both non None.
"""
if not isinstance(keyword_required, KeywordRequired):
raise ValueError("Keyword arguments are required for this function.")
if sp_input is None:
raise ValueError("sp_input is required")
if num_split is None:
raise ValueError("num_split is required")
if axis is None:
raise ValueError("axis is required")
axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
split_dim)
sp_input = _convert_to_sparse_tensor(sp_input)
output_inds, output_vals, output_shapes = (
gen_sparse_ops.sparse_split(
axis,
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
num_split,
name=name))
sparse_tensors = []
for i in range(0, num_split):
sparse_tensors.append(
sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
output_shapes[i]))
return sparse_tensors
@tf_export("sparse_slice")
def sparse_slice(sp_input, start, size, name=None):
"""Slice a `SparseTensor` based on the `start` and `size.
For example, if the input is
input_tensor = shape = [2, 7]
[ a d e ]
[b c ]
Graphically the output tensors are:
sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
[ a ]
[b c ]
sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
[ d e ]
[ ]
Args:
sp_input: The `SparseTensor` to split.
start: 1-D. tensor represents the start of the slice.
size: 1-D. tensor represents the size of the slice.
name: A name for the operation (optional).
Returns:
A `SparseTensor` objects resulting from splicing.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
start = ops.convert_to_tensor(start, dtypes.int64)
size = ops.convert_to_tensor(size, dtypes.int64)
with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
start,
size,
name=name)
return sparse_tensor.SparseTensor(output_indices, output_values,
output_shape)
@tf_export("sparse_to_dense")
def sparse_to_dense(sparse_indices,
output_shape,
sparse_values,
default_value=0,
validate_indices=True,
name=None):
"""Converts a sparse representation into a dense tensor.
Builds an array `dense` with shape `output_shape` such that
```python
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)
# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]
# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```
All other values in `dense` are set to `default_value`. If `sparse_values`
is a scalar, all sparse indices are set to this single value.
Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is True, these properties
are checked during execution.
Args:
sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops.sparse_to_dense(
sparse_indices,
output_shape,
sparse_values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse_reduce_max")
def sparse_reduce_max(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 2]
# [?, 3, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_max(x) ==> 3
tf.sparse_reduce_max(x, 0) ==> [1, 3, 2]
tf.sparse_reduce_max(x, 1) ==> [2, 3] # Can also use -1 as the axis.
tf.sparse_reduce_max(x, 1, keep_dims=True) ==> [[2], [3]]
tf.sparse_reduce_max(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_max(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
@tf_export("sparse_reduce_max_sparse")
def sparse_reduce_max_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the max of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_max()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_max_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_reduce_sum")
def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
similar to the indexing rules in Python.
For example:
```python
# 'x' represents [[1, ?, 1]
# [?, 1, ?]]
# where ? is implicitly-zero.
tf.sparse_reduce_sum(x) ==> 3
tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.
tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
tf.sparse_reduce_sum(x, [0, 1]) ==> 3
```
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis.
Returns:
The reduced Tensor.
"""
return gen_sparse_ops.sparse_reduce_sum(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
@tf_export("sparse_reduce_sum_sparse")
def sparse_reduce_sum_sparse(sp_input,
axis=None,
keep_dims=False,
reduction_axes=None):
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
SparseTensor.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.
If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned. Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
Args:
sp_input: The SparseTensor to reduce. Should have numeric type.
axis: The dimensions to reduce; list or scalar. If `None` (the
default), reduces all dimensions.
keep_dims: If true, retain reduced dimensions with length 1.
reduction_axes: Deprecated name of axis
Returns:
The reduced SparseTensor.
"""
output_ind, output_val, output_shape = (
gen_sparse_ops.sparse_reduce_sum_sparse(
sp_input.indices, sp_input.values, sp_input.dense_shape,
math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))
return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
@tf_export("sparse_tensor_to_dense")
def sparse_tensor_to_dense(sp_input,
default_value=0,
validate_indices=True,
name=None):
"""Converts a `SparseTensor` into a dense tensor.
This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.
For example, if `sp_input` has shape `[3, 5]` and non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
and `default_value` is `x`, then the output will be a dense `[3, 5]`
string tensor with values:
[[x a x b x]
[x x x x x]
[c x x x x]]
Indices must be without repeats. This is only
tested if validate_indices is True.
Args:
sp_input: The input `SparseTensor`.
default_value: Scalar value to set for indices not specified in
`sp_input`. Defaults to zero.
validate_indices: A boolean value. If `True`, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name prefix for the returned tensors (optional).
Returns:
A dense tensor with shape `sp_input.dense_shape` and values specified by
the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
`default_value`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return sparse_to_dense(
sp_input.indices,
sp_input.dense_shape,
sp_input.values,
default_value=default_value,
validate_indices=validate_indices,
name=name)
@tf_export("sparse_to_indicator")
def sparse_to_indicator(sp_input, vocab_size, name=None):
"""Converts a `SparseTensor` of ids into a dense bool indicator tensor.
The last dimension of `sp_input.indices` is discarded and replaced with
the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where
output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True
and False elsewhere in `output`.
For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:
[0, 0, 0]: 0
[0, 1, 0]: 10
[1, 0, 3]: 103
[1, 1, 2]: 150
[1, 1, 3]: 149
[1, 1, 4]: 150
[1, 2, 1]: 121
and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
tensor with False everywhere except at positions
(0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
(1, 2, 121).
Note that repeats are allowed in the input SparseTensor.
This op is useful for converting `SparseTensor`s into dense formats for
compatibility with ops that expect dense tensors.
The input `SparseTensor` must be in row-major order.
Args:
sp_input: A `SparseTensor` with `values` property of type `int32` or
`int64`.
vocab_size: A scalar int64 Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
name: A name prefix for the returned tensors (optional)
Returns:
A dense bool indicator tensor representing the indices with specified value.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
num_entries = array_ops.shape(sp_input.indices)[0]
new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
sp_input.dense_shape)
sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)
# validate_indices may be False because we allow duplicates in new_indices:
# repeated indices are allowed when creating an indicator matrix.
return sparse_tensor_to_dense(
sp_new, default_value=False, validate_indices=False, name=name)
@tf_export("sparse_merge")
def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
already_sorted=False):
"""Combines a batch of feature ids and values into a single `SparseTensor`.
The most common use case for this function occurs when feature ids and
their corresponding values are stored in `Example` protos on disk.
`parse_example` will return a batch of ids and a batch of values, and this
function joins them into a single logical `SparseTensor` for use in
functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.
The `SparseTensor` returned by this function has the following properties:
- `indices` is equivalent to `sp_ids.indices` with the last
dimension discarded and replaced with `sp_ids.values`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn, vocab_size]`.
For example, consider the following feature vectors:
```python
vector1 = [-3, 0, 0, 0, 0, 0]
vector2 = [ 0, 1, 0, 4, 1, 0]
vector3 = [ 5, 0, 0, 9, 0, 0]
```
These might be stored sparsely in the following Example protos by storing
only the feature ids (column number if the vectors are treated as a matrix)
of the non-zero elements and the corresponding values:
```python
examples = [Example(features={
"ids": Feature(int64_list=Int64List(value=[0])),
"values": Feature(float_list=FloatList(value=[-3]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
"values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
Example(features={
"ids": Feature(int64_list=Int64List(value=[0, 3])),
"values": Feature(float_list=FloatList(value=[5, 9]))})]
```
The result of calling parse_example on these examples will produce a
dictionary with entries for "ids" and "values". Passing those two objects
to this function along with vocab_size=6, will produce a `SparseTensor` that
sparsely represents all three instances. Namely, the `indices` property will
contain the coordinates of the non-zero entries in the feature matrix (the
first dimension is the row number in the matrix, i.e., the index within the
batch, and the second dimension is the column number, i.e., the feature id);
`values` will contain the actual values. `shape` will be the shape of the
original matrix, i.e., (3, 6). For our example above, the output will be
equal to:
```python
SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
values=[-3, 1, 4, 1, 5, 9],
dense_shape=[3, 6])
```
This method generalizes to higher-dimensions by simply providing a list for
both the sp_ids as well as the vocab_size.
In this case the resulting `SparseTensor` has the following properties:
- `indices` is equivalent to `sp_ids[0].indices` with the last
dimension discarded and concatenated with
`sp_ids[0].values, sp_ids[1].values, ...`.
- `values` is simply `sp_values.values`.
- If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
`output.shape = [D0, D1, ..., Dn] + vocab_size`.
Args:
sp_ids: A single `SparseTensor` with `values` property of type `int32`
or `int64` or a Python list of such `SparseTensor`s or a list thereof.
sp_values: A `SparseTensor` of any type.
vocab_size: A scalar `int64` Tensor (or Python int) containing the new size
of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
all `i`.
name: A name prefix for the returned tensors (optional)
already_sorted: A boolean to specify whether the per-batch values in
`sp_values` are already sorted. If so skip sorting, False by default
(optional).
Returns:
A `SparseTensor` compactly representing a batch of feature ids and values,
useful for passing to functions that expect such a `SparseTensor`.
Raises:
TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither
a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
`Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
`vocab_size` is not a or list thereof and `sp_ids` is a list.
ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
"""
if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
sp_ids, sparse_tensor.SparseTensor):
sp_ids = [sp_ids]
if not (isinstance(vocab_size, ops.Tensor) or
isinstance(vocab_size, numbers.Integral)):
raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" %
type(vocab_size))
vocab_size = [vocab_size]
else:
if not isinstance(sp_ids, collections.Iterable):
raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
"Found %s" % type(sp_ids))
if not isinstance(vocab_size, collections.Iterable):
raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
"Found %s" % type(vocab_size))
for dim in vocab_size:
if not (isinstance(dim, ops.Tensor) or isinstance(dim, numbers.Integral)):
raise TypeError(
"vocab_size has to be a list of Tensors or Python ints. Found %s" %
type(dim))
if len(sp_ids) != len(vocab_size):
raise ValueError("sp_ids and vocab_size have to have equal lengths.")
with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
sp_values = _convert_to_sparse_tensor(sp_values)
ids = []
for sp_ids_dim in sp_ids:
ids_dim = sp_ids_dim.values
if sp_ids_dim.dtype != dtypes.int64:
ids_dim = math_ops.cast(ids_dim, dtypes.int64)
ids += [array_ops.expand_dims(ids_dim, axis=1)]
vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]
# Slice off the last dimension of indices, then tack on the ids
indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)
new_values = sp_values.values
new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)
result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
return result if already_sorted else sparse_reorder(result)
@tf_export("sparse_retain")
def sparse_retain(sp_input, to_retain):
"""Retains specified non-empty values within a `SparseTensor`.
For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
and `to_retain = [True, False, False, True]`, then the output will
be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:
[0, 1]: a
[3, 1]: d
Args:
sp_input: The input `SparseTensor` with `N` non-empty elements.
to_retain: A bool vector of length `N` with `M` true values.
Returns:
A `SparseTensor` with the same shape as the input and `M` non-empty
elements corresponding to the true positions in `to_retain`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
to_retain = ops.convert_to_tensor(to_retain)
# Shape checking, if shape is known at graph construction time
retain_shape = to_retain.get_shape()
retain_shape.assert_has_rank(1)
sp_input.values.get_shape()[0].merge_with(retain_shape[0])
where_true = array_ops.reshape(array_ops.where(to_retain), [-1])
new_indices = array_ops.gather(sp_input.indices, where_true)
new_values = array_ops.gather(sp_input.values, where_true)
return sparse_tensor.SparseTensor(new_indices, new_values,
array_ops.identity(sp_input.dense_shape))
@tf_export("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
"""Resets the shape of a `SparseTensor` with indices and values unchanged.
If `new_shape` is None, returns a copy of `sp_input` with its shape reset
to the tight bounding box of `sp_input`. This will be a shape consisting of
all zeros if sp_input has no values.
If `new_shape` is provided, then it must be larger or equal in all dimensions
compared to the shape of `sp_input`. When this condition is met, the returned
SparseTensor will have its shape reset to `new_shape` and its indices and
values unchanged from that of `sp_input.`
For example:
Consider a `sp_input` with shape [2, 3, 5]:
[0, 0, 1]: a
[0, 1, 0]: b
[0, 2, 2]: c
[1, 0, 3]: d
- It is an error to set `new_shape` as [3, 7] since this represents a
rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
during graph construction (if both shapes are known) or an OpError during
run time.
- Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or
equal in every dimension compared to the original shape [2, 3, 5].
- On the other hand, setting new_shape as [2, 3, 4] is also an error: The
third dimension is smaller than the original shape [2, 3, 5] (and an
`InvalidArgumentError` will be raised).
- If `new_shape` is None, the returned SparseTensor will have a shape
[2, 3, 4], which is the tight bounding box of `sp_input`.
Args:
sp_input: The input `SparseTensor`.
new_shape: None or a vector representing the new shape for the returned
`SparseTensor`.
Returns:
A `SparseTensor` indices and values unchanged from `input_sp`. Its shape is
`new_shape` if that is set. Otherwise it is the tight bounding box of
`input_sp`
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
ValueError: If `new_shape` represents a tensor with a different rank from
that of `sp_input` (if shapes are known when graph is constructed).
ValueError: If `new_shape` is determined during graph build to have
dimension sizes that are too small.
OpError:
- If `new_shape` has dimension sizes that are too small.
- If shapes are not known during graph construction time, and during run
time it is found out that the ranks do not match.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
in_indices = array_ops.identity(sp_input.indices)
in_values = array_ops.identity(sp_input.values)
in_shape = array_ops.identity(sp_input.dense_shape)
if new_shape is None:
dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
output_shape_tensor = math_ops.maximum(
array_ops.constant(0, dtype=dtypes.int64),
math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
else:
output_shape_tensor = ops.convert_to_tensor(new_shape)
output_shape_tensor.get_shape().assert_has_rank(1)
output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
# For cases when shape is known during graph construction, this catches the
# error before the sparse_tensor.SparseTensor catches it.
output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])
output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
# For cases where all shapes are known during graph construction
if (output_shape_tensor_const is not None and
sp_input.get_shape().is_fully_defined()):
in_shape_const = np.array(sp_input.get_shape().as_list())
if not np.all(in_shape_const <= output_shape_tensor_const):
raise ValueError(
"Requested new_shape should have dimension sizes >= sp_input.shape."
" Found new_shape (%s), sp_input.shape (%s)." %
(in_shape_const, output_shape_tensor_const))
output_shape_tensor = output_shape_tensor_const
else:
# For cases where shape is not known during graph construction.
output_shape_tensor = control_flow_ops.with_dependencies([
check_ops.assert_equal(
array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
], output_shape_tensor)
output_shape_tensor = control_flow_ops.with_dependencies(
[check_ops.assert_less_equal(in_shape, output_shape_tensor)],
output_shape_tensor)
return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
@tf_export("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
"""Fills empty rows in the input 2-D `SparseTensor` with a default value.
This op adds entries with the specified `default_value` at index
`[row, 0]` for any row in the input that does not already have a value.
For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
[0, 1]: a
[0, 3]: b
[2, 0]: c
[3, 1]: d
Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
[0, 1]: a
[0, 3]: b
[1, 0]: default_value
[2, 0]: c
[3, 1]: d
[4, 0]: default_value
Note that the input may have empty columns at the end, with no effect on
this op.
The output `SparseTensor` will be in row-major order and will have the
same shape as the input.
This op also returns an indicator vector such that
empty_row_indicator[i] = True iff row i was an empty row.
Args:
sp_input: A `SparseTensor` with shape `[N, M]`.
default_value: The value to fill for empty rows, with the same type as
`sp_input.`
name: A name prefix for the returned tensors (optional)
Returns:
sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty
rows filled in with `default_value`.
empty_row_indicator: A bool vector of length `N` indicating whether each
input row was empty.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
default_value = ops.convert_to_tensor(
default_value, dtype=sp_input.values.dtype)
(output_indices, output_values, empty_row_indicator,
unused_reverse_index_map) = gen_sparse_ops.sparse_fill_empty_rows(
indices=sp_input.indices,
values=sp_input.values,
dense_shape=sp_input.dense_shape,
default_value=default_value)
return (sparse_tensor.SparseTensor(
indices=output_indices,
values=output_values,
dense_shape=sp_input.dense_shape), empty_row_indicator)
@tf_export("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A 3-vector (1-D `Tensor`), with each column representing the serialized
`SparseTensor`'s indices, values, and shape (respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
@tf_export("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
"""Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
name: A name prefix for the returned tensors (optional).
out_type: The `dtype` to use for serialization.
Returns:
A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
represents serialized `SparseTensor`'s indices, values, and shape
(respectively).
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.serialize_many_sparse(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
name=name,
out_type=out_type)
def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize `SparseTensor` objects.
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
the last dimension stores serialized `SparseTensor` objects and the other N
dimensions (N >= 0) correspond to a batch. The ranks of the original
`SparseTensor` objects must all match. When the final `SparseTensor` is
created, its rank is the rank of the incoming `SparseTensor` objects plus N;
the sparse tensors have been concatenated along new dimensions, one for each
batch.
The output `SparseTensor` object's shape values for the original dimensions
are the max across the input `SparseTensor` objects' shape values for the
corresponding dimensions. The new dimensions match the size of the batch.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `SparseReorder` to restore index ordering.
For example, if the serialized input is a `[2 x 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: The serialized `SparseTensor` objects.
The last dimension must have 3 columns.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional).
Returns:
A `SparseTensor` representing the deserialized `SparseTensor` objects.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_sparse(serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.
The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`serialize_sparse`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
output_indices, output_values, output_shape = (
gen_sparse_ops.deserialize_many_sparse(
serialized_sparse, dtype, name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
@tf_export("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
b,
adjoint_a=False,
adjoint_b=False,
name=None):
# pylint: disable=line-too-long
"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
No validity checking is performed on the indices of `A`. However, the
following input format is recommended for optimal behavior:
* If `adjoint_a == false`: `A` should be sorted in lexicographically
increasing order. Use `sparse_reorder` if you're not sure.
* If `adjoint_a == true`: `A` should be sorted in order of increasing
dimension 1 (i.e., "column major" order instead of "row major" order).
Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:
It's not obvious but you can consider `embedding_lookup_sparse` as another
sparse and dense multiplication. In some situations, you may prefer to use
`embedding_lookup_sparse` even though you're not dealing with embeddings.
There are two questions to ask in the decision process: Do you need gradients
computed as sparse too? Is your sparse data represented as two
`SparseTensor`s: ids and values? There is more explanation about data format
below. If you answer any of these questions as yes, consider using
`tf.nn.embedding_lookup_sparse`.
Following explains differences between the expected SparseTensors:
For example if dense form of your sparse data has shape `[3, 5]` and values:
[[ a ]
[b c]
[ d ]]
`SparseTensor` format expected by `sparse_tensor_dense_matmul`:
`sp_a` (indices, values):
[0, 1]: a
[1, 0]: b
[1, 4]: c
[2, 2]: d
`SparseTensor` format expected by `embedding_lookup_sparse`:
`sp_ids` `sp_weights`
[0, 0]: 1 [0, 0]: a
[1, 0]: 0 [1, 0]: b
[1, 1]: 4 [1, 1]: c
[2, 0]: 2 [2, 0]: d
Deciding when to use `sparse_tensor_dense_matmul` vs.
`matmul`(a_is_sparse=True):
There are a number of questions to ask in the decision process, including:
* Will the SparseTensor `A` fit in memory if densified?
* Is the column count of the product large (>> 1)?
* Is the density of `A` larger than approximately 15%?
If the answer to several of these questions is yes, consider
converting the `SparseTensor` to a dense one and using `tf.matmul` with
`a_is_sparse=True`.
This operation tends to perform well when `A` is more sparse, if the column
size of the product is small (e.g. matrix-vector multiplication), if
`sp_a.dense_shape` takes on large values.
Below is a rough speed comparison between `sparse_tensor_dense_matmul`,
labeled 'sparse', and `matmul`(a_is_sparse=True), labeled 'dense'. For
purposes of the comparison, the time spent converting from a `SparseTensor` to
a dense `Tensor` is not included, so it is overly conservative with respect to
the time ratio.
Benchmark system:
CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB
GPU: NVidia Tesla k40c
Compiled with:
`-c opt --config=cuda --copt=-mavx`
```
tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks
A sparse [m, k] with % nonzero values between 1% and 80%
B dense [k, n]
% nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)
0.01 1 True 100 100 0.000221166 0.00010154 0.459112
0.01 1 True 100 1000 0.00033858 0.000109275 0.322745
0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385
0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669
0.01 1 False 100 100 0.000208085 0.000107603 0.51711
0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762
0.01 1 False 1000 100 0.000308222 0.00010345 0.335635
0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124
0.01 10 True 100 100 0.000218522 0.000105537 0.482958
0.01 10 True 100 1000 0.000340882 0.000111641 0.327506
0.01 10 True 1000 100 0.000315472 0.000117376 0.372064
0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128
0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354
0.01 10 False 100 1000 0.000330552 0.000112615 0.340687
0.01 10 False 1000 100 0.000341277 0.000114097 0.334324
0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549
0.01 25 True 100 100 0.000207806 0.000105977 0.509981
0.01 25 True 100 1000 0.000322879 0.00012921 0.400181
0.01 25 True 1000 100 0.00038262 0.00014158 0.370035
0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504
0.01 25 False 100 100 0.000209401 0.000104696 0.499979
0.01 25 False 100 1000 0.000321161 0.000130737 0.407076
0.01 25 False 1000 100 0.000377012 0.000136801 0.362856
0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413
0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833
0.2 1 True 100 1000 0.000348674 0.000147475 0.422959
0.2 1 True 1000 100 0.000336908 0.00010122 0.300439
0.2 1 True 1000 1000 0.001022 0.000203274 0.198898
0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746
0.2 1 False 100 1000 0.000356127 0.000146824 0.41228
0.2 1 False 1000 100 0.000322664 0.000100918 0.312764
0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648
0.2 10 True 100 100 0.000211692 0.000109903 0.519165
0.2 10 True 100 1000 0.000372819 0.000164321 0.440753
0.2 10 True 1000 100 0.000338651 0.000144806 0.427596
0.2 10 True 1000 1000 0.00108312 0.000758876 0.70064
0.2 10 False 100 100 0.000215727 0.000110502 0.512231
0.2 10 False 100 1000 0.000375419 0.0001613 0.429653
0.2 10 False 1000 100 0.000336999 0.000145628 0.432132
0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618
0.2 25 True 100 100 0.000218705 0.000129913 0.594009
0.2 25 True 100 1000 0.000394794 0.00029428 0.745402
0.2 25 True 1000 100 0.000404483 0.0002693 0.665788
0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052
0.2 25 False 100 100 0.000221494 0.0001306 0.589632
0.2 25 False 100 1000 0.000396436 0.000297204 0.74969
0.2 25 False 1000 100 0.000409346 0.000270068 0.659754
0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046
0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836
0.5 1 True 100 1000 0.000415328 0.000223073 0.537101
0.5 1 True 1000 100 0.000358324 0.00011269 0.314492
0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851
0.5 1 False 100 100 0.000224196 0.000101423 0.452386
0.5 1 False 100 1000 0.000400987 0.000223286 0.556841
0.5 1 False 1000 100 0.000368825 0.00011224 0.304318
0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563
0.5 10 True 100 100 0.000222125 0.000112308 0.505608
0.5 10 True 100 1000 0.000461088 0.00032357 0.701753
0.5 10 True 1000 100 0.000394624 0.000225497 0.571422
0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801
0.5 10 False 100 100 0.000232083 0.000114978 0.495418
0.5 10 False 100 1000 0.000454574 0.000324632 0.714146
0.5 10 False 1000 100 0.000379097 0.000227768 0.600817
0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638
0.5 25 True 100 100 0.00023429 0.000151703 0.647501
0.5 25 True 100 1000 0.000497462 0.000598873 1.20386
0.5 25 True 1000 100 0.000460778 0.000557038 1.20891
0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845
0.5 25 False 100 100 0.000228981 0.000155334 0.678371
0.5 25 False 100 1000 0.000496139 0.000620789 1.25124
0.5 25 False 1000 100 0.00045473 0.000551528 1.21287
0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927
0.8 1 True 100 100 0.000222037 0.000105301 0.47425
0.8 1 True 100 1000 0.000410804 0.000329327 0.801664
0.8 1 True 1000 100 0.000349735 0.000131225 0.375212
0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633
0.8 1 False 100 100 0.000214079 0.000107486 0.502085
0.8 1 False 100 1000 0.000413746 0.000323244 0.781261
0.8 1 False 1000 100 0.000348983 0.000131983 0.378193
0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282
0.8 10 True 100 100 0.000229159 0.00011825 0.516017
0.8 10 True 100 1000 0.000498845 0.000532618 1.0677
0.8 10 True 1000 100 0.000383126 0.00029935 0.781336
0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689
0.8 10 False 100 100 0.000230783 0.000124958 0.541452
0.8 10 False 100 1000 0.000493393 0.000550654 1.11606
0.8 10 False 1000 100 0.000377167 0.000298581 0.791642
0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024
0.8 25 True 100 100 0.000233496 0.000175241 0.75051
0.8 25 True 100 1000 0.00055654 0.00102658 1.84458
0.8 25 True 1000 100 0.000463814 0.000783267 1.68875
0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132
0.8 25 False 100 100 0.000240243 0.000175047 0.728625
0.8 25 False 100 1000 0.000578102 0.00104499 1.80763
0.8 25 False 1000 100 0.000485113 0.000776849 1.60138
0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992
```
Args:
sp_a: SparseTensor A, of rank 2.
b: A dense Matrix with the same dtype as sp_a.
adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,
this is transpose(conj(A)). Otherwise it's transpose(A).
adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,
this is transpose(conj(B)). Otherwise it's transpose(B).
name: A name prefix for the returned tensors (optional)
Returns:
A dense matrix (pseudo-code in dense np.matrix notation):
`A = A.H if adjoint_a else A`
`B = B.H if adjoint_b else B`
`return A*B`
"""
# pylint: enable=line-too-long
sp_a = _convert_to_sparse_tensor(sp_a)
with ops.name_scope(name, "SparseTensorDenseMatMul",
[sp_a.indices, sp_a.values, b]) as name:
b = ops.convert_to_tensor(b, name="b")
return gen_sparse_ops.sparse_tensor_dense_mat_mul(
a_indices=sp_a.indices,
a_values=sp_a.values,
a_shape=sp_a.dense_shape,
b=b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
@tf_export("sparse_softmax")
def sparse_softmax(sp_input, name=None):
"""Applies softmax to a batched N-D `SparseTensor`.
The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
(where `N >= 2`), and with indices sorted in the canonical lexicographic
order.
This op is equivalent to applying the normal `tf.nn.softmax()` to each
innermost logical submatrix with shape `[B, C]`, but with the catch that *the
implicitly zero elements do not participate*. Specifically, the algorithm is
equivalent to:
(1) Applies `tf.nn.softmax()` to a densified view of each innermost
submatrix with shape `[B, C]`, along the size-C dimension;
(2) Masks out the original implicitly-zero locations;
(3) Renormalizes the remaining elements.
Hence, the `SparseTensor` result has exactly the same non-zero indices and
shape.
Example:
```python
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
shape = [2, 2, 2] # 3-D SparseTensor
values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])
indices = np.vstack(np.where(values)).astype(np.int64).T
result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))
# ...returning a 3-D SparseTensor, equivalent to:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
```
Args:
sp_input: N-D `SparseTensor`, where `N >= 2`.
name: optional name of the operation.
Returns:
output: N-D `SparseTensor` representing the results.
"""
with ops.name_scope(name, "SparseSoftmax",
[sp_input.indices, sp_input.values]) as name:
out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,
sp_input.dense_shape)
return sparse_tensor.SparseTensor(sp_input.indices, out_vals,
sp_input.dense_shape)
@tf_export("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
"""Returns the element-wise max of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_maximum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMaximum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
"""Returns the element-wise min of two SparseTensors.
Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
Example:
```python
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])
res = tf.sparse_minimum(sp_zero, sp_one).eval()
# "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).
```
Args:
sp_a: a `SparseTensor` operand whose dtype is real, and indices
lexicographically ordered.
sp_b: the other `SparseTensor` operand with the same requirements (and the
same shape).
name: optional name of the operation.
Returns:
output: the output SparseTensor.
"""
with ops.name_scope(
name, "SparseSparseMinimum",
[sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name:
out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(
sp_a.indices,
sp_a.values,
sp_a.dense_shape,
sp_b.indices,
sp_b.values,
sp_b.dense_shape,
name=name)
return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)
@tf_export("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
"""Transposes a `SparseTensor`
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors.
For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:
[0, 3]: b
[0, 1]: a
[3, 1]: d
[2, 0]: c
then the output will be a `SparseTensor` of shape `[5, 4]` and
`indices` / `values`:
[0, 2]: c
[1, 0]: a
[1, 3]: d
[3, 0]: b
Args:
sp_input: The input `SparseTensor`.
perm: A permutation of the dimensions of `sp_input`.
name: A name prefix for the returned tensors (optional)
Returns:
A transposed `SparseTensor`.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
if perm is None:
rank = array_ops.rank(sp_input)
perm = (rank - 1) - math_ops.range(0, rank, 1)
indices = sp_input.indices
transposed_indices = array_ops.transpose(
array_ops.gather(array_ops.transpose(indices), perm))
perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm))
if perm_ is not None and sp_input.get_shape().is_fully_defined():
old_shape_ = sp_input.get_shape().as_list()
transposed_dense_shape = list(old_shape_) # Copy.
for i, p in enumerate(perm_):
transposed_dense_shape[i] = old_shape_[p]
else:
dense_shape = sp_input.dense_shape
transposed_dense_shape = array_ops.gather(dense_shape, perm)
transposed_st = sparse_tensor.SparseTensor(
transposed_indices, sp_input.values, transposed_dense_shape)
transposed_st = sparse_reorder(transposed_st)
return transposed_st
def _add_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.
Args:
sp_input: The input `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string 1-vector (1D `Tensor`), with the single element representing the
a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _add_many_sparse_to_tensors_map(sp_input,
container=None,
shared_name=None,
name=None):
"""Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
`SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
Args:
sp_input: The input rank `R` `SparseTensor`.
container: The container for the underlying `SparseTensorsMap` (optional).
shared_name: The shared name for the underlying `SparseTensorsMap`
(optional, defaults to the name of the newly created op).
name: A name prefix for the returned tensors (optional).
Returns:
A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorMap` underlying this op.
Raises:
TypeError: If `sp_input` is not a `SparseTensor`.
"""
sp_input = _convert_to_sparse_tensor(sp_input)
return gen_sparse_ops.add_many_sparse_to_tensors_map(
sp_input.indices,
sp_input.values,
sp_input.dense_shape,
container=container,
shared_name=shared_name,
name=name)
def _take_many_sparse_from_tensors_map(sparse_map_op,
sparse_handles,
rank=None,
name=None):
"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
The input `sparse_handles` must be a string matrix of shape `[N, 1]` where
`N` is the minibatch size and the rows correspond to packed outputs of
`add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects
must all match. When the final `SparseTensor` is created, it has rank one
higher than the ranks of the incoming `SparseTensor` objects (they have been
concatenated along a new row dimension).
The output `SparseTensor` object's shape values for all dimensions but the
first are the max across the input `SparseTensor` objects' shape values
for the corresponding dimensions. Its first shape value is `N`, the minibatch
size.
The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order. If this is not the case, after this
step run `sparse_reorder` to restore index ordering.
For example, if the serialized input is a `[2, 3]` matrix representing two
original `SparseTensor` objects:
index = [ 0]
[10]
[20]
values = [1, 2, 3]
shape = [50]
and
index = [ 2]
[10]
values = [4, 5]
shape = [30]
then the final deserialized `SparseTensor` will be:
index = [0 0]
[0 10]
[0 20]
[1 2]
[1 10]
values = [1, 2, 3, 4, 5]
shape = [2 50]
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
Returns:
A `SparseTensor` representing the deserialized `SparseTensor`s,
concatenated along the `SparseTensor`s' first dimension.
All of the serialized `SparseTensor`s must have had the same rank and type.
"""
if not isinstance(sparse_map_op, ops.Operation):
raise TypeError("sparse_map_op be an Operation")
if sparse_map_op.type not in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
raise TypeError(
"sparse_map_op must be one of AddSparseToTensorsMap or "
"AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type)
with ops.colocate_with(sparse_map_op):
shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name
output_indices, output_values, output_shape = (
gen_sparse_ops.take_many_sparse_from_tensors_map(
sparse_handles,
dtype=sparse_map_op.get_attr("T"),
container=sparse_map_op.get_attr("container"),
shared_name=shared_name,
name=name))
# Feed rank data back in, if available
output_indices.set_shape([None, rank])
output_shape.set_shape([rank])
return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
|
eaplatanios/tensorflow
|
tensorflow/python/ops/sparse_ops.py
|
Python
|
apache-2.0
| 82,052
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import acos_client.errors as acos_errors
import base
class Partition(base.BaseV21):
def exists(self, name):
if name == 'shared':
return True
try:
self._post("system.partition.search", {'name': name})
return True
except acos_errors.NotFound:
return False
def active(self, name='shared'):
if self.client.current_partition != name:
self._post("system.partition.active", {'name': name})
self.client.current_partition = name
def create(self, name):
params = {
'partition': {
'max_aflex_file': 32,
'network_partition': 0,
'name': name
}
}
if name != 'shared':
self._post("system.partition.create", params)
def delete(self, name):
if name != 'shared':
self.client.session.close()
self._post("system.partition.delete", {"name": name})
|
dougwig/acos-client
|
acos_client/v21/partition.py
|
Python
|
apache-2.0
| 1,617
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenStack Neat Project
==========================
OpenStack Neat is a project intended to provide an extension to
OpenStack implementing dynamic consolidation of Virtual Machines (VMs)
using live migration. The major objective of dynamic VM consolidation
is to improve the utilization of physical resources and reduce energy
consumption by re-allocating VMs using live migration according to
their real-time resource demand and switching idle hosts to the sleep
mode. Apart from consolidating VMs, the system should be able to react
to increases in the resource demand and deconsolidate VMs when
necessary to avoid performance degradation. In general, the problem of
dynamic VM consolidation includes 4 sub-problems: host underload /
overload detection, VM selection, and VM placement.
This work is conducted within the Cloud Computing and Distributed
Systems (CLOUDS) Laboratory (http://www.cloudbus.org/) at the
University of Melbourne. The problem of dynamic VM consolidation
considering Quality of Service (QoS) constraints has been studied from
the theoretical perspective and algorithms addressing the sub-problems
listed above have been proposed [1], [2]. The algorithms have been
evaluated using CloudSim (http://code.google.com/p/cloudsim/) and
real-world workload traces collected from more than a thousand
PlanetLab VMs hosted on servers located in more than 500 places around
the world.
The aim of the OpenStack Neat project is to provide an extensible
framework for dynamic consolidation of VMs based on the OpenStack
platform. The framework should provide an infrastructure enabling the
interaction of components implementing the decision-making algorithms.
The framework should allow configuration-driven switching of different
implementations of the decision-making algorithms. The implementation
of the framework will include the algorithms proposed in our previous
works [1], [2].
[1] Anton Beloglazov and Rajkumar Buyya, "Optimal Online Deterministic
Algorithms and Adaptive Heuristics for Energy and Performance
Efficient Dynamic Consolidation of Virtual Machines in Cloud Data
Centers", Concurrency and Computation: Practice and Experience (CCPE),
Volume 24, Issue 13, Pages: 1397-1420, John Wiley & Sons, Ltd, New
York, USA, 2012. Download:
http://beloglazov.info/papers/2012-optimal-algorithms-ccpe.pdf
[2] Anton Beloglazov and Rajkumar Buyya, "Managing Overloaded Hosts
for Dynamic Consolidation of Virtual Machines in Cloud Data Centers
Under Quality of Service Constraints", IEEE Transactions on Parallel
and Distributed Systems (TPDS), IEEE CS Press, USA, 2012 (in press,
accepted on August 2, 2012). Download:
http://beloglazov.info/papers/2012-host-overload-detection-tpds.pdf
"""
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
setup(
name='openstack-neat',
version='0.1',
description='The OpenStack Neat Project',
long_description=__doc__,
author='Anton Beloglazov',
author_email='anton.beloglazov@gmail.com',
url='https://github.com/beloglazov/openstack-neat',
platforms='any',
include_package_data=True,
license='LICENSE',
packages=find_packages(),
test_suite='tests',
tests_require=['pyqcy', 'mocktest', 'PyContracts'],
entry_points = {
'console_scripts': [
'neat-data-collector = neat.locals.collector:start',
'neat-local-manager = neat.locals.manager:start',
'neat-global-manager = neat.globals.manager:start',
'neat-db-cleaner = neat.globals.db_cleaner:start',
]
},
data_files = [('/etc/init.d', ['init.d/openstack-neat-data-collector',
'init.d/openstack-neat-local-manager',
'init.d/openstack-neat-global-manager',
'init.d/openstack-neat-db-cleaner']),
('/etc/neat', ['neat.conf'])],
)
|
beloglazov/openstack-neat
|
setup.py
|
Python
|
apache-2.0
| 4,540
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the delivery rate of all line items in an order.
To determine which line items exist, run get_all_line_items.py."""
# Import appropriate modules from the client library.
from googleads import dfp
# Set id of the order to get line items from.
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v201502')
# Create statement object to only select line items with even delivery rates.
values = [{
'key': 'deliveryRateType',
'value': {
'xsi_type': 'TextValue',
'value': 'EVENLY'
}
}, {
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
query = 'WHERE deliveryRateType = :deliveryRateType and orderId = :orderId'
statement = dfp.FilterStatement(query, values, 500)
# Get line items by statement.
response = line_item_service.getLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Update each local line item by changing its delivery rate type.
updated_line_items = []
for line_item in response['results']:
if not line_item['isArchived']:
line_item['deliveryRateType'] = 'AS_FAST_AS_POSSIBLE'
updated_line_items.append(line_item)
# Update line items remotely.
line_items = line_item_service.updateLineItems(updated_line_items)
# Display results.
if line_items:
for line_item in line_items:
print ('Line item with id \'%s\', belonging to order id \'%s\', named '
'\'%s\', and delivery rate \'%s\' was updated.'
% (line_item['id'], line_item['orderId'], line_item['name'],
line_item['deliveryRateType']))
else:
print 'No line items were updated.'
else:
print 'No line items found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ORDER_ID)
|
wubr2000/googleads-python-lib
|
examples/dfp/v201502/line_item_service/update_line_items.py
|
Python
|
apache-2.0
| 2,671
|
"""
Test multiword commands ('platform' in this case).
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class MultiwordCommandsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_ambiguous_subcommand(self):
self.expect("platform s", error=True,
substrs=["ambiguous command 'platform s'. Possible completions:",
"\tselect\n",
"\tshell\n",
"\tsettings\n"])
@no_debug_info_test
def test_empty_subcommand(self):
self.expect("platform \"\"", error=True, substrs=["Need to specify a non-empty subcommand."])
@no_debug_info_test
def test_help(self):
# <multiword> help brings up help.
self.expect("platform help",
substrs=["Commands to manage and create platforms.",
"Syntax: platform [",
"The following subcommands are supported:",
"connect",
"Select the current platform"])
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/functionalities/multiword-commands/TestMultiWordCommands.py
|
Python
|
apache-2.0
| 1,161
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import numbers
from contextlib import closing
from typing import Any, Iterable, Mapping, Optional, Sequence, Union
from airflow.operators.sql import BaseSQLOperator
from airflow.providers.google.suite.hooks.sheets import GSheetsHook
class SQLToGoogleSheetsOperator(BaseSQLOperator):
"""
Copy data from SQL results to provided Google Spreadsheet.
:param sql: The SQL to execute.
:param spreadsheet_id: The Google Sheet ID to interact with.
:param conn_id: the connection ID used to connect to the database.
:param parameters: The parameters to render the SQL query with.
:param database: name of database which overwrite the defined one in connection
:param spreadsheet_range: The A1 notation of the values to retrieve.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"sql",
"spreadsheet_id",
"spreadsheet_range",
"impersonation_chain",
)
template_fields_renderers = {"sql": "sql"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
spreadsheet_id: str,
sql_conn_id: str,
parameters: Optional[Union[Mapping, Iterable]] = None,
database: Optional[str] = None,
spreadsheet_range: str = "Sheet1",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.conn_id = sql_conn_id
self.database = database
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.spreadsheet_id = spreadsheet_id
self.spreadsheet_range = spreadsheet_range
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def _data_prep(self, data):
for row in data:
item_list = []
for item in row:
if isinstance(item, (datetime.date, datetime.datetime)):
item = item.isoformat()
elif isinstance(item, int): # To exclude int from the number check.
pass
elif isinstance(item, numbers.Number):
item = float(item)
item_list.append(item)
yield item_list
def _get_data(self):
hook = self.get_db_hook()
with closing(hook.get_conn()) as conn, closing(conn.cursor()) as cur:
self.log.info("Executing query")
cur.execute(self.sql, self.parameters or ())
yield [field[0] for field in cur.description]
yield from self._data_prep(cur.fetchall())
def execute(self, context: Any) -> None:
self.log.info("Getting data")
values = list(self._get_data())
self.log.info("Connecting to Google")
sheet_hook = GSheetsHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
self.log.info(f"Uploading data to https://docs.google.com/spreadsheets/d/{self.spreadsheet_id}")
sheet_hook.update_values(
spreadsheet_id=self.spreadsheet_id,
range_=self.spreadsheet_range,
values=values,
)
|
Acehaidrey/incubator-airflow
|
airflow/providers/google/suite/transfers/sql_to_sheets.py
|
Python
|
apache-2.0
| 5,101
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor =='win':
# # Copy additional generator configuration data from VS, which is shared by the Windows Ninja generator.
# import gyp.generator.msvs as msvs_generator
# generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', [])
# generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, _, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print('Wrote json to %s.' % filename)
|
arangodb/arangodb
|
3rdParty/V8/gyp/generator/dump_dependency_json.py
|
Python
|
apache-2.0
| 3,418
|
from nose.tools import *
from tests.base import ApiTestCase
from tests.factories import InstitutionFactory, AuthUserFactory, NodeFactory
from framework.auth import Auth
from api.base.settings.defaults import API_BASE
class TestInstitutionNodeList(ApiTestCase):
def setUp(self):
super(TestInstitutionNodeList, self).setUp()
self.institution = InstitutionFactory()
self.node1 = NodeFactory(is_public=True)
self.node1.primary_institution = self.institution
self.node1.save()
self.user1 = AuthUserFactory()
self.user2 = AuthUserFactory()
self.node2 = NodeFactory(creator=self.user1, is_public=False)
self.node2.primary_institution = self.institution
self.node2.add_contributor(self.user2, auth=Auth(self.user1))
self.node2.save()
self.node3 = NodeFactory(creator=self.user2, is_public=False)
self.node3.primary_institution = self.institution
self.node3.save()
self.institution_node_url = '/{0}institutions/{1}/nodes/'.format(API_BASE, self.institution._id)
def test_return_all_public_nodes(self):
res = self.app.get(self.institution_node_url)
assert_equal(res.status_code, 200)
ids = [each['id'] for each in res.json['data']]
assert_in(self.node1._id, ids)
assert_not_in(self.node2._id, ids)
assert_not_in(self.node3._id, ids)
def test_return_private_nodes_with_auth(self):
res = self.app.get(self.institution_node_url, auth=self.user1.auth)
assert_equal(res.status_code, 200)
ids = [each['id'] for each in res.json['data']]
assert_in(self.node1._id, ids)
assert_in(self.node2._id, ids)
assert_not_in(self.node3._id, ids)
def test_return_private_nodes_mixed_auth(self):
res = self.app.get(self.institution_node_url, auth=self.user2.auth)
assert_equal(res.status_code, 200)
ids = [each['id'] for each in res.json['data']]
assert_in(self.node1._id, ids)
assert_in(self.node2._id, ids)
assert_in(self.node3._id, ids)
|
brandonPurvis/osf.io
|
api_tests/institutions/views/test_institution_nodes_list.py
|
Python
|
apache-2.0
| 2,103
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
JSON KMS activation
"""
import os
import platform
import commands
import redhat.kms
class ActivateCommand(commands.CommandBase):
def __init__(self, *args, **kwargs):
pass
@staticmethod
def detect_os():
"""
Return the Linux Distribution or other OS name
"""
translations = {"redhat": redhat}
system = os.uname()[0]
if system == "Linux":
system = platform.linux_distribution(full_distribution_name=0)[0]
# Arch Linux returns None for platform.linux_distribution()
if not system and os.path.exists('/etc/arch-release'):
system = 'arch'
if not system:
return None
system = system.lower()
global DEFAULT_HOSTNAME
DEFAULT_HOSTNAME = system
return translations.get(system)
@commands.command_add('kmsactivate')
def activate_cmd(self, data):
os_mod = self.detect_os()
if not os_mod:
raise SystemError("KMS not supported on this OS")
return os_mod.kms.kms_activate(data)
|
prometheanfire/openstack-guest-agents-unix
|
commands/kms.py
|
Python
|
apache-2.0
| 1,790
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all networks that you have access to with the current login
credentials.
A networkCode should be left out for this request."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201411')
# Get all networks that you have access to with the current login credentials.
networks = network_service.getAllNetworks()
# Display results.
for network in networks:
print ('Network with network code \'%s\' and display name \'%s\' was found.'
% (network['networkCode'], network['displayName']))
print '\nNumber of results found: %s' % len(networks)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
coxmediagroup/googleads-python-lib
|
examples/dfp/v201411/network_service/get_all_networks.py
|
Python
|
apache-2.0
| 1,545
|
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from flask.ext.wtf import Form
from wtforms import validators
from digits import utils
from digits.utils import subclass
from digits.utils.forms import validate_required_iff
@subclass
class DatasetForm(Form):
"""
A form used to create an image processing dataset
"""
def validate_folder_path(form, field):
if not field.data:
pass
else:
# make sure the filesystem path exists
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError(
'Folder does not exist or is not reachable')
else:
return True
feature_folder = utils.forms.StringField(
u'Feature image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Indicate a folder full of images."
)
label_folder = utils.forms.StringField(
u'Label image folder',
validators=[
validators.DataRequired(),
validate_folder_path,
],
tooltip="Indicate a folder full of images. For each image in the feature"
" image folder there must be one corresponding image in the label"
" image folder. The label image must have the same filename except"
" for the extension, which may differ."
)
folder_pct_val = utils.forms.IntegerField(
u'% for validation',
default=10,
validators=[
validators.NumberRange(min=0, max=100)
],
tooltip="You can choose to set apart a certain percentage of images "
"from the training images for the validation set."
)
has_val_folder = utils.forms.BooleanField('Separate validation images',
default=False,
)
validation_feature_folder = utils.forms.StringField(
u'Validation feature image folder',
validators=[
validate_required_iff(has_val_folder=True),
validate_folder_path,
],
tooltip="Indicate a folder full of images."
)
validation_label_folder = utils.forms.StringField(
u'Validation label image folder',
validators=[
validate_required_iff(has_val_folder=True),
validate_folder_path,
],
tooltip="Indicate a folder full of images. For each image in the feature"
" image folder there must be one corresponding image in the label"
" image folder. The label image must have the same filename except"
" for the extension, which may differ."
)
channel_conversion = utils.forms.SelectField(
'Channel conversion',
choices=[
('RGB', 'RGB'),
('L', 'Grayscale'),
('none', 'None'),
],
default='none',
tooltip="Perform selected channel conversion."
)
|
ethantang95/DIGITS
|
digits/extensions/data/imageProcessing/forms.py
|
Python
|
bsd-3-clause
| 3,146
|
from __future__ import absolute_import
from .analytics import * # NOQA
from .base import * # NOQA
from .manager import IntegrationManager # NOQA
default_manager = IntegrationManager()
all = default_manager.all
get = default_manager.get
exists = default_manager.exists
register = default_manager.register
unregister = default_manager.unregister
|
ifduyue/sentry
|
src/sentry/integrations/__init__.py
|
Python
|
bsd-3-clause
| 350
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from django.test import RequestFactory
from exam import fixture
from sentry.middleware.user import UserActiveMiddleware
from sentry.testutils import TestCase
class UserActiveMiddlewareTest(TestCase):
middleware = fixture(UserActiveMiddleware)
factory = fixture(RequestFactory)
def test_simple(self):
self.view = lambda x: None
user = self.user
req = self.factory.get('/')
req.user = user
resp = self.middleware.process_view(req, self.view, [], {})
assert resp is None
assert timezone.now() - user.last_active < timedelta(minutes=1)
user.last_active = None
resp = self.middleware.process_view(req, self.view, [], {})
assert resp is None
assert timezone.now() - user.last_active < timedelta(minutes=1)
|
ifduyue/sentry
|
tests/sentry/middleware/test_useractive.py
|
Python
|
bsd-3-clause
| 912
|
from .betweenness import *
from .betweenness_subset import *
from .closeness import *
from .subgraph_alg import *
from .current_flow_closeness import *
from .current_flow_betweenness import *
from .current_flow_betweenness_subset import *
from .degree_alg import *
from .dispersion import *
from .eigenvector import *
from .harmonic import *
from .katz import *
from .load import *
|
andnovar/networkx
|
networkx/algorithms/centrality/__init__.py
|
Python
|
bsd-3-clause
| 382
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import supybot.conf as conf
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
class ChannelTestCase(ChannelPluginTestCase):
plugins = ('Channel', 'User')
def setUp(self):
super(ChannelTestCase, self).setUp()
self.irc.state.channels[self.channel].addUser('foo')
self.irc.state.channels[self.channel].addUser('bar')
def testLobotomies(self):
self.assertRegexp('lobotomy list', 'not.*any')
## def testCapabilities(self):
## self.prefix = 'foo!bar@baz'
## self.irc.feedMsg(ircmsgs.privmsg(self.irc.nick, 'register foo bar',
## prefix=self.prefix))
## u = ircdb.users.getUser(0)
## u.addCapability('%s.op' % self.channel)
## ircdb.users.setUser(u)
## self.assertNotError(' ')
## self.assertResponse('user capabilities foo', '[]')
## self.assertNotError('channel addcapability foo op')
## self.assertRegexp('channel capabilities foo', 'op')
## self.assertNotError('channel removecapability foo op')
## self.assertResponse('user capabilities foo', '[]')
def testCapabilities(self):
self.assertNotError('channel capability list')
self.assertNotError('channel capability set -foo')
self.assertNotError('channel capability unset -foo')
self.assertError('channel capability unset -foo')
self.assertNotError('channel capability set -foo bar baz')
self.assertRegexp('channel capability list', 'baz')
self.assertNotError('channel capability unset -foo baz')
self.assertError('channel capability unset baz')
def testEnableDisable(self):
self.assertNotRegexp('channel capability list', '-Channel')
self.assertError('channel enable channel')
self.assertNotError('channel disable channel')
self.assertRegexp('channel capability list', '-Channel')
self.assertNotError('channel enable channel')
self.assertNotRegexp('channel capability list', '-Channel')
self.assertNotError('channel disable channel nicks')
self.assertRegexp('channel capability list', '-Channel.nicks')
self.assertNotError('channel enable channel nicks')
self.assertNotRegexp('channel capability list', '-Channel.nicks')
self.assertNotRegexp('channel capability list', 'nicks')
self.assertNotError('channel disable nicks')
self.assertRegexp('channel capability list', 'nicks')
self.assertNotError('channel enable nicks')
self.assertError('channel disable invalidPlugin')
self.assertError('channel disable channel invalidCommand')
def testUnban(self):
self.assertError('unban foo!bar@baz')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
m = self.getMsg('unban foo!bar@baz')
self.assertEqual(m.command, 'MODE')
self.assertEqual(m.args, (self.channel, '-b', 'foo!bar@baz'))
self.assertNoResponse(' ', 2)
def testErrorsWithoutOps(self):
for s in 'op deop halfop dehalfop voice devoice kick invite'.split():
self.assertError('%s foo' % s)
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('%s foo' % s)
self.irc.feedMsg(ircmsgs.deop(self.channel, self.nick))
def testWontDeItself(self):
for s in 'deop dehalfop'.split():
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertError('%s %s' % (s, self.nick))
def testCanDevoiceSelf(self):
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('devoice %s' % self.nick)
def testOp(self):
self.assertError('op')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('op')
m = self.getMsg('op foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'foo'))
m = self.getMsg('op foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+o', 'bar'))
self.irc.state.supported['MODES'] = 2
m = self.getMsg('op foo bar')
try:
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+oo', 'foo', 'bar'))
finally:
self.irc.state.supported['MODES'] = 1
def testHalfOp(self):
self.assertError('halfop')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('halfop')
m = self.getMsg('halfop foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'foo'))
m = self.getMsg('halfop foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+h', 'bar'))
def testVoice(self):
self.assertError('voice')
self.irc.feedMsg(ircmsgs.op(self.channel, self.nick))
self.assertNotError('voice')
m = self.getMsg('voice foo')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'foo'))
m = self.getMsg('voice foo bar')
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'foo'))
m = self.irc.takeMsg()
self.failUnless(m.command == 'MODE' and
m.args == (self.channel, '+v', 'bar'))
def assertKban(self, query, hostmask, **kwargs):
m = self.getMsg(query, **kwargs)
self.assertEqual(m, ircmsgs.ban(self.channel, hostmask))
m = self.getMsg(' ')
self.assertEqual(m.command, 'KICK')
def assertBan(self, query, hostmask, **kwargs):
m = self.getMsg(query, **kwargs)
self.assertEqual(m, ircmsgs.ban(self.channel, hostmask))
def testIban(self):
self.irc.feedMsg(ircmsgs.join(self.channel,
prefix='foobar!user@host.domain.tld'))
self.assertError('iban foo!bar@baz')
self.irc.feedMsg(ircmsgs.op(self.channel, self.irc.nick))
self.assertBan('iban foo!bar@baz', 'foo!bar@baz')
self.assertBan('iban foobar', 'foobar!user@host.domain.tld')
conf.supybot.protocols.irc.strictRfc.setValue(True)
self.assertError('iban $a:nyuszika7h')
self.assertError('unban $a:nyuszika7h')
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.assertBan('iban $a:nyuszika7h', '$a:nyuszika7h')
self.assertNotError('unban $a:nyuszika7h')
## def testKban(self):
## self.irc.prefix = 'something!else@somehwere.else'
## self.irc.nick = 'something'
## self.irc.feedMsg(ircmsgs.join(self.channel,
## prefix='foobar!user@host.domain.tld'))
## self.assertError('kban foobar')
## self.irc.feedMsg(ircmsgs.op(self.channel, self.irc.nick))
## self.assertError('kban foobar -1')
## self.assertKban('kban foobar', '*!*@*.domain.tld')
## self.assertKban('kban --exact foobar', 'foobar!user@host.domain.tld')
## self.assertKban('kban --host foobar', '*!*@host.domain.tld')
## self.assertKban('kban --user foobar', '*!user@*')
## self.assertKban('kban --nick foobar', 'foobar!*@*')
## self.assertKban('kban --nick --user foobar', 'foobar!user@*')
## self.assertKban('kban --nick --host foobar',
## 'foobar!*@host.domain.tld')
## self.assertKban('kban --user --host foobar', '*!user@host.domain.tld')
## self.assertKban('kban --nick --user --host foobar',
## 'foobar!user@host.domain.tld')
## self.assertNotRegexp('kban adlkfajsdlfkjsd', 'KeyError')
## self.assertNotRegexp('kban foobar time', 'ValueError')
## self.assertError('kban %s' % self.irc.nick)
def testBan(self):
with conf.supybot.protocols.irc.banmask.context(['exact']):
self.assertNotError('ban add foo!bar@baz')
self.assertNotError('ban remove foo!bar@baz')
orig = conf.supybot.protocols.irc.strictRfc()
with conf.supybot.protocols.irc.strictRfc.context(True):
# something wonky is going on here. irc.error (src/Channel.py|449)
# is being called but the assert is failing
self.assertError('ban add not!a.hostmask')
self.assertNotRegexp('ban add not!a.hostmask', 'KeyError')
self.assertError('ban add $a:nyuszika7h')
self.assertError('ban remove $a:nyuszika7h')
conf.supybot.protocols.irc.strictRfc.setValue(False)
self.assertNotError('ban add $a:nyuszika7h')
self.assertNotError('ban remove $a:nyuszika7h')
def testBanList(self):
self.assertNotError('ban add foo!bar@baz')
self.assertNotError('ban add foobar!*@baz')
self.assertNotError('ban add foobar!qux@baz')
self.assertRegexp('ban list', r'.*foo!bar@baz.*')
self.assertRegexp('ban list', r'.*foobar!\*@baz.*')
self.assertRegexp('ban list', r'.*foobar!qux@baz.*')
self.assertNotRegexp('ban list foobar!*@baz', r'.*foo!bar@baz.*')
self.assertRegexp('ban list foobar!*@baz', r'.*foobar!\*@baz.*')
self.assertRegexp('ban list foobar!*@baz', r'.*foobar!qux@baz.*')
self.assertResponse('ban list foobar!\*@baz',
'"foobar!*@baz" (never expires)')
def testIgnore(self):
orig = conf.supybot.protocols.irc.banmask()
def ignore(given, expect=None):
if expect is None:
expect = given
self.assertNotError('channel ignore add %s' % given)
self.assertResponse('channel ignore list', "'%s'" % expect)
self.assertNotError('channel ignore remove %s' % expect)
self.assertRegexp('channel ignore list', 'not currently')
ignore('foo!bar@baz', '*!*@baz')
ignore('foo!*@*')
with conf.supybot.protocols.irc.banmask.context(['exact']):
ignore('foo!bar@baz')
ignore('foo!*@*')
self.assertError('ban add not!a.hostmask')
def testNicks(self):
self.assertResponse('channel nicks', 'bar, foo, and test')
self.assertResponse('channel nicks --count', '3')
def testPart(self):
def getAfterJoinMessages():
m = self.irc.takeMsg()
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.assertEqual(m.command, 'MODE')
m = self.irc.takeMsg()
self.assertEqual(m.command, 'WHO')
self.assertError('part #foo')
self.assertRegexp('part #foo', 'not in')
self.irc.feedMsg(ircmsgs.join('#foo', prefix=self.prefix))
getAfterJoinMessages()
m = self.getMsg('part #foo')
self.assertEqual(m.command, 'PART')
self.irc.feedMsg(ircmsgs.join('#foo', prefix=self.prefix))
getAfterJoinMessages()
m = self.getMsg('part #foo reason')
self.assertEqual(m.command, 'PART')
self.assertEqual(m.args[0], '#foo')
self.assertEqual(m.args[1], 'reason')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
ProgVal/Limnoria-test
|
plugins/Channel/test.py
|
Python
|
bsd-3-clause
| 13,250
|
# encoding: utf-8
import datetime
import logging
import os
import re
import urllib
import urllib2
from HTMLParser import HTMLParseError
from urlparse import urlparse
from BeautifulSoup import BeautifulSoup, Comment, NavigableString
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
import parse_knesset_bill_pdf
from knesset.utils import send_chat_notification
from laws.models import Bill, Law, GovProposal
from links.models import Link, LinkedFile
from mks.models import Knesset
from simple.constants import PRIVATE_LAWS_URL, KNESSET_LAWS_URL, GOV_LAWS_URL
from simple.government_bills.parse_government_bill_pdf import GovProposalParser
from simple.parsers.utils import laws_parser_utils
from simple.parsers.utils.laws_parser_utils import normalize_correction_title_dashes, clean_line
logger = logging.getLogger("open-knesset.parse_laws")
# don't parse laws from an older knesset
CUTOFF_DATE = datetime.date(2009, 2, 24)
class ParseLaws(object):
"""partially abstract class for parsing laws. contains one function used in few
cases (private and other laws). this function gives the required page
"""
url = None
def get_page_with_param(self, params):
logger.debug('get_page_with_param: self.url=%s, params=%s' % (self.url, params))
if not params:
try:
html_page = urllib2.urlopen(self.url).read().decode('windows-1255').encode('utf-8')
except urllib2.URLError as e:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': params})
return None
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("parsing URL: %s - %s. will try harder." % (self.url, e))
html_page = re.sub("(?s)<!--.*?-->", " ", html_page) # cut anything that looks suspicious
html_page = re.sub("(?s)<script>.*?</script>", " ", html_page)
html_page = re.sub("(?s)<!.*?>", " ", html_page)
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': None})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
else:
data = urllib.urlencode(params)
try:
url_data = urllib2.urlopen(self.url, data)
except urllib2.URLError:
logger.error("can't open URL: %s" % self.url)
send_chat_notification(__name__, 'failed to open url', {'url': self.url, 'params': data})
return None
html_page = url_data.read().decode('windows-1255').encode('utf-8')
try:
soup = BeautifulSoup(html_page)
except HTMLParseError as e:
logger.debug("error parsing URL: %s - %s" % (self.url, e))
send_chat_notification(__name__, 'failed to parse url', {'url': self.url, 'params': data})
return None
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
return soup
class ParsePrivateLaws(ParseLaws):
"""a class that parses private laws proposed
"""
# the constructor parses the laws data from the required pages
def __init__(self, days_back):
self.url = PRIVATE_LAWS_URL
self.rtf_url = r"http://www.knesset.gov.il/privatelaw"
self.laws_data = []
self.parse_pages_days_back(days_back)
# parses the required pages data
def parse_pages_days_back(self, days_back):
today = datetime.date.today()
last_required_date = today + datetime.timedelta(days=-days_back)
last_law_checked_date = today
index = None
while last_law_checked_date > last_required_date:
if index:
params = {'RowStart': index}
else:
params = None
soup_current_page = self.get_page_with_param(params)
if not soup_current_page:
return
index = self.get_param(soup_current_page)
self.parse_private_laws_page(soup_current_page)
last_law_checked_date = self.update_last_date()
def get_param(self, soup):
name_tags = soup.findAll(
lambda tag: tag.name == 'a' and tag.has_key('href') and re.match("javascript:SndSelf\((\d+)\);",
tag['href']))
if name_tags and name_tags[0].get('href'):
m = re.match("javascript:SndSelf\((\d+)\);", name_tags[0]['href'])
return m.groups(1)[0]
else:
logger.error('Can not find any more name tags')
return None
def parse_private_laws_page(self, soup):
name_tag = soup.findAll(lambda tag: tag.name == 'tr' and tag.has_key('valign') and tag['valign'] == 'Top')
for tag in name_tag:
tds = tag.findAll(lambda td: td.name == 'td')
law_data = {}
law_data['knesset_id'] = int(tds[0].string.strip())
law_data['law_id'] = int(tds[1].string.strip())
if tds[2].findAll('a')[0].has_key('href'):
law_data['text_link'] = self.rtf_url + r"/" + tds[2].findAll('a')[0]['href']
law_data['law_full_title'] = tds[3].string.strip()
parsed_law_title = laws_parser_utils.parse_title(law_data['law_full_title'])
if not parsed_law_title:
logger.warn("can't parse proposal title: %s" % law_data['law_full_title'])
continue
law_data['law_name'] = clean_line(parsed_law_title.group(1))
comment1 = parsed_law_title.group(3)
comment2 = parsed_law_title.group(5)
if comment2:
law_data['correction'] = clean_line(comment2)
law_data['comment'] = comment1
else:
law_data['comment'] = None
if comment1:
law_data['correction'] = clean_line(comment1)
else:
law_data['correction'] = None
law_data['correction'] = normalize_correction_title_dashes(law_data['correction'])
law_data['law_year'] = parsed_law_title.group(7)
law_data['proposal_date'] = datetime.datetime.strptime(tds[4].string.strip(), '%d/%m/%Y').date()
names_string = ''.join([unicode(y) for y in tds[5].findAll('font')[0].contents])
names_string = clean_line(names_string)
proposers = []
joiners = []
# Old deprecated way to search for joiners
if re.search('ONMOUSEOUT', names_string) > 0:
splitted_names = names_string.split('ONMOUSEOUT')
joiners = [name for name in re.match('(.*?)\',\'', splitted_names[0]).group(1).split('<br />') if
len(name) > 0]
proposers = splitted_names[1][10:].split('<br />')
else:
proposers = names_string.split('<br />')
more_joiners = [name for name in tds[6].findAll(text=lambda text: isinstance(text, NavigableString)) if
name.strip() not in [u'מצטרפים לחוק:', u'אין מצטרפים לחוק']]
if len(more_joiners) and not joiners:
joiners = more_joiners
law_data['proposers'] = proposers
law_data['joiners'] = joiners
self.laws_data.append(law_data)
def update_last_date(self):
return self.laws_data[-1]['proposal_date']
class ParseKnessetLaws(ParseLaws):
"""
A class that parses Knesset Laws (laws after committees)
the constructor parses the laws data from the required pages
"""
def __init__(self, min_booklet):
self.url = KNESSET_LAWS_URL
self.pdf_url = r"http://www.knesset.gov.il"
self.laws_data = []
self.min_booklet = min_booklet
self.parse_pages_booklet()
def parse_pages_booklet(self):
full_page_parsed = True
index = None
while full_page_parsed:
if index:
params = {'First': index[0], 'Start': index[1]}
else:
params = None
soup_current_page = self.get_page_with_param(params)
index = self.get_param(soup_current_page)
full_page_parsed = self.parse_laws_page(soup_current_page)
def get_param(self, soup):
name_tags = soup.findAll(
lambda tag: tag.name == 'a' and tag.has_key('href') and re.match("javascript:SndSelf\((\d+),(\d+)\);",
tag['href']))
if name_tags and name_tags[0] and name_tags[0].get('href'):
m = re.match("javascript:SndSelf\((\d+),(\d+)\);", name_tags[0]['href'])
return m.groups()
else:
if not name_tags:
logger.info('Failed to find name tags')
elif not name_tags[0].get('href'):
logger.error('First name tag missing href %s' % name_tags[0])
return None
def parse_pdf(self, pdf_url):
return parse_knesset_bill_pdf.parse(pdf_url)
def parse_laws_page(self, soup):
name_tags = soup.findAll(lambda tag: tag.name == 'a' and tag.has_key('href') and tag['href'].find(".pdf") >= 0)
for tag in name_tags:
pdf_link = self.pdf_url + tag['href']
booklet = re.search(r"/(\d+)/", tag['href']).groups(1)[0]
if int(booklet) <= self.min_booklet:
return False
pdf_data = self.parse_pdf(pdf_link) or []
for j in range(len(pdf_data)): # sometime there is more than 1 law in a pdf
title = pdf_data[j]['title']
m = re.findall('[^\(\)]*\((.*?)\)[^\(\)]', title)
try:
comment = m[-1].strip().replace('\n', '').replace(' ', ' ')
law = title[:title.find(comment) - 1]
except:
comment = None
law = title.replace(',', '')
try:
correction = m[-2].strip().replace('\n', '').replace(' ', ' ')
law = title[:title.find(correction) - 1]
except:
correction = None
correction = normalize_correction_title_dashes(correction)
law = law.strip().replace('\n', '').replace(' ', ' ')
if law.find("הצעת ".decode("utf8")) == 0:
law = law[5:]
law_data = {'booklet': booklet, 'link': pdf_link, 'law': law, 'correction': correction,
'comment': comment, 'date': pdf_data[j]['date']}
if 'original_ids' in pdf_data[j]:
law_data['original_ids'] = pdf_data[j]['original_ids']
if 'bill' in pdf_data[j]:
law_data['bill'] = pdf_data[j]['bill']
self.laws_data.append(law_data)
return True
def update_booklet(self):
return int(self.laws_data[-1]['booklet'])
class ParseGovLaws(ParseKnessetLaws):
def __init__(self, min_booklet):
self.url = GOV_LAWS_URL
self.pdf_url = r"http://www.knesset.gov.il"
self.laws_data = []
self.min_booklet = min_booklet
def parse_gov_laws(self):
""" entry point to start parsing """
self.parse_pages_booklet()
def parse_pdf(self, pdf_url):
""" Grab a single pdf url, using cache via LinkedFile
"""
existing_count = Link.objects.filter(url=pdf_url).count()
if existing_count >= 1:
if existing_count > 1:
logger.warn("found two objects with the url %s. Taking the first" % pdf_url)
link = Link.objects.filter(url=pdf_url).first()
filename = None
if existing_count > 0:
files = [f for f in link.linkedfile_set.order_by('last_updated') if f.link_file.name != '']
if len(files) > 0:
link_file = files[0]
filename = link_file.link_file.path
logger.debug('trying reusing %s from %s' % (pdf_url, filename))
if not os.path.exists(filename):
# for some reason the file can't be found, we'll just d/l
# it again
filename = None
logger.debug('not reusing because file not found')
if not filename:
logger.debug('getting %s' % pdf_url)
contents = urllib2.urlopen(pdf_url).read()
link_file = LinkedFile()
saved_filename = os.path.basename(urlparse(pdf_url).path)
link_file.link_file.save(saved_filename, ContentFile(contents))
filename = link_file.link_file.path
try:
prop = GovProposalParser(filename)
except Exception:
logger.exception('Gov proposal exception %s'.format(pdf_url))
return None
# TODO: check if parsing handles more than 1 prop in a booklet
x = {'title': prop.get_title(),
'date': prop.get_date(),
# 'bill':prop,
'link_file': link_file}
return [x]
def update_single_bill(self, pdf_link, booklet=None, alt_title=None):
gp = None
if booklet is None:
# get booklet from existing bill
gps = GovProposal.objects.filter(source_url=pdf_link)
if gps.count() < 1:
logger.error('no existing object with given pdf link and no '
'booklet given. pdf_link = %s' % pdf_link)
return
gp = gps[0]
booklet = gp.booklet_number
pdf_data = self.parse_pdf(pdf_link)
if pdf_data is None:
return
for j in range(len(pdf_data)): # sometime there is more than 1 gov
# bill in a pdf
if alt_title: # just use the given title
title = alt_title
else: # get the title from the PDF file itself.
# doesn't work so well
title = pdf_data[j]['title']
m = re.findall('[^\(\)]*\((.*?)\)[^\(\)]', title)
try:
comment = m[-1].strip().replace('\n', '').replace(
' ', ' ')
law = title[:title.find(comment) - 1]
except:
comment = None
law = title.replace(',', '')
try:
correction = m[-2].strip().replace('\n', '').replace(
' ', ' ')
law = title[:title.find(correction) - 1]
except:
correction = None
correction = normalize_correction_title_dashes(correction)
law = law.strip().replace('\n', '').replace(' ', ' ')
if law.find("הצעת ".decode("utf8")) == 0:
law = law[5:]
law_data = {'booklet': booklet, 'link': pdf_link,
'law': law, 'correction': correction,
'comment': comment, 'date': pdf_data[j]['date']}
if 'original_ids' in pdf_data[j]:
law_data['original_ids'] = pdf_data[j]['original_ids']
if 'bill' in pdf_data[j]:
law_data['bill'] = pdf_data[j]['bill']
self.laws_data.append(law_data)
self.create_or_update_single_bill(
data=law_data,
pdf_link=pdf_link,
link_file=pdf_data[j]['link_file'],
gp=gp)
def create_or_update_single_bill(self, data, pdf_link, link_file, gp=None):
"""
data - a dict of data for this gov proposal
pdf_link - the source url from which the bill is taken
link_file - a cached version of the pdf
gp - an existing GovProposal objects. if this is given, it will be
updated, instead of creating a new object
"""
if not (data['date']) or CUTOFF_DATE and data['date'] < CUTOFF_DATE:
return
law_name = data['law']
try:
law, created = Law.objects.get_or_create(title=law_name)
except Law.MultipleObjectsReturned:
created = False
try:
law = Law.objects.filter(title=law_name, merged_into=None).last()
except Law.MultipleObjectsReturned: # How is this possible? probably another bug somewhere
law = Law.objects.filter(title=law_name).last()
if created:
law.save()
if law.merged_into:
law = law.merged_into
title = u''
if data['correction']:
title += data['correction']
if data['comment']:
title += ' ' + data['comment']
if len(title) <= 1:
title = u'חוק חדש'
k_id = Knesset.objects.get_knesset_by_date(data['date']).pk
if gp is None: # create new GovProposal, or look for an identical one
(gp, created) = GovProposal.objects.get_or_create(
booklet_number=data['booklet'],
source_url=data['link'],
title=title,
law=law,
date=data['date'], defaults={'knesset_id': k_id})
if created:
gp.save()
logger.debug("created GovProposal id = %d" % gp.id)
# look for similar bills
bill_params = dict(law=law, title=title, stage='3',
stage_date=data['date'])
similar_bills = Bill.objects.filter(**bill_params).order_by('id')
if len(similar_bills) >= 1:
b = similar_bills[0]
if len(similar_bills) > 1:
logger.debug("multiple bills detected")
for bill in similar_bills:
if bill.id == b.id:
logger.debug("bill being used now: %d" % bill.id)
else:
logger.debug("bill with same fields: %d" % bill.id)
else: # create a bill
b = Bill(**bill_params)
b.save()
logger.debug("created bill %d" % b.id)
# see if the found bill is already linked to a gov proposal
try:
bill_gp_id = b.gov_proposal.id
except GovProposal.DoesNotExist:
bill_gp_id = None
if (bill_gp_id is None) or (gp.id == b.gov_proposal.id):
# b is not linked to gp, or linked to the current gp
gp.bill = b
gp.save()
else:
logger.debug("processing gp %d - matching bill (%d) already has gp"
" (%d)" % (gp.id, b.id, b.gov_proposal.id))
else: # update a given GovProposal
# TODO: move to a classmethod
gp.booklet_number = data['booklet']
gp.knesset_id = k_id
gp.source_url = data['link']
gp.title = title
gp.law = law
gp.date = data['date']
gp.save()
gp.bill.title = title
gp.bill.law = law
gp.bill.save()
b = gp.bill
if (link_file is not None) and (link_file.link is None):
link = Link(title=pdf_link, url=pdf_link,
content_type=ContentType.objects.get_for_model(gp),
object_pk=str(gp.id))
link.save()
link_file.link = link
link_file.save()
logger.debug("check updated %s" % b.get_absolute_url())
def parse_laws_page(self, soup):
# Fall back to regex, because these pages are too broken to get the
# <td> element we need with BS"""
u = unicode(soup)
pairs = []
curr_href = None
for line in u.split('\n'):
# This builds upon always having the pdf in one line and then the actual title, else would cause errors
curr_title = None
if '.pdf' in line:
curr_href = re.search('href="(.*?)"', line).group(1)
if 'LawText1">' in line:
try:
curr_title = re.search('LawText1">(.*?)</', line).group(1)
except AttributeError:
curr_title = re.search('LawText1">(.*?)\r', line).group(1)
pairs.append((curr_title, curr_href))
if not pairs:
return False
for title, href in pairs:
try:
pdf_link = self.pdf_url + href
booklet = re.search(r"/(\d+)/", href).groups(1)[0]
if int(booklet) <= self.min_booklet:
return False
self.update_single_bill(pdf_link, booklet=booklet, alt_title=title)
except TypeError:
logger.exception('law scraping exception pdf_url: %s href %s' % (self.pdf_url, href))
return True
#############
# Main #
#############
if __name__ == '__main__':
m = ParsePrivateLaws(15)
|
OriHoch/Open-Knesset
|
simple/parsers/parse_laws.py
|
Python
|
bsd-3-clause
| 21,686
|
from django.http import HttpResponse
from django.test import TestCase
from ..pipeline import make_staff
class Backend(object):
name = None
def __init__(self, name, *args, **kwargs):
super(Backend, self).__init__(*args, **kwargs)
self.name = name
class MockSuperUser(object):
is_staff = False
is_superuser = False
def save(self):
pass
class PipelineTest(TestCase):
def test_make_staff(self):
facebook_backend = Backend('facebook')
google_plus_backend = Backend('google-plus')
user = MockSuperUser()
response = HttpResponse()
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
make_staff(facebook_backend, user, response)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
make_staff(google_plus_backend, user, response)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
|
dan-gamble/cms
|
cms/tests/test_pipeline.py
|
Python
|
bsd-3-clause
| 978
|
#!/usr/bin/env python
'''
gsconfig is a python library for manipulating a GeoServer instance via the GeoServer RESTConfig API.
The project is distributed under a MIT License .
'''
__author__ = "David Winslow"
__copyright__ = "Copyright 2012-2015 Boundless, Copyright 2010-2012 OpenPlans"
__license__ = "MIT"
from geoserver.catalog import Catalog
cat = Catalog("http://localhost:8080/geoserver/rest", "admin", "geoserver")
pg_stores = [s for s in cat.get_stores()
if s.connection_parameters and \
s.connection_parameters.get("dbtype") == "postgis"]
res = []
for s in pg_stores:
res.extend(r.name for r in cat.get_resources(store=s))
print res
|
scottp-dpaw/gsconfig
|
examples/postgislayers.py
|
Python
|
mit
| 662
|
#!/usr/bin/env python
# Copyright (c) 2021 by
# Donatas Abraitis <donatas.abraitis@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Test if BGP community alias is visible in CLI outputs
"""
import os
import sys
import json
import pytest
import functools
pytestmark = pytest.mark.bgpd
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
from lib import topotest
from lib.topogen import Topogen, TopoRouter, get_topogen
pytestmark = [pytest.mark.bgpd]
def build_topo(tgen):
for routern in range(1, 3):
tgen.add_router("r{}".format(routern))
switch = tgen.add_switch("s1")
switch.add_link(tgen.gears["r1"])
switch.add_link(tgen.gears["r2"])
def setup_module(mod):
tgen = Topogen(build_topo, mod.__name__)
tgen.start_topology()
router_list = tgen.routers()
for i, (rname, router) in enumerate(router_list.items(), 1):
router.load_config(
TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname))
)
router.load_config(
TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname))
)
tgen.start_router()
def teardown_module(mod):
tgen = get_topogen()
tgen.stop_topology()
def test_bgp_community_alias():
tgen = get_topogen()
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
router = tgen.gears["r1"]
def _bgp_converge(router):
output = json.loads(router.vtysh_cmd("show ip route json"))
expected = {
"172.16.16.1/32": [
{
"tag": 10,
"communities": "community-r2-1 65001:2",
"largeCommunities": "large-community-r2-1 65001:1:2",
}
],
"172.16.16.2/32": [
{
"tag": 20,
"communities": "65002:1 community-r2-2",
"largeCommunities": "",
}
],
"172.16.16.3/32": [
{
"tag": 100,
"communities": "",
"largeCommunities": "",
}
],
}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_converge, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Cannot see BGP community aliases at r1"
def _bgp_show_prefixes_by_alias(router):
output = json.loads(
router.vtysh_cmd(
"show bgp ipv4 unicast alias large-community-r2-1 json detail"
)
)
expected = {
"routes": {
"172.16.16.1/32": [
{
"community": {"string": "community-r2-1 65001:2"},
"largeCommunity": {"string": "large-community-r2-1 65001:1:2"},
}
]
}
}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_show_prefixes_by_alias, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Cannot see BGP prefixes by community alias at r1"
def _bgp_show_prefixes_by_large_community_list(router):
output = json.loads(
router.vtysh_cmd("show bgp ipv4 unicast large-community-list r2 json")
)
expected = {"routes": {"172.16.16.1/32": [{"valid": True}]}}
return topotest.json_cmp(output, expected)
test_func = functools.partial(_bgp_show_prefixes_by_large_community_list, router)
success, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5)
assert result is None, "Cannot see BGP prefixes by large community list at r1"
if __name__ == "__main__":
args = ["-s"] + sys.argv[1:]
sys.exit(pytest.main(args))
|
freerangerouting/frr
|
tests/topotests/bgp_community_alias/test_bgp-community-alias.py
|
Python
|
gpl-2.0
| 4,645
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemLegend.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '24/10/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
from qgis.PyQt.QtCore import QRectF, QDir
from qgis.PyQt.QtGui import QColor
from qgis.core import (QgsPrintLayout,
QgsLayoutItemLegend,
QgsLayoutItemMap,
QgsLayout,
QgsMapSettings,
QgsVectorLayer,
QgsMarkerSymbol,
QgsSingleSymbolRenderer,
QgsRectangle,
QgsProject,
QgsLayoutObject,
QgsProperty,
QgsLayoutMeasurement,
QgsLayoutItem,
QgsLayoutPoint,
QgsLayoutSize,
QgsExpression,
QgsMapLayerLegendUtils,
QgsLegendStyle,
QgsFontUtils,
QgsLineSymbol,
QgsMapThemeCollection,
QgsCategorizedSymbolRenderer,
QgsRendererCategory,
QgsFillSymbol,
QgsApplication)
from qgis.testing import (start_app,
unittest
)
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
import os
from time import sleep
from test_qgslayoutitem import LayoutItemTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutItemLegend(unittest.TestCase, LayoutItemTestCase):
@classmethod
def setUpClass(cls):
cls.item_class = QgsLayoutItemLegend
def setUp(self):
self.report = "<h1>Python QgsLayoutItemLegend Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testInitialSizeSymbolMapUnits(self):
"""Test initial size of legend with a symbol size in map units"""
QgsProject.instance().removeAllMapLayers()
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
QgsProject.instance().clear()
QgsProject.instance().addMapLayers([point_layer])
marker_symbol = QgsMarkerSymbol.createSimple(
{'color': '#ff0000', 'outline_style': 'no', 'size': '5', 'size_unit': 'MapUnit'})
point_layer.setRenderer(QgsSingleSymbolRenderer(marker_symbol))
s = QgsMapSettings()
s.setLayers([point_layer])
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 80, 80))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
checker = QgsLayoutChecker(
'composer_legend_mapunits', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
# resize with non-top-left reference point
legend.setResizeToContents(False)
legend.setReferencePoint(QgsLayoutItem.LowerRight)
legend.attemptMove(QgsLayoutPoint(120, 90))
legend.attemptResize(QgsLayoutSize(50, 60))
self.assertEqual(legend.positionWithUnits().x(), 120.0)
self.assertEqual(legend.positionWithUnits().y(), 90.0)
self.assertAlmostEqual(legend.pos().x(), 70, -1)
self.assertAlmostEqual(legend.pos().y(), 30, -1)
legend.setResizeToContents(True)
legend.updateLegend()
self.assertEqual(legend.positionWithUnits().x(), 120.0)
self.assertEqual(legend.positionWithUnits().y(), 90.0)
self.assertAlmostEqual(legend.pos().x(), 91, -1)
self.assertAlmostEqual(legend.pos().y(), 71, -1)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testResizeWithMapContent(self):
"""Test test legend resizes to match map content"""
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
QgsProject.instance().addMapLayers([point_layer])
s = QgsMapSettings()
s.setLayers([point_layer])
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 80, 80))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
legend.setLegendFilterByMapEnabled(True)
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_size_content', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testResizeWithMapContentNoDoublePaint(self):
"""Test test legend resizes to match map content"""
poly_path = os.path.join(TEST_DATA_DIR, 'polys.shp')
poly_layer = QgsVectorLayer(poly_path, 'polys', 'ogr')
p = QgsProject()
p.addMapLayers([poly_layer])
fill_symbol = QgsFillSymbol.createSimple({'color': '255,0,0,125', 'outline_style': 'no'})
poly_layer.setRenderer(QgsSingleSymbolRenderer(fill_symbol))
s = QgsMapSettings()
s.setLayers([poly_layer])
layout = QgsLayout(p)
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([poly_layer])
layout.addLayoutItem(map)
map.setExtent(poly_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 80, 80))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundEnabled(False)
legend.setTitle('')
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_size_content_no_double_paint', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
def testResizeDisabled(self):
"""Test that test legend does not resize if auto size is disabled"""
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
QgsProject.instance().addMapLayers([point_layer])
s = QgsMapSettings()
s.setLayers([point_layer])
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 80, 80))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
legend.setLegendFilterByMapEnabled(True)
# disable auto resizing
legend.setResizeToContents(False)
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_noresize', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testResizeDisabledCrop(self):
"""Test that if legend resizing is disabled, and legend is too small, then content is cropped"""
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
QgsProject.instance().addMapLayers([point_layer])
s = QgsMapSettings()
s.setLayers([point_layer])
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 20, 20))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
legend.setLegendFilterByMapEnabled(True)
# disable auto resizing
legend.setResizeToContents(False)
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_noresize_crop', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testDataDefinedTitle(self):
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
legend = QgsLayoutItemLegend(layout)
layout.addLayoutItem(legend)
legend.setTitle('original')
self.assertEqual(legend.title(), 'original')
self.assertEqual(legend.legendSettings().title(), 'original')
legend.dataDefinedProperties().setProperty(QgsLayoutObject.LegendTitle, QgsProperty.fromExpression("'new'"))
legend.refreshDataDefinedProperty()
self.assertEqual(legend.title(), 'original')
self.assertEqual(legend.legendSettings().title(), 'new')
def testDataDefinedColumnCount(self):
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
layout.addLayoutItem(legend)
legend.setColumnCount(2)
self.assertEqual(legend.columnCount(), 2)
self.assertEqual(legend.legendSettings().columnCount(), 2)
legend.dataDefinedProperties().setProperty(QgsLayoutObject.LegendColumnCount, QgsProperty.fromExpression("5"))
legend.refreshDataDefinedProperty()
self.assertEqual(legend.columnCount(), 2)
self.assertEqual(legend.legendSettings().columnCount(), 5)
def testLegendScopeVariables(self):
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
layout.addLayoutItem(legend)
legend.setColumnCount(2)
legend.setWrapString('d')
legend.setLegendFilterOutAtlas(True)
expc = legend.createExpressionContext()
exp1 = QgsExpression("@legend_title")
self.assertEqual(exp1.evaluate(expc), "Legend")
exp2 = QgsExpression("@legend_column_count")
self.assertEqual(exp2.evaluate(expc), 2)
exp3 = QgsExpression("@legend_wrap_string")
self.assertEqual(exp3.evaluate(expc), 'd')
exp4 = QgsExpression("@legend_split_layers")
self.assertEqual(exp4.evaluate(expc), False)
exp5 = QgsExpression("@legend_filter_out_atlas")
self.assertEqual(exp5.evaluate(expc), True)
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setExtent(QgsRectangle(781662.375, 3339523.125, 793062.375, 3345223.125))
layout.addLayoutItem(map)
map.setScale(15000)
legend.setLinkedMap(map)
expc2 = legend.createExpressionContext()
exp6 = QgsExpression("@map_scale")
self.assertAlmostEqual(exp6.evaluate(expc2), 15000, 2)
def testExpressionInText(self):
"""Test expressions embedded in legend node text"""
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
layout = QgsPrintLayout(QgsProject.instance())
layout.setName('LAYOUT')
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 100, 100))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
legend.setLegendFilterByMapEnabled(False)
legend.setStyleFont(QgsLegendStyle.Title, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Group, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Subgroup, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Symbol, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.SymbolLabel, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setAutoUpdateModel(False)
QgsProject.instance().addMapLayers([point_layer])
s = QgsMapSettings()
s.setLayers([point_layer])
group = legend.model().rootGroup().addGroup("Group [% 1 + 5 %] [% @layout_name %]")
layer_tree_layer = group.addLayer(point_layer)
layer_tree_layer.setCustomProperty("legend/title-label",
'bbbb [% 1+2 %] xx [% @layout_name %] [% @layer_name %]')
QgsMapLayerLegendUtils.setLegendNodeUserLabel(layer_tree_layer, 0, 'xxxx')
legend.model().refreshLayerLegend(layer_tree_layer)
legend.model().layerLegendNodes(layer_tree_layer)[0].setUserLabel(
'bbbb [% 1+2 %] xx [% @layout_name %] [% @layer_name %]')
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_expressions', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testSymbolExpressions(self):
"Test expressions embedded in legend node text"
QgsProject.instance().clear()
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
layout = QgsPrintLayout(QgsProject.instance())
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
layer = QgsProject.instance().addMapLayer(point_layer)
legendlayer = legend.model().rootGroup().addLayer(point_layer)
counterTask = point_layer.countSymbolFeatures()
counterTask.waitForFinished()
legend.model().refreshLayerLegend(legendlayer)
legendnodes = legend.model().layerLegendNodes(legendlayer)
legendnodes[0].setUserLabel('[% @symbol_id %]')
legendnodes[1].setUserLabel('[% @symbol_count %]')
legendnodes[2].setUserLabel('[% sum("Pilots") %]')
label1 = legendnodes[0].evaluateLabel()
label2 = legendnodes[1].evaluateLabel()
label3 = legendnodes[2].evaluateLabel()
self.assertEqual(label1, '0')
# self.assertEqual(label2, '5')
# self.assertEqual(label3, '12')
legendlayer.setLabelExpression("Concat(@symbol_label, @symbol_id)")
label1 = legendnodes[0].evaluateLabel()
label2 = legendnodes[1].evaluateLabel()
label3 = legendnodes[2].evaluateLabel()
self.assertEqual(label1, ' @symbol_id 0')
# self.assertEqual(label2, '@symbol_count 1')
# self.assertEqual(label3, 'sum("Pilots") 2')
QgsProject.instance().clear()
def testSymbolExpressionRender(self):
"""Test expressions embedded in legend node text"""
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
layout = QgsPrintLayout(QgsProject.instance())
layout.setName('LAYOUT')
layout.initializeDefaults()
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map.setFrameEnabled(True)
map.setLayers([point_layer])
layout.addLayoutItem(map)
map.setExtent(point_layer.extent())
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 100, 100))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
legend.setLegendFilterByMapEnabled(False)
legend.setStyleFont(QgsLegendStyle.Title, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Group, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Subgroup, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.Symbol, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setStyleFont(QgsLegendStyle.SymbolLabel, QgsFontUtils.getStandardTestFont('Bold', 16))
legend.setAutoUpdateModel(False)
QgsProject.instance().addMapLayers([point_layer])
s = QgsMapSettings()
s.setLayers([point_layer])
group = legend.model().rootGroup().addGroup("Group [% 1 + 5 %] [% @layout_name %]")
layer_tree_layer = group.addLayer(point_layer)
counterTask = point_layer.countSymbolFeatures()
counterTask.waitForFinished()
layer_tree_layer.setCustomProperty("legend/title-label",
'bbbb [% 1+2 %] xx [% @layout_name %] [% @layer_name %]')
QgsMapLayerLegendUtils.setLegendNodeUserLabel(layer_tree_layer, 0, 'xxxx')
legend.model().refreshLayerLegend(layer_tree_layer)
layer_tree_layer.setLabelExpression('Concat(@symbol_id, @symbol_label, count("Class"))')
legend.model().layerLegendNodes(layer_tree_layer)[0].setUserLabel(' sym 1')
legend.model().layerLegendNodes(layer_tree_layer)[1].setUserLabel('[%@symbol_count %]')
legend.model().layerLegendNodes(layer_tree_layer)[2].setUserLabel('[% count("Class") %]')
layout.addLayoutItem(legend)
legend.setLinkedMap(map)
legend.updateLegend()
print(layer_tree_layer.labelExpression())
map.setExtent(QgsRectangle(-102.51, 41.16, -102.36, 41.30))
checker = QgsLayoutChecker(
'composer_legend_symbol_expression', layout)
checker.setControlPathPrefix("composer_legend")
sleep(4)
result, message = checker.testLayout()
self.assertTrue(result, message)
QgsProject.instance().removeMapLayers([point_layer.id()])
def testThemes(self):
layout = QgsPrintLayout(QgsProject.instance())
layout.setName('LAYOUT')
map = QgsLayoutItemMap(layout)
layout.addLayoutItem(map)
legend = QgsLayoutItemLegend(layout)
self.assertFalse(legend.themeName())
legend.setLinkedMap(map)
self.assertFalse(legend.themeName())
map.setFollowVisibilityPresetName('theme1')
map.setFollowVisibilityPreset(True)
self.assertEqual(legend.themeName(), 'theme1')
map.setFollowVisibilityPresetName('theme2')
self.assertEqual(legend.themeName(), 'theme2')
map.setFollowVisibilityPreset(False)
self.assertFalse(legend.themeName())
# with theme set before linking map
map2 = QgsLayoutItemMap(layout)
map2.setFollowVisibilityPresetName('theme3')
map2.setFollowVisibilityPreset(True)
legend.setLinkedMap(map2)
self.assertEqual(legend.themeName(), 'theme3')
map2.setFollowVisibilityPresetName('theme2')
self.assertEqual(legend.themeName(), 'theme2')
# replace with map with no theme
map3 = QgsLayoutItemMap(layout)
legend.setLinkedMap(map3)
self.assertFalse(legend.themeName())
def testLegendRenderWithMapTheme(self):
"""Test rendering legends linked to map themes"""
QgsProject.instance().removeAllMapLayers()
point_path = os.path.join(TEST_DATA_DIR, 'points.shp')
point_layer = QgsVectorLayer(point_path, 'points', 'ogr')
line_path = os.path.join(TEST_DATA_DIR, 'lines.shp')
line_layer = QgsVectorLayer(line_path, 'lines', 'ogr')
QgsProject.instance().clear()
QgsProject.instance().addMapLayers([point_layer, line_layer])
marker_symbol = QgsMarkerSymbol.createSimple({'color': '#ff0000', 'outline_style': 'no', 'size': '5'})
point_layer.setRenderer(QgsSingleSymbolRenderer(marker_symbol))
point_layer.styleManager().addStyleFromLayer("red")
line_symbol = QgsLineSymbol.createSimple({'color': '#ff0000', 'line_width': '2'})
line_layer.setRenderer(QgsSingleSymbolRenderer(line_symbol))
line_layer.styleManager().addStyleFromLayer("red")
red_record = QgsMapThemeCollection.MapThemeRecord()
point_red_record = QgsMapThemeCollection.MapThemeLayerRecord(point_layer)
point_red_record.usingCurrentStyle = True
point_red_record.currentStyle = 'red'
red_record.addLayerRecord(point_red_record)
line_red_record = QgsMapThemeCollection.MapThemeLayerRecord(line_layer)
line_red_record.usingCurrentStyle = True
line_red_record.currentStyle = 'red'
red_record.addLayerRecord(line_red_record)
QgsProject.instance().mapThemeCollection().insert('red', red_record)
marker_symbol1 = QgsMarkerSymbol.createSimple({'color': '#0000ff', 'outline_style': 'no', 'size': '5'})
marker_symbol2 = QgsMarkerSymbol.createSimple(
{'color': '#0000ff', 'name': 'diamond', 'outline_style': 'no', 'size': '5'})
marker_symbol3 = QgsMarkerSymbol.createSimple(
{'color': '#0000ff', 'name': 'rectangle', 'outline_style': 'no', 'size': '5'})
point_layer.setRenderer(QgsCategorizedSymbolRenderer('Class', [QgsRendererCategory('B52', marker_symbol1, ''),
QgsRendererCategory('Biplane', marker_symbol2,
''),
QgsRendererCategory('Jet', marker_symbol3, ''),
]))
point_layer.styleManager().addStyleFromLayer("blue")
line_symbol = QgsLineSymbol.createSimple({'color': '#0000ff', 'line_width': '2'})
line_layer.setRenderer(QgsSingleSymbolRenderer(line_symbol))
line_layer.styleManager().addStyleFromLayer("blue")
blue_record = QgsMapThemeCollection.MapThemeRecord()
point_blue_record = QgsMapThemeCollection.MapThemeLayerRecord(point_layer)
point_blue_record.usingCurrentStyle = True
point_blue_record.currentStyle = 'blue'
blue_record.addLayerRecord(point_blue_record)
line_blue_record = QgsMapThemeCollection.MapThemeLayerRecord(line_layer)
line_blue_record.usingCurrentStyle = True
line_blue_record.currentStyle = 'blue'
blue_record.addLayerRecord(line_blue_record)
QgsProject.instance().mapThemeCollection().insert('blue', blue_record)
layout = QgsLayout(QgsProject.instance())
layout.initializeDefaults()
map1 = QgsLayoutItemMap(layout)
map1.attemptSetSceneRect(QRectF(20, 20, 80, 80))
map1.setFrameEnabled(True)
map1.setLayers([point_layer, line_layer])
layout.addLayoutItem(map1)
map1.setExtent(point_layer.extent())
map1.setFollowVisibilityPreset(True)
map1.setFollowVisibilityPresetName('red')
map2 = QgsLayoutItemMap(layout)
map2.attemptSetSceneRect(QRectF(20, 120, 80, 80))
map2.setFrameEnabled(True)
map2.setLayers([point_layer, line_layer])
layout.addLayoutItem(map2)
map2.setExtent(point_layer.extent())
map2.setFollowVisibilityPreset(True)
map2.setFollowVisibilityPresetName('blue')
legend = QgsLayoutItemLegend(layout)
legend.setTitle("Legend")
legend.attemptSetSceneRect(QRectF(120, 20, 80, 80))
legend.setFrameEnabled(True)
legend.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend.setBackgroundColor(QColor(200, 200, 200))
legend.setTitle('')
layout.addLayoutItem(legend)
legend.setLinkedMap(map1)
legend2 = QgsLayoutItemLegend(layout)
legend2.setTitle("Legend")
legend2.attemptSetSceneRect(QRectF(120, 120, 80, 80))
legend2.setFrameEnabled(True)
legend2.setFrameStrokeWidth(QgsLayoutMeasurement(2))
legend2.setBackgroundColor(QColor(200, 200, 200))
legend2.setTitle('')
layout.addLayoutItem(legend2)
legend2.setLinkedMap(map2)
checker = QgsLayoutChecker(
'composer_legend_theme', layout)
checker.setControlPathPrefix("composer_legend")
result, message = checker.testLayout()
self.report += checker.report()
self.assertTrue(result, message)
QgsProject.instance().clear()
if __name__ == '__main__':
unittest.main()
|
SrNetoChan/Quantum-GIS
|
tests/src/python/test_qgslayoutlegend.py
|
Python
|
gpl-2.0
| 28,136
|
# Check translations of pango markup
#
# This will look for translatable strings that appear to contain markup and
# check that the markup in the translation matches.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Shea <dshea@redhat.com>
try:
import polib
except ImportError:
print("You need to install the python-polib package to read translations")
raise
from pocketlint.pangocheck import is_markup, markup_match
import xml.etree.ElementTree as ET
def test_markup(mofile):
mo = polib.mofile(mofile)
for entry in mo.translated_entries():
if is_markup(entry.msgid):
# If this is a plural, check each of the plural translations
if entry.msgid_plural:
xlations = entry.msgstr_plural
else:
xlations = {None: entry.msgstr}
for plural_id, msgstr in xlations.items():
# Check if the markup is valid at all
try:
# pylint: disable=unescaped-markup
ET.fromstring('<markup>%s</markup>' % msgstr)
except ET.ParseError:
if entry.msgid_plural:
raise AssertionError("Invalid markup translation for %d translation of msgid %s" %
(plural_id, entry.msgid))
else:
raise AssertionError("Invalid markup translation for msgid %s" % entry.msgid)
# Check if the markup has the same number and kind of tags
if not markup_match(entry.msgid, msgstr):
if entry.msgid_plural:
raise AssertionError("Markup does not match for %d translation of msgid %s" %
(plural_id, entry.msgid))
else:
raise AssertionError("Markup does not match for msgid %s" % entry.msgid)
|
wgwoods/anaconda
|
translation-canary/translation_canary/translated/test_markup.py
|
Python
|
gpl-2.0
| 2,842
|
# coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
from units.compat import unittest
from ansible import errors
from ansible.module_utils.six import text_type, binary_type
from ansible.module_utils.common._collections_compat import Sequence, Set, Mapping
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing import vault
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.parsing.yaml.dumper import AnsibleDumper
from units.mock.yaml_helper import YamlTestUtils
from units.mock.vault_helper import TextVaultSecret
from yaml.parser import ParserError
from yaml.scanner import ScannerError
class NameStringIO(StringIO):
"""In py2.6, StringIO doesn't let you set name because a baseclass has it
as readonly property"""
name = None
def __init__(self, *args, **kwargs):
super(NameStringIO, self).__init__(*args, **kwargs)
class TestAnsibleLoaderBasic(unittest.TestCase):
def test_parse_number(self):
stream = StringIO(u"""
1
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, 1)
# No line/column info saved yet
def test_parse_string(self):
stream = StringIO(u"""
Ansible
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Ansible')
self.assertIsInstance(data, text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_utf8_string(self):
stream = StringIO(u"""
Cafè Eñyei
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, u'Cafè Eñyei')
self.assertIsInstance(data, text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
def test_parse_dict(self):
stream = StringIO(u"""
webster: daniel
oed: oxford
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, {'webster': 'daniel', 'oed': 'oxford'})
self.assertEqual(len(data), 2)
self.assertIsInstance(list(data.keys())[0], text_type)
self.assertIsInstance(list(data.values())[0], text_type)
# Beginning of the first key
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
self.assertEqual(data[u'webster'].ansible_pos, ('myfile.yml', 2, 26))
self.assertEqual(data[u'oed'].ansible_pos, ('myfile.yml', 3, 22))
def test_parse_list(self):
stream = StringIO(u"""
- a
- b
""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, [u'a', u'b'])
self.assertEqual(len(data), 2)
self.assertIsInstance(data[0], text_type)
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 17))
self.assertEqual(data[0].ansible_pos, ('myfile.yml', 2, 19))
self.assertEqual(data[1].ansible_pos, ('myfile.yml', 3, 19))
def test_parse_short_dict(self):
stream = StringIO(u"""{"foo": "bar"}""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 9))
stream = StringIO(u"""foo: bar""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 1, 6))
def test_error_conditions(self):
stream = StringIO(u"""{""")
loader = AnsibleLoader(stream, 'myfile.yml')
self.assertRaises(ParserError, loader.get_single_data)
def test_tab_error(self):
stream = StringIO(u"""---\nhosts: localhost\nvars:\n foo: bar\n\tblip: baz""")
loader = AnsibleLoader(stream, 'myfile.yml')
self.assertRaises(ScannerError, loader.get_single_data)
def test_front_matter(self):
stream = StringIO(u"""---\nfoo: bar""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, dict(foo=u'bar'))
self.assertEqual(data.ansible_pos, ('myfile.yml', 2, 1))
self.assertEqual(data[u'foo'].ansible_pos, ('myfile.yml', 2, 6))
# Initial indent (See: #6348)
stream = StringIO(u""" - foo: bar\n baz: qux""")
loader = AnsibleLoader(stream, 'myfile.yml')
data = loader.get_single_data()
self.assertEqual(data, [{u'foo': u'bar', u'baz': u'qux'}])
self.assertEqual(data.ansible_pos, ('myfile.yml', 1, 2))
self.assertEqual(data[0].ansible_pos, ('myfile.yml', 1, 4))
self.assertEqual(data[0][u'foo'].ansible_pos, ('myfile.yml', 1, 9))
self.assertEqual(data[0][u'baz'].ansible_pos, ('myfile.yml', 2, 9))
class TestAnsibleLoaderVault(unittest.TestCase, YamlTestUtils):
def setUp(self):
self.vault_password = "hunter42"
vault_secret = TextVaultSecret(self.vault_password)
self.vault_secrets = [('vault_secret', vault_secret),
('default', vault_secret)]
self.vault = vault.VaultLib(self.vault_secrets)
@property
def vault_secret(self):
return vault.match_encrypt_secret(self.vault_secrets)[1]
def test_wrong_password(self):
plaintext = u"Ansible"
bob_password = "this is a different password"
bobs_secret = TextVaultSecret(bob_password)
bobs_secrets = [('default', bobs_secret)]
bobs_vault = vault.VaultLib(bobs_secrets)
ciphertext = bobs_vault.encrypt(plaintext, vault.match_encrypt_secret(bobs_secrets)[1])
try:
self.vault.decrypt(ciphertext)
except Exception as e:
self.assertIsInstance(e, errors.AnsibleError)
self.assertEqual(e.message, 'Decryption failed (no vault secrets were found that could decrypt)')
def _encrypt_plaintext(self, plaintext):
# Construct a yaml repr of a vault by hand
vaulted_var_bytes = self.vault.encrypt(plaintext, self.vault_secret)
# add yaml tag
vaulted_var = vaulted_var_bytes.decode()
lines = vaulted_var.splitlines()
lines2 = []
for line in lines:
lines2.append(' %s' % line)
vaulted_var = '\n'.join(lines2)
tagged_vaulted_var = u"""!vault |\n%s""" % vaulted_var
return tagged_vaulted_var
def _build_stream(self, yaml_text):
stream = NameStringIO(yaml_text)
stream.name = 'my.yml'
return stream
def _loader(self, stream):
return AnsibleLoader(stream, vault_secrets=self.vault.secrets)
def _load_yaml(self, yaml_text, password):
stream = self._build_stream(yaml_text)
loader = self._loader(stream)
data_from_yaml = loader.get_single_data()
return data_from_yaml
def test_dump_load_cycle(self):
avu = AnsibleVaultEncryptedUnicode.from_plaintext('The plaintext for test_dump_load_cycle.', self.vault, self.vault_secret)
self._dump_load_cycle(avu)
def test_embedded_vault_from_dump(self):
avu = AnsibleVaultEncryptedUnicode.from_plaintext('setec astronomy', self.vault, self.vault_secret)
blip = {'stuff1': [{'a dict key': 24},
{'shhh-ssh-secrets': avu,
'nothing to see here': 'move along'}],
'another key': 24.1}
blip = ['some string', 'another string', avu]
stream = NameStringIO()
self._dump_stream(blip, stream, dumper=AnsibleDumper)
stream.seek(0)
stream.seek(0)
loader = self._loader(stream)
data_from_yaml = loader.get_data()
stream2 = NameStringIO(u'')
# verify we can dump the object again
self._dump_stream(data_from_yaml, stream2, dumper=AnsibleDumper)
def test_embedded_vault(self):
plaintext_var = u"""This is the plaintext string."""
tagged_vaulted_var = self._encrypt_plaintext(plaintext_var)
another_vaulted_var = self._encrypt_plaintext(plaintext_var)
different_var = u"""A different string that is not the same as the first one."""
different_vaulted_var = self._encrypt_plaintext(different_var)
yaml_text = u"""---\nwebster: daniel\noed: oxford\nthe_secret: %s\nanother_secret: %s\ndifferent_secret: %s""" % (tagged_vaulted_var,
another_vaulted_var,
different_vaulted_var)
data_from_yaml = self._load_yaml(yaml_text, self.vault_password)
vault_string = data_from_yaml['the_secret']
self.assertEqual(plaintext_var, data_from_yaml['the_secret'])
test_dict = {}
test_dict[vault_string] = 'did this work?'
self.assertEqual(vault_string.data, vault_string)
# This looks weird and useless, but the object in question has a custom __eq__
self.assertEqual(vault_string, vault_string)
another_vault_string = data_from_yaml['another_secret']
different_vault_string = data_from_yaml['different_secret']
self.assertEqual(vault_string, another_vault_string)
self.assertNotEqual(vault_string, different_vault_string)
# More testing of __eq__/__ne__
self.assertTrue('some string' != vault_string)
self.assertNotEqual('some string', vault_string)
# Note this is a compare of the str/unicode of these, they are different types
# so we want to test self == other, and other == self etc
self.assertEqual(plaintext_var, vault_string)
self.assertEqual(vault_string, plaintext_var)
self.assertFalse(plaintext_var != vault_string)
self.assertFalse(vault_string != plaintext_var)
class TestAnsibleLoaderPlay(unittest.TestCase):
def setUp(self):
stream = NameStringIO(u"""
- hosts: localhost
vars:
number: 1
string: Ansible
utf8_string: Cafè Eñyei
dictionary:
webster: daniel
oed: oxford
list:
- a
- b
- 1
- 2
tasks:
- name: Test case
ping:
data: "{{ utf8_string }}"
- name: Test 2
ping:
data: "Cafè Eñyei"
- name: Test 3
command: "printf 'Cafè Eñyei\\n'"
""")
self.play_filename = '/path/to/myplay.yml'
stream.name = self.play_filename
self.loader = AnsibleLoader(stream)
self.data = self.loader.get_single_data()
def tearDown(self):
pass
def test_data_complete(self):
self.assertEqual(len(self.data), 1)
self.assertIsInstance(self.data, list)
self.assertEqual(frozenset(self.data[0].keys()), frozenset((u'hosts', u'vars', u'tasks')))
self.assertEqual(self.data[0][u'hosts'], u'localhost')
self.assertEqual(self.data[0][u'vars'][u'number'], 1)
self.assertEqual(self.data[0][u'vars'][u'string'], u'Ansible')
self.assertEqual(self.data[0][u'vars'][u'utf8_string'], u'Cafè Eñyei')
self.assertEqual(self.data[0][u'vars'][u'dictionary'], {
u'webster': u'daniel',
u'oed': u'oxford'
})
self.assertEqual(self.data[0][u'vars'][u'list'], [u'a', u'b', 1, 2])
self.assertEqual(self.data[0][u'tasks'], [
{u'name': u'Test case', u'ping': {u'data': u'{{ utf8_string }}'}},
{u'name': u'Test 2', u'ping': {u'data': u'Cafè Eñyei'}},
{u'name': u'Test 3', u'command': u'printf \'Cafè Eñyei\n\''},
])
def walk(self, data):
# Make sure there's no str in the data
self.assertNotIsInstance(data, binary_type)
# Descend into various container types
if isinstance(data, text_type):
# strings are a sequence so we have to be explicit here
return
elif isinstance(data, (Sequence, Set)):
for element in data:
self.walk(element)
elif isinstance(data, Mapping):
for k, v in data.items():
self.walk(k)
self.walk(v)
# Scalars were all checked so we're good to go
return
def test_no_str_in_data(self):
# Checks that no strings are str type
self.walk(self.data)
def check_vars(self):
# Numbers don't have line/col information yet
# self.assertEqual(self.data[0][u'vars'][u'number'].ansible_pos, (self.play_filename, 4, 21))
self.assertEqual(self.data[0][u'vars'][u'string'].ansible_pos, (self.play_filename, 5, 29))
self.assertEqual(self.data[0][u'vars'][u'utf8_string'].ansible_pos, (self.play_filename, 6, 34))
self.assertEqual(self.data[0][u'vars'][u'dictionary'].ansible_pos, (self.play_filename, 8, 23))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'webster'].ansible_pos, (self.play_filename, 8, 32))
self.assertEqual(self.data[0][u'vars'][u'dictionary'][u'oed'].ansible_pos, (self.play_filename, 9, 28))
self.assertEqual(self.data[0][u'vars'][u'list'].ansible_pos, (self.play_filename, 11, 23))
self.assertEqual(self.data[0][u'vars'][u'list'][0].ansible_pos, (self.play_filename, 11, 25))
self.assertEqual(self.data[0][u'vars'][u'list'][1].ansible_pos, (self.play_filename, 12, 25))
# Numbers don't have line/col info yet
# self.assertEqual(self.data[0][u'vars'][u'list'][2].ansible_pos, (self.play_filename, 13, 25))
# self.assertEqual(self.data[0][u'vars'][u'list'][3].ansible_pos, (self.play_filename, 14, 25))
def check_tasks(self):
#
# First Task
#
self.assertEqual(self.data[0][u'tasks'][0].ansible_pos, (self.play_filename, 16, 23))
self.assertEqual(self.data[0][u'tasks'][0][u'name'].ansible_pos, (self.play_filename, 16, 29))
self.assertEqual(self.data[0][u'tasks'][0][u'ping'].ansible_pos, (self.play_filename, 18, 25))
self.assertEqual(self.data[0][u'tasks'][0][u'ping'][u'data'].ansible_pos, (self.play_filename, 18, 31))
#
# Second Task
#
self.assertEqual(self.data[0][u'tasks'][1].ansible_pos, (self.play_filename, 20, 23))
self.assertEqual(self.data[0][u'tasks'][1][u'name'].ansible_pos, (self.play_filename, 20, 29))
self.assertEqual(self.data[0][u'tasks'][1][u'ping'].ansible_pos, (self.play_filename, 22, 25))
self.assertEqual(self.data[0][u'tasks'][1][u'ping'][u'data'].ansible_pos, (self.play_filename, 22, 31))
#
# Third Task
#
self.assertEqual(self.data[0][u'tasks'][2].ansible_pos, (self.play_filename, 24, 23))
self.assertEqual(self.data[0][u'tasks'][2][u'name'].ansible_pos, (self.play_filename, 24, 29))
self.assertEqual(self.data[0][u'tasks'][2][u'command'].ansible_pos, (self.play_filename, 25, 32))
def test_line_numbers(self):
# Check the line/column numbers are correct
# Note: Remember, currently dicts begin at the start of their first entry
self.assertEqual(self.data[0].ansible_pos, (self.play_filename, 2, 19))
self.assertEqual(self.data[0][u'hosts'].ansible_pos, (self.play_filename, 2, 26))
self.assertEqual(self.data[0][u'vars'].ansible_pos, (self.play_filename, 4, 21))
self.check_vars()
self.assertEqual(self.data[0][u'tasks'].ansible_pos, (self.play_filename, 16, 21))
self.check_tasks()
|
ganeshrn/ansible
|
test/units/parsing/yaml/test_loader.py
|
Python
|
gpl-3.0
| 17,230
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vlan
extends_documentation_fragment: nxos
version_added: "2.1"
short_description: Manages VLAN resources and attributes.
description:
- Manages VLAN configurations on NX-OS switches.
author: Jason Edelman (@jedelman8)
options:
vlan_id:
description:
- Single VLAN ID.
required: false
default: null
vlan_range:
description:
- Range of VLANs such as 2-10 or 2,5,10-15, etc.
required: false
default: null
name:
description:
- Name of VLAN.
required: false
default: null
vlan_state:
description:
- Manage the vlan operational state of the VLAN
(equivalent to state {active | suspend} command.
required: false
default: active
choices: ['active','suspend']
admin_state:
description:
- Manage the VLAN administrative state of the VLAN equivalent
to shut/no shut in VLAN config mode.
required: false
default: up
choices: ['up','down']
mapped_vni:
description:
- The Virtual Network Identifier (VNI) ID that is mapped to the
VLAN. Valid values are integer and keyword 'default'.
required: false
default: null
version_added: "2.2"
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: Ensure a range of VLANs are not present on the switch
nxos_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
host: 68.170.147.165
username: cisco
password: cisco
state: absent
transport: nxapi
- name: Ensure VLAN 50 exists with the name WEB and is in the shutdown state
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
admin_state: down
name: WEB
transport: nxapi
username: cisco
password: cisco
- name: Ensure VLAN is NOT on the device
nxos_vlan:
vlan_id: 50
host: 68.170.147.165
state: absent
transport: nxapi
username: cisco
password: cisco
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: when debug enabled
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: when debug enabled
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_state": "suspend", "mapped_vni": "5000"}
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: when debug enabled
type: dict
sample: {"admin_state": "down", "name": "app_vlan",
"vlan_id": "20", "vlan_state": "suspend", "mapped_vni": ""}
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: when debug enabled
type: dict or null
sample: {"admin_state": "down", "name": "app_vlan", "vlan_id": "20",
"vlan_state": "suspend", "mapped_vni": "5000"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
commands:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "vlan 55", "vn-segment 5000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.nxos import run_commands, load_config, get_config
from ansible.module_utils.basic import AnsibleModule
def vlan_range_to_list(vlans):
result = []
if vlans:
for part in vlans.split(','):
if part == 'none':
break
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
else:
a = int(part)
result.append(a)
return numerical_sort(result)
return result
def numerical_sort(string_int_list):
"""Sort list of strings (VLAN IDs) that are digits in numerical order.
"""
as_int_list = []
as_str_list = []
for vlan in string_int_list:
as_int_list.append(int(vlan))
as_int_list.sort()
for vlan in as_int_list:
as_str_list.append(str(vlan))
return as_str_list
def build_commands(vlans, state):
commands = []
for vlan in vlans:
if state == 'present':
command = 'vlan {0}'.format(vlan)
commands.append(command)
elif state == 'absent':
command = 'no vlan {0}'.format(vlan)
commands.append(command)
return commands
def get_vlan_config_commands(vlan, vid):
"""Build command list required for VLAN configuration
"""
reverse_value_map = {
"admin_state": {
"down": "shutdown",
"up": "no shutdown"
}
}
if vlan.get('admin_state'):
# apply value map when making change to the admin state
# note: would need to be a loop or more in depth check if
# value map has more than 1 key
vlan = apply_value_map(reverse_value_map, vlan)
VLAN_ARGS = {
'name': 'name {0}',
'vlan_state': 'state {0}',
'admin_state': '{0}',
'mode': 'mode {0}',
'mapped_vni': 'vn-segment {0}'
}
commands = []
for param, value in vlan.items():
if param == 'mapped_vni' and value == 'default':
command = 'no vn-segment'
else:
command = VLAN_ARGS.get(param).format(vlan.get(param))
if command:
commands.append(command)
commands.insert(0, 'vlan ' + vid)
commands.append('exit')
return commands
def get_list_of_vlans(module):
body = run_commands(module, ['show vlan | json'])
vlan_list = []
vlan_table = body[0].get('TABLE_vlanbrief')['ROW_vlanbrief']
if isinstance(vlan_table, list):
for vlan in vlan_table:
vlan_list.append(str(vlan['vlanshowbr-vlanid-utf']))
else:
vlan_list.append('1')
return vlan_list
def get_vni(vlanid, module):
flags = str('all | section vlan.{0}'.format(vlanid)).split(' ')
body = get_config(module, flags=flags)
#command = 'show run all | section vlan.{0}'.format(vlanid)
#body = execute_show_command(command, module, command_type='cli_show_ascii')[0]
value = ''
if body:
REGEX = re.compile(r'(?:vn-segment\s)(?P<value>.*)$', re.M)
if 'vn-segment' in body:
value = REGEX.search(body).group('value')
return value
def get_vlan(vlanid, module):
"""Get instance of VLAN as a dictionary
"""
command = 'show vlan id %s | json' % vlanid
body = run_commands(module, [command])
#command = 'show vlan id ' + vlanid
#body = execute_show_command(command, module)
try:
vlan_table = body[0]['TABLE_vlanbriefid']['ROW_vlanbriefid']
except (TypeError, IndexError):
return {}
key_map = {
"vlanshowbr-vlanid-utf": "vlan_id",
"vlanshowbr-vlanname": "name",
"vlanshowbr-vlanstate": "vlan_state",
"vlanshowbr-shutstate": "admin_state"
}
vlan = apply_key_map(key_map, vlan_table)
value_map = {
"admin_state": {
"shutdown": "down",
"noshutdown": "up"
}
}
vlan = apply_value_map(value_map, vlan)
vlan['mapped_vni'] = get_vni(vlanid, module)
return vlan
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = str(value)
return new_dict
def apply_value_map(value_map, resource):
for key, value in value_map.items():
resource[key] = value[resource.get(key)]
return resource
def main():
argument_spec = dict(
vlan_id=dict(required=False, type='str'),
vlan_range=dict(required=False),
name=dict(required=False),
vlan_state=dict(choices=['active', 'suspend'], required=False),
mapped_vni=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
admin_state=dict(choices=['up', 'down'], required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[['vlan_range', 'name'],
['vlan_id', 'vlan_range']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
warnings = list()
check_args(module, warnings)
vlan_range = module.params['vlan_range']
vlan_id = module.params['vlan_id']
name = module.params['name']
vlan_state = module.params['vlan_state']
admin_state = module.params['admin_state']
mapped_vni = module.params['mapped_vni']
state = module.params['state']
changed = False
if vlan_id:
if not vlan_id.isdigit():
module.fail_json(msg='vlan_id must be a valid VLAN ID')
args = dict(name=name, vlan_state=vlan_state,
admin_state=admin_state, mapped_vni=mapped_vni)
proposed = dict((k, v) for k, v in args.items() if v is not None)
proposed_vlans_list = numerical_sort(vlan_range_to_list(
vlan_id or vlan_range))
existing_vlans_list = numerical_sort(get_list_of_vlans(module))
commands = []
existing = {}
if vlan_range:
if state == 'present':
# These are all of the VLANs being proposed that don't
# already exist on the switch
vlans_delta = list(
set(proposed_vlans_list).difference(existing_vlans_list))
commands = build_commands(vlans_delta, state)
elif state == 'absent':
# VLANs that are common between what is being proposed and
# what is on the switch
vlans_common = list(
set(proposed_vlans_list).intersection(existing_vlans_list))
commands = build_commands(vlans_common, state)
else:
existing = get_vlan(vlan_id, module)
if state == 'absent':
if existing:
commands = ['no vlan ' + vlan_id]
elif state == 'present':
if (existing.get('mapped_vni') == '0' and
proposed.get('mapped_vni') == 'default'):
proposed.pop('mapped_vni')
delta = dict(set(
proposed.items()).difference(existing.items()))
if delta or not existing:
commands = get_vlan_config_commands(delta, vlan_id)
end_state = existing
end_state_vlans_list = existing_vlans_list
if commands:
if existing.get('mapped_vni') and state != 'absent':
if (existing.get('mapped_vni') != proposed.get('mapped_vni') and
existing.get('mapped_vni') != '0' and proposed.get('mapped_vni') != 'default'):
commands.insert(1, 'no vn-segment')
if module.check_mode:
module.exit_json(changed=True,
commands=commands)
else:
load_config(module, commands)
changed = True
end_state_vlans_list = numerical_sort(get_list_of_vlans(module))
if 'configure' in commands:
commands.pop(0)
if vlan_id:
end_state = get_vlan(vlan_id, module)
results = {
'commands': commands,
'updates': commands,
'changed': changed,
'warnings': warnings
}
if module._debug:
results.update({
'proposed_vlans_list': proposed_vlans_list,
'existing_vlans_list': existing_vlans_list,
'proposed': proposed,
'existing': existing,
'end_state': end_state,
'end_state_vlans_list': end_state_vlans_list
})
module.exit_json(**results)
if __name__ == '__main__':
main()
|
cmelange/ansible
|
lib/ansible/modules/network/nxos/nxos_vlan.py
|
Python
|
gpl-3.0
| 13,912
|
import dbus
from gwibber.microblog import util
class GwibberPublic:
"""
GwibberPublic is the public python class which provides convience methods
for using Gwibber.
"""
def __init__(self):
self.bus = dbus.SessionBus()
self.accounts = self.getbus("Accounts")
self.service = self.getbus("Service")
self.shortener = self.getbus("URLShorten")
def getbus(self, name):
obj = self.bus.get_object(
"com.Gwibber.%s" % name,
"/com/gwibber/%s" % name,
follow_name_owner_changes=True)
return dbus.Interface(obj, "com.Gwibber.%s" % name)
def post(self, message):
args = [message]
self.microblog.operation({
"args": args,
"opname": "send",
})
def GetServices(self):
"""
Returns a list of services available as json string
example:
import json, gwibber.lib
gw = gwibber.lib.GwibberPublic()
services = json.loads(gw.GetServices())
"""
return self.service.GetServices()
def GetAccounts(self):
"""
Returns a list of services available as json string
example:
import json, gwibber.lib
gw = gwibber.lib.GwibberPublic()
accounts = json.loads(gw.GetAccounts())
"""
return self.accounts.List()
def SendMessage(self, message):
"""
Posts a message/status update to all accounts with send_enabled = True. It
takes one argument, which is a message formated as a string.
example:
import gwibber.lib
gw = gwibber.lib.GwibberPublic()
gw.SendMessage("This is a message")
"""
return self.service.SendMessage(message)
def Refresh(self):
"""
Calls the Gwibber Service to trigger a refresh operation
example:
import gwibber.lib
gw = gwibber.lib.GwibberPublic()
gw.Refresh()
"""
return self.service.Refresh()
def Shorten(self, url):
"""
Takes a long url in and returns a shortened url as a string, based on your
configured shortening service
example:
import gwibber.lib
gw = gwibber.lib.GwibberPublic()
gw.Shorten(url)
"""
return self.shortener.Shorten(url)
def MonitorAccountCreated(self, cb):
self.accounts.connect_to_signal("AccountCreated", cb)
def MonitorAccountChanged(self, cb):
self.accounts.connect_to_signal("AccountChanged", cb)
def MonitorAccountDeleted(self, cb):
self.accounts.connect_to_signal("AccountDeleted", cb)
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/gwibber/lib/__init__.py
|
Python
|
gpl-3.0
| 2,740
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import aeneas.globalfunctions as gf
class TestCEW(unittest.TestCase):
def test_cew_synthesize_multiple(self):
handler, output_file_path = gf.tmp_file(suffix=".wav")
try:
c_quit_after = 0.0
c_backwards = 0
c_text = [
(u"en", u"Dummy 1"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 2"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 3"), # NOTE cew requires the actual eSpeak voice code
]
import aeneas.cew.cew
sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
output_file_path,
c_quit_after,
c_backwards,
c_text
)
self.assertEqual(sr, 22050)
self.assertEqual(sf, 3)
self.assertEqual(len(intervals), 3)
except ImportError:
pass
gf.delete_file(handler, output_file_path)
def test_cew_synthesize_multiple_lang(self):
handler, output_file_path = gf.tmp_file(suffix=".wav")
try:
c_quit_after = 0.0
c_backwards = 0
c_text = [
(u"en", u"Dummy 1"), # NOTE cew requires the actual eSpeak voice code
(u"it", u"Segnaposto 2"), # NOTE cew requires the actual eSpeak voice code
(u"en", u"Dummy 3"), # NOTE cew requires the actual eSpeak voice code
]
import aeneas.cew.cew
sr, sf, intervals = aeneas.cew.cew.synthesize_multiple(
output_file_path,
c_quit_after,
c_backwards,
c_text
)
self.assertEqual(sr, 22050)
self.assertEqual(sf, 3)
self.assertEqual(len(intervals), 3)
except ImportError:
pass
gf.delete_file(handler, output_file_path)
if __name__ == "__main__":
unittest.main()
|
danielbair/aeneas
|
aeneas/tests/test_cew.py
|
Python
|
agpl-3.0
| 3,056
|
"""MySQLdb Cursors
This module implements Cursors of various types for MySQLdb. By
default, MySQLdb uses the Cursor class.
"""
import re
import sys
try:
from types import ListType, TupleType, UnicodeType
except ImportError:
# Python 3
ListType = list
TupleType = tuple
UnicodeType = str
restr = r"""
\s
values
\s*
(
\(
[^()']*
(?:
(?:
(?:\(
# ( - editor hightlighting helper
[^)]*
\))
|
'
[^\\']*
(?:\\.[^\\']*)*
'
)
[^()']*
)*
\)
)
"""
insert_values = re.compile(restr, re.S | re.I | re.X)
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
class BaseCursor(object):
"""A base for Cursor classes. Useful attributes:
description
A tuple of DB API 7-tuples describing the columns in
the last executed query; see PEP-249 for details.
description_flags
Tuple of column flags for last query, one entry per column
in the result set. Values correspond to those in
MySQLdb.constants.FLAG. See MySQL documentation (C API)
for more information. Non-standard extension.
arraysize
default number of rows fetchmany() will fetch
"""
from _mysql_exceptions import MySQLError, Warning, Error, InterfaceError, \
DatabaseError, DataError, OperationalError, IntegrityError, \
InternalError, ProgrammingError, NotSupportedError
_defer_warnings = False
def __init__(self, connection):
from weakref import proxy
self.connection = proxy(connection)
self.description = None
self.description_flags = None
self.rowcount = -1
self.arraysize = 1
self._executed = None
self.lastrowid = None
self.messages = []
self.errorhandler = connection.errorhandler
self._result = None
self._warnings = 0
self._info = None
self.rownumber = None
def __del__(self):
self.close()
self.errorhandler = None
self._result = None
def close(self):
"""Close the cursor. No further queries will be possible."""
if not self.connection: return
while self.nextset(): pass
self.connection = None
def _check_executed(self):
if not self._executed:
self.errorhandler(self, ProgrammingError, "execute() first")
def _warning_check(self):
from warnings import warn
if self._warnings:
warnings = self._get_db().show_warnings()
if warnings:
# This is done in two loops in case
# Warnings are set to raise exceptions.
for w in warnings:
self.messages.append((self.Warning, w))
for w in warnings:
warn(w[-1], self.Warning, 3)
elif self._info:
self.messages.append((self.Warning, self._info))
warn(self._info, self.Warning, 3)
def nextset(self):
"""Advance to the next result set.
Returns None if there are no more result sets.
"""
if self._executed:
self.fetchall()
del self.messages[:]
db = self._get_db()
nr = db.next_result()
if nr == -1:
return None
self._do_get_result()
self._post_get_result()
self._warning_check()
return 1
def _post_get_result(self): pass
def _do_get_result(self):
db = self._get_db()
self._result = self._get_result()
self.rowcount = db.affected_rows()
self.rownumber = 0
self.description = self._result and self._result.describe() or None
self.description_flags = self._result and self._result.field_flags() or None
self.lastrowid = db.insert_id()
self._warnings = db.warning_count()
self._info = db.info()
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _get_db(self):
if not self.connection:
self.errorhandler(self, ProgrammingError, "cursor closed")
return self.connection
def execute(self, query, args=None):
"""Execute a query.
query -- string, query to execute on server
args -- optional sequence or mapping, parameters to use with query.
Note: If args is a sequence, then %s must be used as the
parameter placeholder in the query. If a mapping is used,
%(key)s must be used as the placeholder.
Returns long integer rows affected, if any
"""
del self.messages[:]
db = self._get_db()
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
if args is not None:
query = query % db.literal(args)
try:
r = None
r = self._query(query)
except TypeError, m:
if m.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.messages.append((ProgrammingError, m.args[0]))
self.errorhandler(self, ProgrammingError, m.args[0])
else:
self.messages.append((TypeError, m))
self.errorhandler(self, TypeError, m)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.messages.append((exc, value))
self.errorhandler(self, exc, value)
self._executed = query
if not self._defer_warnings: self._warning_check()
return r
def executemany(self, query, args):
"""Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
del self.messages[:]
db = self._get_db()
if not args: return
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
m = insert_values.search(query)
if not m:
r = 0
for a in args:
r = r + self.execute(query, a)
return r
p = m.start(1)
e = m.end(1)
qv = m.group(1)
try:
q = [ qv % db.literal(a) for a in args ]
except TypeError, msg:
if msg.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.errorhandler(self, ProgrammingError, msg.args[0])
else:
self.errorhandler(self, TypeError, msg)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
db = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index,
db.literal(arg))
if isinstance(q, unicode):
q = q.encode(db.unicode_literal.charset)
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range(len(args))]))
if type(q) is UnicodeType:
q = q.encode(db.unicode_literal.charset)
self._query(q)
self._executed = q
if not self._defer_warnings: self._warning_check()
return args
def _do_query(self, q):
db = self._get_db()
self._last_executed = q
db.query(q)
self._do_get_result()
return self.rowcount
def _query(self, q): return self._do_query(q)
def _fetch_row(self, size=1):
if not self._result:
return ()
return self._result.fetch_row(size, self._fetch_type)
def __iter__(self):
return iter(self.fetchone, None)
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
class CursorStoreResultMixIn(object):
"""This is a MixIn class which causes the entire result set to be
stored on the client side, i.e. it uses mysql_store_result(). If the
result set can be very large, consider adding a LIMIT clause to your
query, or using CursorUseResultMixIn instead."""
def _get_result(self): return self._get_db().store_result()
def _query(self, q):
rowcount = self._do_query(q)
self._post_get_result()
return rowcount
def _post_get_result(self):
self._rows = self._fetch_row(0)
self._result = None
def fetchone(self):
"""Fetches a single row from the cursor. None indicates that
no more rows are available."""
self._check_executed()
if self.rownumber >= len(self._rows): return None
result = self._rows[self.rownumber]
self.rownumber = self.rownumber+1
return result
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
"""Fetchs all available rows from the cursor."""
self._check_executed()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
"""Scroll the cursor in the result set to a new position according
to mode.
If mode is 'relative' (default), value is taken as offset to
the current position in the result set, if set to 'absolute',
value states an absolute target position."""
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
self.errorhandler(self, ProgrammingError,
"unknown scroll mode %s" % repr(mode))
if r < 0 or r >= len(self._rows):
self.errorhandler(self, IndexError, "out of range")
self.rownumber = r
def __iter__(self):
self._check_executed()
result = self.rownumber and self._rows[self.rownumber:] or self._rows
return iter(result)
class CursorUseResultMixIn(object):
"""This is a MixIn class which causes the result set to be stored
in the server and sent row-by-row to client side, i.e. it uses
mysql_use_result(). You MUST retrieve the entire result set and
close() the cursor before additional queries can be peformed on
the connection."""
_defer_warnings = True
def _get_result(self): return self._get_db().use_result()
def fetchone(self):
"""Fetches a single row from the cursor."""
self._check_executed()
r = self._fetch_row(1)
if not r:
self._warning_check()
return None
self.rownumber = self.rownumber + 1
return r[0]
def fetchmany(self, size=None):
"""Fetch up to size rows from the cursor. Result set may be smaller
than size. If size is not defined, cursor.arraysize is used."""
self._check_executed()
r = self._fetch_row(size or self.arraysize)
self.rownumber = self.rownumber + len(r)
if not r:
self._warning_check()
return r
def fetchall(self):
"""Fetchs all available rows from the cursor."""
self._check_executed()
r = self._fetch_row(0)
self.rownumber = self.rownumber + len(r)
self._warning_check()
return r
def __iter__(self):
return self
def next(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
class CursorTupleRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as tuples,
which is the standard form required by DB API."""
_fetch_type = 0
class CursorDictRowsMixIn(object):
"""This is a MixIn class that causes all rows to be returned as
dictionaries. This is a non-standard feature."""
_fetch_type = 1
def fetchoneDict(self):
"""Fetch a single row as a dictionary. Deprecated:
Use fetchone() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchoneDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchone()
def fetchmanyDict(self, size=None):
"""Fetch several rows as a list of dictionaries. Deprecated:
Use fetchmany() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchmanyDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchmany(size)
def fetchallDict(self):
"""Fetch all available rows as a list of dictionaries. Deprecated:
Use fetchall() instead. Will be removed in 1.3."""
from warnings import warn
warn("fetchallDict() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
return self.fetchall()
class CursorOldDictRowsMixIn(CursorDictRowsMixIn):
"""This is a MixIn class that returns rows as dictionaries with
the same key convention as the old Mysqldb (MySQLmodule). Don't
use this."""
_fetch_type = 2
class Cursor(CursorStoreResultMixIn, CursorTupleRowsMixIn,
BaseCursor):
"""This is the standard Cursor class that returns rows as tuples
and stores the result set in the client."""
class DictCursor(CursorStoreResultMixIn, CursorDictRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as dictionaries and
stores the result set in the client."""
class SSCursor(CursorUseResultMixIn, CursorTupleRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as tuples and stores
the result set in the server."""
class SSDictCursor(CursorUseResultMixIn, CursorDictRowsMixIn,
BaseCursor):
"""This is a Cursor class that returns rows as dictionaries and
stores the result set in the server."""
|
ecolitan/fatics
|
venv/lib/python2.7/site-packages/MySQLdb/cursors.py
|
Python
|
agpl-3.0
| 17,253
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import event_event
from . import event_registration
from . import event_type
from . import website
from . import website_event_menu
from . import website_menu
from . import website_visitor
|
rven/odoo
|
addons/website_event/models/__init__.py
|
Python
|
agpl-3.0
| 296
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from erpnext.utilities.transaction_base import delete_events
from frappe.model.document import Document
class Project(Document):
def get_gross_profit(self):
pft, per_pft =0, 0
pft = flt(self.project_value) - flt(self.est_material_cost)
#if pft > 0:
per_pft = (flt(pft) / flt(self.project_value)) * 100
ret = {'gross_margin_value': pft, 'per_gross_margin': per_pft}
return ret
def validate(self):
"""validate start date before end date"""
if self.project_start_date and self.completion_date:
if getdate(self.completion_date) < getdate(self.project_start_date):
frappe.throw(_("Expected Completion Date can not be less than Project Start Date"))
self.update_milestones_completed()
def update_milestones_completed(self):
if self.project_milestones:
completed = filter(lambda x: x.status=="Completed", self.project_milestones)
self.percent_milestones_completed = len(completed) * 100 / len(self.project_milestones)
def on_update(self):
self.add_calendar_event()
def update_percent_complete(self):
total = frappe.db.sql("""select count(*) from tabTask where project=%s""",
self.name)[0][0]
if total:
completed = frappe.db.sql("""select count(*) from tabTask where
project=%s and status in ('Closed', 'Cancelled')""", self.name)[0][0]
frappe.db.set_value("Project", self.name, "percent_complete",
int(float(completed) / total * 100))
def add_calendar_event(self):
# delete any earlier event for this project
delete_events(self.doctype, self.name)
# add events
for milestone in self.get("project_milestones"):
if milestone.milestone_date:
description = (milestone.milestone or "Milestone") + " for " + self.name
frappe.get_doc({
"doctype": "Event",
"owner": self.owner,
"subject": description,
"description": description,
"starts_on": milestone.milestone_date + " 10:00:00",
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
}).insert(ignore_permissions=True)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def get_cost_center_name(project_name):
return frappe.db.get_value("Project", project_name, "cost_center")
|
indictranstech/focal-erpnext
|
projects/doctype/project/project.py
|
Python
|
agpl-3.0
| 2,450
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import copy
from rapidsms.connection import Connection
from rapidsms.person import Person
from datetime import datetime
from rapidsms import utils
class StatusCodes:
'''Enum for representing status types of a message or response.'''
NONE = "None" # we don't know. the default
OK = "Ok" # is great success!
APP_ERROR = "Application Error" # application specific errors - e.g. bad data
GENERIC_ERROR = "Generic error" # generic errors - e.g. a catch all responder
class Message(object):
def __init__(self, connection=None, text=None, person=None, date=None):
if connection == None and person == None:
raise Exception("Message __init__() must take one of: connection, person")
self._connection = connection
self.text = text
self.date = ( datetime.utcnow() if date is None
else utils.to_naive_utc_dt(date) )
self.person = person
self.responses = []
self.status = StatusCodes.NONE
# a message is considered "unprocessed" until
# rapidsms has dispatched it to all apps, and
# flushed the responses out
self.processed = False
def __unicode__(self):
return self.text
@property
def connection(self):
# connection is read-only, since it's an
# immutable property of this object
if self._connection is not None:
return self._connection
else:
return self.person.connection
@property
def peer (self):
# return the identity (e.g. phone number) of
# the other end of this message's connection
return self.connection.identity
def send(self):
"""Send this message via self.connection.backend, returning
True if the message was sent successfully."""
return self.connection.backend.router.outgoing(self)
def flush_responses (self):
"""Sends all responses added to this message (via the
Message.respond method) in the order which they were
added, and clears self.responses"""
# keep on iterating until all of
# the messages have been sent
while self.responses:
self.responses.pop(0).send()
def error(self, text, level):
"""Apps send error messages here rather than through respond
so users only receive one - the with the highest level of specificity"""
#TODO implement this
pass
def respond(self, text, status = StatusCodes.NONE):
"""Send the given text back to the original caller of this
message on the same route that it came in on"""
if self.connection:
response = self.get_response(text, status)
self.responses.append(response)
return True
else:
return False
def get_response(self, text, status):
response = copy.copy(self)
response.text = text
response.status = status
return response
def forward (self, identity, text=None):
if self.connection:
target = self.connection.fork(identity)
if text is None: text = self.text
message = type(self)(connection=target, text=text)
self.responses.append(message)
return True
else:
return False
class EmailMessage(Message):
"""Email version of a message object, with some extra stuff that can
be consumed by email backends/apps."""
def __init__(self, connection=None, text=None, person=None, date=None,
subject=None, mime_type="text/plain"):
super(EmailMessage, self).__init__(connection=connection, text=text,
person=person, date=date)
self.subject = subject
self.mime_type = mime_type
def get_response(self, text, status):
response = Message.get_response(self, text, status)
response.subject = "re: %s" % self.subject
return response
|
icomms/rapidsms
|
lib/rapidsms/message.py
|
Python
|
lgpl-3.0
| 4,140
|
from temboo.Library.Google.Drive.Changes.Get import Get, GetInputSet, GetResultSet, GetChoreographyExecution
from temboo.Library.Google.Drive.Changes.List import List, ListInputSet, ListResultSet, ListChoreographyExecution
|
jordanemedlock/psychtruths
|
temboo/core/Library/Google/Drive/Changes/__init__.py
|
Python
|
apache-2.0
| 223
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import bayesflow
from tensorflow.contrib import cloud
from tensorflow.contrib import compiler
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distributions
from tensorflow.contrib import factorization
from tensorflow.contrib import framework
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linalg
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import quantization
from tensorflow.contrib import rnn
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.ndlstm import python as ndlstm
from tensorflow.contrib.specs import python as specs
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del LazyLoader
del absolute_import
del division
del print_function
|
sjperkins/tensorflow
|
tensorflow/contrib/__init__.py
|
Python
|
apache-2.0
| 3,181
|
def pre_listen(task_id, transport, attr_array):
new_attrs = []
for (scope, name, value) in attr_array:
if scope == transport and name == 'port':
value = str(int(value) + 1)
new_attr = (scope, name, value)
new_attrs.append(new_attr)
return new_attrs
|
gridcf/gct
|
gridftp/net_manager/test/port_plus_one.py
|
Python
|
apache-2.0
| 297
|
"""Support for the Airly air_quality service."""
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_PM_2_5,
ATTR_PM_10,
AirQualityEntity,
)
from homeassistant.const import CONF_NAME
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
ATTR_API_PM10,
ATTR_API_PM10_LIMIT,
ATTR_API_PM10_PERCENT,
ATTR_API_PM25,
ATTR_API_PM25_LIMIT,
ATTR_API_PM25_PERCENT,
DOMAIN,
)
ATTRIBUTION = "Data provided by Airly"
LABEL_ADVICE = "advice"
LABEL_AQI_DESCRIPTION = f"{ATTR_AQI}_description"
LABEL_AQI_LEVEL = f"{ATTR_AQI}_level"
LABEL_PM_2_5_LIMIT = f"{ATTR_PM_2_5}_limit"
LABEL_PM_2_5_PERCENT = f"{ATTR_PM_2_5}_percent_of_limit"
LABEL_PM_10_LIMIT = f"{ATTR_PM_10}_limit"
LABEL_PM_10_PERCENT = f"{ATTR_PM_10}_percent_of_limit"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Airly air_quality entity based on a config entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[AirlyAirQuality(coordinator, name, config_entry.unique_id)], False
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res)
return res
return _decorator
class AirlyAirQuality(AirQualityEntity):
"""Define an Airly air quality."""
def __init__(self, coordinator, name, unique_id):
"""Initialize."""
self.coordinator = coordinator
self._name = name
self._unique_id = unique_id
self._icon = "mdi:blur"
@property
def name(self):
"""Return the name."""
return self._name
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
@round_state
def air_quality_index(self):
"""Return the air quality index."""
return self.coordinator.data[ATTR_API_CAQI]
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data[ATTR_API_PM25]
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data[ATTR_API_PM10]
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def available(self):
"""Return True if entity is available."""
return self.coordinator.last_update_success
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
LABEL_AQI_DESCRIPTION: self.coordinator.data[ATTR_API_CAQI_DESCRIPTION],
LABEL_ADVICE: self.coordinator.data[ATTR_API_ADVICE],
LABEL_AQI_LEVEL: self.coordinator.data[ATTR_API_CAQI_LEVEL],
LABEL_PM_2_5_LIMIT: self.coordinator.data[ATTR_API_PM25_LIMIT],
LABEL_PM_2_5_PERCENT: round(self.coordinator.data[ATTR_API_PM25_PERCENT]),
LABEL_PM_10_LIMIT: self.coordinator.data[ATTR_API_PM10_LIMIT],
LABEL_PM_10_PERCENT: round(self.coordinator.data[ATTR_API_PM10_PERCENT]),
}
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Update Airly entity."""
await self.coordinator.async_request_refresh()
|
pschmitt/home-assistant
|
homeassistant/components/airly/air_quality.py
|
Python
|
apache-2.0
| 3,907
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import netaddr
from oslo.config import cfg
from neutron.agent.linux import utils
from neutron.common import exceptions
OPTS = [
cfg.BoolOpt('ip_lib_force_root',
default=False,
help=_('Force ip_lib calls to use the root helper')),
]
LOOPBACK_DEVNAME = 'lo'
# NOTE(ethuleau): depend of the version of iproute2, the vlan
# interface details vary.
VLAN_INTERFACE_DETAIL = ['vlan protocol 802.1q',
'vlan protocol 802.1Q',
'vlan id']
class SubProcessBase(object):
def __init__(self, root_helper=None, namespace=None,
log_fail_as_error=True):
self.root_helper = root_helper
self.namespace = namespace
self.log_fail_as_error = log_fail_as_error
try:
self.force_root = cfg.CONF.ip_lib_force_root
except cfg.NoSuchOptError:
# Only callers that need to force use of the root helper
# need to register the option.
self.force_root = False
def _run(self, options, command, args):
if self.namespace:
return self._as_root(options, command, args)
elif self.force_root:
# Force use of the root helper to ensure that commands
# will execute in dom0 when running under XenServer/XCP.
return self._execute(options, command, args, self.root_helper,
log_fail_as_error=self.log_fail_as_error)
else:
return self._execute(options, command, args,
log_fail_as_error=self.log_fail_as_error)
def enforce_root_helper(self):
if not self.root_helper and os.geteuid() != 0:
raise exceptions.SudoRequired()
def _as_root(self, options, command, args, use_root_namespace=False):
self.enforce_root_helper()
namespace = self.namespace if not use_root_namespace else None
return self._execute(options,
command,
args,
self.root_helper,
namespace,
log_fail_as_error=self.log_fail_as_error)
@classmethod
def _execute(cls, options, command, args, root_helper=None,
namespace=None, log_fail_as_error=True):
opt_list = ['-%s' % o for o in options]
if namespace:
ip_cmd = ['ip', 'netns', 'exec', namespace, 'ip']
else:
ip_cmd = ['ip']
return utils.execute(ip_cmd + opt_list + [command] + list(args),
root_helper=root_helper,
log_fail_as_error=log_fail_as_error)
def set_log_fail_as_error(self, fail_with_error):
self.log_fail_as_error = fail_with_error
class IPWrapper(SubProcessBase):
def __init__(self, root_helper=None, namespace=None):
super(IPWrapper, self).__init__(root_helper=root_helper,
namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
return IPDevice(name, self.root_helper, self.namespace)
def get_devices(self, exclude_loopback=False):
retval = []
output = self._execute(['o', 'd'], 'link', ('list',),
self.root_helper, self.namespace)
for line in output.split('\n'):
if '<' not in line:
continue
tokens = line.split(' ', 2)
if len(tokens) == 3:
if any(v in tokens[2] for v in VLAN_INTERFACE_DETAIL):
delimiter = '@'
else:
delimiter = ':'
name = tokens[1].rpartition(delimiter)[0].strip()
if exclude_loopback and name == LOOPBACK_DEVNAME:
continue
retval.append(IPDevice(name,
self.root_helper,
self.namespace))
return retval
def add_tuntap(self, name, mode='tap'):
self._as_root('', 'tuntap', ('add', name, 'mode', mode))
return IPDevice(name, self.root_helper, self.namespace)
def add_veth(self, name1, name2, namespace2=None):
args = ['add', name1, 'type', 'veth', 'peer', 'name', name2]
if namespace2 is None:
namespace2 = self.namespace
else:
self.ensure_namespace(namespace2)
args += ['netns', namespace2]
self._as_root('', 'link', tuple(args))
return (IPDevice(name1, self.root_helper, self.namespace),
IPDevice(name2, self.root_helper, namespace2))
def del_veth(self, name):
"""Delete a virtual interface between two namespaces."""
self._as_root('', 'link', ('del', name))
def ensure_namespace(self, name):
if not self.netns.exists(name):
ip = self.netns.add(name)
lo = ip.device(LOOPBACK_DEVNAME)
lo.link.set_up()
else:
ip = IPWrapper(self.root_helper, name)
return ip
def namespace_is_empty(self):
return not self.get_devices(exclude_loopback=True)
def garbage_collect_namespace(self):
"""Conditionally destroy the namespace if it is empty."""
if self.namespace and self.netns.exists(self.namespace):
if self.namespace_is_empty():
self.netns.delete(self.namespace)
return True
return False
def add_device_to_namespace(self, device):
if self.namespace:
device.link.set_netns(self.namespace)
def add_vxlan(self, name, vni, group=None, dev=None, ttl=None, tos=None,
local=None, port=None, proxy=False):
cmd = ['add', name, 'type', 'vxlan', 'id', vni]
if group:
cmd.extend(['group', group])
if dev:
cmd.extend(['dev', dev])
if ttl:
cmd.extend(['ttl', ttl])
if tos:
cmd.extend(['tos', tos])
if local:
cmd.extend(['local', local])
if proxy:
cmd.append('proxy')
# tuple: min,max
if port and len(port) == 2:
cmd.extend(['port', port[0], port[1]])
elif port:
raise exceptions.NetworkVxlanPortRangeError(vxlan_range=port)
self._as_root('', 'link', cmd)
return (IPDevice(name, self.root_helper, self.namespace))
@classmethod
def get_namespaces(cls, root_helper):
output = cls._execute('', 'netns', ('list',), root_helper=root_helper)
return [l.strip() for l in output.split('\n')]
class IpRule(IPWrapper):
def add_rule_from(self, ip, table, rule_pr):
args = ['add', 'from', ip, 'lookup', table, 'priority', rule_pr]
ip = self._as_root('', 'rule', tuple(args))
return ip
def delete_rule_priority(self, rule_pr):
args = ['del', 'priority', rule_pr]
ip = self._as_root('', 'rule', tuple(args))
return ip
class IPDevice(SubProcessBase):
def __init__(self, name, root_helper=None, namespace=None):
super(IPDevice, self).__init__(root_helper=root_helper,
namespace=namespace)
self.name = name
self.link = IpLinkCommand(self)
self.addr = IpAddrCommand(self)
self.route = IpRouteCommand(self)
self.neigh = IpNeighCommand(self)
def __eq__(self, other):
return (other is not None and self.name == other.name
and self.namespace == other.namespace)
def __str__(self):
return self.name
class IpCommandBase(object):
COMMAND = ''
def __init__(self, parent):
self._parent = parent
def _run(self, *args, **kwargs):
return self._parent._run(kwargs.get('options', []), self.COMMAND, args)
def _as_root(self, *args, **kwargs):
return self._parent._as_root(kwargs.get('options', []),
self.COMMAND,
args,
kwargs.get('use_root_namespace', False))
class IpDeviceCommandBase(IpCommandBase):
@property
def name(self):
return self._parent.name
class IpLinkCommand(IpDeviceCommandBase):
COMMAND = 'link'
def set_address(self, mac_address):
self._as_root('set', self.name, 'address', mac_address)
def set_mtu(self, mtu_size):
self._as_root('set', self.name, 'mtu', mtu_size)
def set_up(self):
self._as_root('set', self.name, 'up')
def set_down(self):
self._as_root('set', self.name, 'down')
def set_netns(self, namespace):
self._as_root('set', self.name, 'netns', namespace)
self._parent.namespace = namespace
def set_name(self, name):
self._as_root('set', self.name, 'name', name)
self._parent.name = name
def set_alias(self, alias_name):
self._as_root('set', self.name, 'alias', alias_name)
def delete(self):
self._as_root('delete', self.name)
@property
def address(self):
return self.attributes.get('link/ether')
@property
def state(self):
return self.attributes.get('state')
@property
def mtu(self):
return self.attributes.get('mtu')
@property
def qdisc(self):
return self.attributes.get('qdisc')
@property
def qlen(self):
return self.attributes.get('qlen')
@property
def alias(self):
return self.attributes.get('alias')
@property
def attributes(self):
return self._parse_line(self._run('show', self.name, options='o'))
def _parse_line(self, value):
if not value:
return {}
device_name, settings = value.replace("\\", '').split('>', 1)
tokens = settings.split()
keys = tokens[::2]
values = [int(v) if v.isdigit() else v for v in tokens[1::2]]
retval = dict(zip(keys, values))
return retval
class IpAddrCommand(IpDeviceCommandBase):
COMMAND = 'addr'
def add(self, ip_version, cidr, broadcast, scope='global'):
self._as_root('add',
cidr,
'brd',
broadcast,
'scope',
scope,
'dev',
self.name,
options=[ip_version])
def delete(self, ip_version, cidr):
self._as_root('del',
cidr,
'dev',
self.name,
options=[ip_version])
def flush(self):
self._as_root('flush', self.name)
def list(self, scope=None, to=None, filters=None):
if filters is None:
filters = []
retval = []
if scope:
filters += ['scope', scope]
if to:
filters += ['to', to]
for line in self._run('show', self.name, *filters).split('\n'):
line = line.strip()
if not line.startswith('inet'):
continue
parts = line.split()
if parts[0] == 'inet6':
version = 6
scope = parts[3]
broadcast = '::'
else:
version = 4
if parts[2] == 'brd':
broadcast = parts[3]
scope = parts[5]
else:
# sometimes output of 'ip a' might look like:
# inet 192.168.100.100/24 scope global eth0
# and broadcast needs to be calculated from CIDR
broadcast = str(netaddr.IPNetwork(parts[1]).broadcast)
scope = parts[3]
retval.append(dict(cidr=parts[1],
broadcast=broadcast,
scope=scope,
ip_version=version,
dynamic=('dynamic' == parts[-1])))
return retval
class IpRouteCommand(IpDeviceCommandBase):
COMMAND = 'route'
def add_gateway(self, gateway, metric=None, table=None):
args = ['replace', 'default', 'via', gateway]
if metric:
args += ['metric', metric]
args += ['dev', self.name]
if table:
args += ['table', table]
self._as_root(*args)
def delete_gateway(self, gateway=None, table=None):
args = ['del', 'default']
if gateway:
args += ['via', gateway]
args += ['dev', self.name]
if table:
args += ['table', table]
self._as_root(*args)
def list_onlink_routes(self):
def iterate_routes():
output = self._run('list', 'dev', self.name, 'scope', 'link')
for line in output.split('\n'):
line = line.strip()
if line and not line.count('src'):
yield line
return [x for x in iterate_routes()]
def add_onlink_route(self, cidr):
self._as_root('replace', cidr, 'dev', self.name, 'scope', 'link')
def delete_onlink_route(self, cidr):
self._as_root('del', cidr, 'dev', self.name, 'scope', 'link')
def get_gateway(self, scope=None, filters=None):
if filters is None:
filters = []
retval = None
if scope:
filters += ['scope', scope]
route_list_lines = self._run('list', 'dev', self.name,
*filters).split('\n')
default_route_line = next((x.strip() for x in
route_list_lines if
x.strip().startswith('default')), None)
if default_route_line:
gateway_index = 2
parts = default_route_line.split()
retval = dict(gateway=parts[gateway_index])
if 'metric' in parts:
metric_index = parts.index('metric') + 1
retval.update(metric=int(parts[metric_index]))
return retval
def pullup_route(self, interface_name):
"""Ensures that the route entry for the interface is before all
others on the same subnet.
"""
device_list = []
device_route_list_lines = self._run('list', 'proto', 'kernel',
'dev', interface_name).split('\n')
for device_route_line in device_route_list_lines:
try:
subnet = device_route_line.split()[0]
except Exception:
continue
subnet_route_list_lines = self._run('list', 'proto', 'kernel',
'match', subnet).split('\n')
for subnet_route_line in subnet_route_list_lines:
i = iter(subnet_route_line.split())
while(i.next() != 'dev'):
pass
device = i.next()
try:
while(i.next() != 'src'):
pass
src = i.next()
except Exception:
src = ''
if device != interface_name:
device_list.append((device, src))
else:
break
for (device, src) in device_list:
self._as_root('del', subnet, 'dev', device)
if (src != ''):
self._as_root('append', subnet, 'proto', 'kernel',
'src', src, 'dev', device)
else:
self._as_root('append', subnet, 'proto', 'kernel',
'dev', device)
def add_route(self, cidr, ip, table=None):
args = ['replace', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
self._as_root(*args)
def delete_route(self, cidr, ip, table=None):
args = ['del', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
self._as_root(*args)
class IpNeighCommand(IpDeviceCommandBase):
COMMAND = 'neigh'
def add(self, ip_version, ip_address, mac_address):
self._as_root('replace',
ip_address,
'lladdr',
mac_address,
'nud',
'permanent',
'dev',
self.name,
options=[ip_version])
def delete(self, ip_version, ip_address, mac_address):
self._as_root('del',
ip_address,
'lladdr',
mac_address,
'dev',
self.name,
options=[ip_version])
class IpNetnsCommand(IpCommandBase):
COMMAND = 'netns'
def add(self, name):
self._as_root('add', name, use_root_namespace=True)
wrapper = IPWrapper(self._parent.root_helper, name)
wrapper.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.promote_secondaries=1'])
return wrapper
def delete(self, name):
self._as_root('delete', name, use_root_namespace=True)
def execute(self, cmds, addl_env=None, check_exit_code=True,
extra_ok_codes=None):
ns_params = []
if self._parent.namespace:
self._parent.enforce_root_helper()
ns_params = ['ip', 'netns', 'exec', self._parent.namespace]
env_params = []
if addl_env:
env_params = (['env'] +
['%s=%s' % pair for pair in addl_env.items()])
return utils.execute(
ns_params + env_params + list(cmds),
root_helper=self._parent.root_helper,
check_exit_code=check_exit_code, extra_ok_codes=extra_ok_codes)
def exists(self, name):
output = self._parent._execute('o', 'netns', ['list'])
for line in output.split('\n'):
if name == line.strip():
return True
return False
def device_exists(device_name, root_helper=None, namespace=None):
"""Return True if the device exists in the namespace."""
try:
dev = IPDevice(device_name, root_helper, namespace)
dev.set_log_fail_as_error(False)
address = dev.link.address
except RuntimeError:
return False
return bool(address)
def device_exists_with_ip_mac(device_name, ip_cidr, mac, namespace=None,
root_helper=None):
"""Return True if the device with the given IP and MAC addresses
exists in the namespace.
"""
try:
device = IPDevice(device_name, root_helper, namespace)
if mac != device.link.address:
return False
if ip_cidr not in (ip['cidr'] for ip in device.addr.list()):
return False
except RuntimeError:
return False
else:
return True
def ensure_device_is_ready(device_name, root_helper=None, namespace=None):
dev = IPDevice(device_name, root_helper, namespace)
dev.set_log_fail_as_error(False)
try:
# Ensure the device is up, even if it is already up. If the device
# doesn't exist, a RuntimeError will be raised.
dev.link.set_up()
except RuntimeError:
return False
return True
def iproute_arg_supported(command, arg, root_helper=None):
command += ['help']
stdout, stderr = utils.execute(command, root_helper=root_helper,
check_exit_code=False, return_stderr=True)
return any(arg in line for line in stderr.split('\n'))
|
leeseuljeong/leeseulstack_neutron
|
neutron/agent/linux/ip_lib.py
|
Python
|
apache-2.0
| 20,419
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume name_id."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import test
from cinder.tests import utils as testutils
CONF = cfg.CONF
class NameIDsTestCase(test.TestCase):
"""Test cases for naming volumes with name_id."""
def setUp(self):
super(NameIDsTestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id')
def tearDown(self):
super(NameIDsTestCase, self).tearDown()
def test_name_id_same(self):
"""New volume should have same 'id' and 'name_id'."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
self.assertEqual(vol_ref['name_id'], vol_ref['id'])
expected_name = CONF.volume_name_template % vol_ref['id']
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_diff(self):
"""Change name ID to mimic volume after migration."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
vol_ref = db.volume_get(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_snapshot_volume_name(self):
"""Make sure snapshot['volume_name'] is updated."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(snap_ref['volume_name'], expected_name)
|
rickerc/cinder_audit
|
cinder/tests/db/test_name_id.py
|
Python
|
apache-2.0
| 2,344
|
# http://www.creatis.insa-lyon.fr/~bernard/creaseg/
# http://ascratchpad.blogspot.com/2011/03/image-segmentation-using-active.html
#------------------------------------------------------------------------
# Region Based Active Contour Segmentation
#
# seg = region_seg(I,init_mask,max_its,alpha,display)
#
# Inputs: I 2D image
# init_mask Initialization (1 = foreground, 0 = bg)
# max_its Number of iterations to run segmentation for
# alpha (optional) Weight of smoothing term
# higer = smoother. default = 0.2
# display (optional) displays intermediate outputs
# default = true
#
# Outputs: seg Final segmentation mask (1=fg, 0=bg)
#
# Description: This code implements the paper: "Active Contours Without
# Edges" By Chan Vese. This is a nice way to segment images whose
# foregrounds and backgrounds are statistically different and homogeneous.
#
# Example:
# img = imread('tire.tif');
# m = zeros(size(img));
# m(33:33+117,44:44+128) = 1;
# seg = region_seg(img,m,500);
#
# Coded by: Shawn Lankton (www.shawnlankton.com)
#------------------------------------------------------------------------
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
eps = np.finfo(np.float).eps
def chanvese(I,init_mask,max_its=200,alpha=0.2,thresh=0,color='r',display=False):
I = I.astype('float')
#-- Create a signed distance map (SDF) from mask
phi = mask2phi(init_mask)
if display:
plt.ion()
showCurveAndPhi(I, phi, color)
plt.savefig('levelset_start.pdf',bbox_inches='tight')
#--main loop
its = 0
stop = False
prev_mask = init_mask
c = 0
while (its < max_its and not stop):
# get the curve's narrow band
idx = np.flatnonzero( np.logical_and( phi <= 1.2, phi >= -1.2) )
if len(idx) > 0:
#-- intermediate output
if display:
if np.mod(its,50) == 0:
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
print 'iteration:', its
showCurveAndPhi(I, phi, color)
else:
if np.mod(its,10) == 0:
print 'iteration:', its
#set(ud.txtInfo1,'string',sprintf('iteration: %d',its),'color',[1 1 0]);
#drawnow;
#-- find interior and exterior mean
upts = np.flatnonzero(phi<=0) # interior points
vpts = np.flatnonzero(phi>0) # exterior points
u = np.sum(I.flat[upts])/(len(upts)+eps) # interior mean
v = np.sum(I.flat[vpts])/(len(vpts)+eps) # exterior mean
F = (I.flat[idx]-u)**2-(I.flat[idx]-v)**2 # force from image information
curvature = get_curvature(phi,idx) # force from curvature penalty
dphidt = F /np.max(np.abs(F)) + alpha*curvature # gradient descent to minimize energy
#-- maintain the CFL condition
dt = 0.45/(np.max(np.abs(dphidt))+eps)
#-- evolve the curve
phi.flat[idx] += dt*dphidt
#-- Keep SDF smooth
phi = sussman(phi, 0.5)
new_mask = phi<=0
c = convergence(prev_mask,new_mask,thresh,c)
if c <= 5:
its = its + 1
prev_mask = new_mask
else: stop = True
else:
break
#-- final output
if display:
showCurveAndPhi(I, phi, color)
#plt.savefig('levelset_end.pdf',bbox_inches='tight')
time.sleep(10)
#-- make mask from SDF
seg = phi<=0 #-- Get mask from levelset
return seg,phi,its
#---------------------------------------------------------------------
#---------------------------------------------------------------------
#-- AUXILIARY FUNCTIONS ----------------------------------------------
#---------------------------------------------------------------------
#---------------------------------------------------------------------
def bwdist(a):
"""
this is an intermediary function, 'a' has only True, False vals,
so we convert them into 0, 1 values -- in reverse. True is 0,
False is 1, distance_transform_edt wants it that way.
"""
return nd.distance_transform_edt(a == 0)
import time
#-- Displays the image with curve superimposed
def showCurveAndPhi(I, phi, color):
# subplot(numRows, numCols, plotNum)
#myplot = plt.subplot(121)
#fig, axes = plt.subplots()
#axes = myplot.axes
#axes.get_xaxis().set_visible(False)
#axes.get_yaxis().set_visible(False)
plt.clf()
plt.imshow(I, cmap='gray')
#plt.hold(True)
CS = plt.contour(phi, 0, colors=color)
plt.draw()
#plt.hold(False)
# myplot = plt.subplot(122)
# axes = myplot.axes
# axes.get_xaxis().set_visible(False)
# axes.get_yaxis().set_visible(False)
# plt.imshow(phi)
plt.draw()
#time.sleep(1)
def im2double(a):
a = a.astype('float')
a /= a.max()
return a
#-- converts a mask to a SDF
def mask2phi(init_a):
phi = bwdist(init_a)-bwdist(1-init_a)+im2double(init_a) -0.5
return phi
#-- compute curvature along SDF
def get_curvature(phi,idx):
dimy, dimx = phi.shape
yx = np.array([np.unravel_index(i, phi.shape)for i in idx]) # get subscripts
y = yx[:,0]
x = yx[:,1]
#-- get subscripts of neighbors
ym1 = y-1; xm1 = x-1; yp1 = y+1; xp1 = x+1;
#-- bounds checking
ym1[ym1<0] = 0; xm1[xm1<0] = 0;
yp1[yp1>=dimy]=dimy - 1; xp1[xp1>=dimx] = dimx - 1;
#-- get indexes for 8 neighbors
idup = np.ravel_multi_index( (yp1,x),phi.shape)
iddn = np.ravel_multi_index( (ym1,x),phi.shape)
idlt = np.ravel_multi_index( (y,xm1),phi.shape)
idrt = np.ravel_multi_index( (y,xp1),phi.shape)
idul = np.ravel_multi_index( (yp1,xm1),phi.shape)
idur = np.ravel_multi_index( (yp1,xp1),phi.shape)
iddl = np.ravel_multi_index( (ym1,xm1),phi.shape)
iddr = np.ravel_multi_index( (ym1,xp1),phi.shape)
#-- get central derivatives of SDF at x,y
phi_x = -phi.flat[idlt]+phi.flat[idrt]
phi_y = -phi.flat[iddn]+phi.flat[idup]
phi_xx = phi.flat[idlt]-2*phi.flat[idx]+phi.flat[idrt]
phi_yy = phi.flat[iddn]-2*phi.flat[idx]+phi.flat[idup]
phi_xy = (-0.25*phi.flat[iddl]-0.25*phi.flat[idur]
+0.25*phi.flat[iddr]+0.25*phi.flat[idul])
phi_x2 = phi_x**2
phi_y2 = phi_y**2
#-- compute curvature (Kappa)
curvature = ( ((phi_x2*phi_yy + phi_y2*phi_xx - 2*phi_x*phi_y*phi_xy)
/ (phi_x2 + phi_y2 +eps)**(3/2))
*(phi_x2 + phi_y2)**(1/2))
return curvature
#-- level set re-initialization by the sussman method
def sussman(D, dt):
# forward/backward differences
a = D - shiftR(D) # backward
b = shiftL(D) - D # forward
c = D - shiftD(D) # backward
d = shiftU(D) - D # forward
a_p = a.copy(); a_n = a.copy(); # a+ and a-
b_p = b.copy(); b_n = b.copy();
c_p = c.copy(); c_n = c.copy();
d_p = d.copy(); d_n = d.copy();
a_p[a < 0] = 0
a_n[a > 0] = 0
b_p[b < 0] = 0
b_n[b > 0] = 0
c_p[c < 0] = 0
c_n[c > 0] = 0
d_p[d < 0] = 0
d_n[d > 0] = 0
dD = np.zeros(D.shape)
D_neg_ind = np.flatnonzero(D < 0)
D_pos_ind = np.flatnonzero(D > 0)
dD.flat[D_pos_ind] = np.sqrt( np.max( np.concatenate( ([a_p.flat[D_pos_ind]**2],
[b_n.flat[D_pos_ind]**2]) ),
axis=0
)
+ np.max( np.concatenate( ([c_p.flat[D_pos_ind]**2],
[d_n.flat[D_pos_ind]**2])),
axis=0
)
) - 1
dD.flat[D_neg_ind] = np.sqrt( np.max( np.concatenate( ([a_n.flat[D_neg_ind]**2],
[b_p.flat[D_neg_ind]**2])),
axis=0
)
+ np.max( np.concatenate( ([c_n.flat[D_neg_ind]**2],
[d_p.flat[D_neg_ind]**2]) ),
axis=0
)
) - 1
D = D - dt * sussman_sign(D) * dD
return D
#-- whole matrix derivatives
def shiftD(M):
return shiftR(M.transpose()).transpose()
def shiftL(M):
#shift = np.concatenate( (M[:,1:], np.zeros((M.shape[1],1))), axis=1 )
#shift = np.concatenate( (M[:,1:], M[:,-1]), axis=1 )
shift = M[:,range(1,M.shape[1])+[M.shape[1]-1]]
return shift
def shiftR(M):
#shift = np.concatenate( (np.zeros((M.shape[1],1)), M[:,:-1]), axis=1 )
#shift = np.concatenate( (M[:,0], M[:,:-1]), axis=1 )
shift = M[:,[0]+range(0,M.shape[1]-1)]
return shift
def shiftU(M):
return shiftL(M.transpose()).transpose()
def sussman_sign(D):
return D / np.sqrt(D**2 + 1)
# Convergence Test
def convergence(p_mask,n_mask,thresh,c):
diff = p_mask - n_mask
n_diff = np.sum(np.abs(diff))
if n_diff < thresh:
c = c + 1
else:
c = 0
return c
if __name__ == "__main__":
import cv2
img = cv2.imread("/home/kevin/Imperial/PhD/DATASETS/Training/positive/246_cropped_c/8.png_0022_0115_0117_0132_0132_0.png",0)
#img = nd.imread('sagittal8.png')
mask = np.zeros(img.shape)
mask[55:65,55:65] = 1
chanvese(img,mask,max_its=2000,display=True,alpha=0.1)
|
BioMedIA/irtk-legacy
|
wrapping/cython/irtk/ext/chanvese.py
|
Python
|
bsd-3-clause
| 9,929
|
"""
/******************************************************************************
This source file is part of the Avogadro project.
Copyright 2013 Kitware, Inc.
This source code is released under the New BSD License, (the "License").
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
"""
import argparse
import json
import sys
def getMetaData():
metaData = {}
metaData['inputFormat'] = 'xyz'
metaData['outputFormat'] = 'xyz'
metaData['operations'] = ['read', 'write']
metaData['identifier'] = 'ZYX Example Format'
metaData['name'] = 'ZYX'
metaData['description'] = "Mostly useless file format that reads xyz-style " +\
"files with reversed coordinates. Demonstrates " +\
"the implementation of a user-scripted file format."
metaData['fileExtensions'] = ['zyx']
metaData['mimeTypes'] = ['chemical/x-zyx']
return metaData
def write():
result = ""
# Just copy the first two lines: numAtoms and comment/title
result += sys.stdin.readline()
result += sys.stdin.readline()
for line in sys.stdin:
words = line.split()
result += '%-3s %9.5f %9.5f %9.5f' %\
(words[0], float(words[3]), float(words[2]), float(words[1]))
if len(words) > 4:
result += words[4:].join(' ')
result += '\n'
return result
def read():
result = ""
# Just copy the first two lines: numAtoms and comment/title
result += sys.stdin.readline()
result += sys.stdin.readline()
for line in sys.stdin:
words = line.split()
result += '%-3s %9.5f %9.5f %9.5f' %\
(words[0], float(words[3]), float(words[2]), float(words[1]))
if len(words) > 4:
result += words[4:].join(' ')
result += '\n'
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser('Example file format script.')
parser.add_argument('--metadata', action='store_true')
parser.add_argument('--read', action='store_true')
parser.add_argument('--write', action='store_true')
parser.add_argument('--display-name', action='store_true')
parser.add_argument('--lang', nargs='?', default='en')
args = vars(parser.parse_args())
if args['metadata']:
print(json.dumps(getMetaData()))
elif args['display_name']:
print(getMetaData()['name'])
elif args['read']:
print(read())
elif args['write']:
print(write())
|
OpenChemistry/avogadrolibs
|
avogadro/qtplugins/scriptfileformats/formatScripts/zyx.py
|
Python
|
bsd-3-clause
| 2,841
|
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This implementation of {@link TokenStream} loads tokens from a
# {@link TokenSource} on-demand, and places the tokens in a buffer to provide
# access to any previous token by index.
#
# <p>
# This token stream ignores the value of {@link Token#getChannel}. If your
# parser requires the token stream filter tokens to only those on a particular
# channel, such as {@link Token#DEFAULT_CHANNEL} or
# {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a
# {@link CommonTokenStream}.</p>
from io import StringIO
from antlr4.Token import Token
from antlr4.error.Errors import IllegalStateException
# this is just to keep meaningful parameter types to Parser
class TokenStream(object):
pass
class BufferedTokenStream(TokenStream):
def __init__(self, tokenSource):
# The {@link TokenSource} from which tokens for this stream are fetched.
self.tokenSource = tokenSource
# A collection of all tokens fetched from the token source. The list is
# considered a complete view of the input once {@link #fetchedEOF} is set
# to {@code true}.
self.tokens = []
# The index into {@link #tokens} of the current token (next token to
# {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
# {@link #LT LT(1)}.
#
# <p>This field is set to -1 when the stream is first constructed or when
# {@link #setTokenSource} is called, indicating that the first token has
# not yet been fetched from the token source. For additional information,
# see the documentation of {@link IntStream} for a description of
# Initializing Methods.</p>
self.index = -1
# Indicates whether the {@link Token#EOF} token has been fetched from
# {@link #tokenSource} and added to {@link #tokens}. This field improves
# performance for the following cases:
#
# <ul>
# <li>{@link #consume}: The lookahead check in {@link #consume} to prevent
# consuming the EOF symbol is optimized by checking the values of
# {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.</li>
# <li>{@link #fetch}: The check to prevent adding multiple EOF symbols into
# {@link #tokens} is trivial with this field.</li>
# <ul>
self.fetchedEOF = False
def mark(self):
return 0
def release(self, marker):
# no resources to release
pass
def reset(self):
self.seek(0)
def seek(self, index):
self.lazyInit()
self.index = self.adjustSeekIndex(index)
def get(self, index):
self.lazyInit()
return self.tokens[index]
def consume(self):
skipEofCheck = False
if self.index >= 0:
if self.fetchedEOF:
# the last token in tokens is EOF. skip check if p indexes any
# fetched token except the last.
skipEofCheck = self.index < len(self.tokens) - 1
else:
# no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = self.index < len(self.tokens)
else:
# not yet initialized
skipEofCheck = False
if not skipEofCheck and self.LA(1) == Token.EOF:
raise IllegalStateException("cannot consume EOF")
if self.sync(self.index + 1):
self.index = self.adjustSeekIndex(self.index + 1)
# Make sure index {@code i} in tokens has a token.
#
# @return {@code true} if a token is located at index {@code i}, otherwise
# {@code false}.
# @see #get(int i)
#/
def sync(self, i):
assert i >= 0
n = i - len(self.tokens) + 1 # how many more elements we need?
if n > 0 :
fetched = self.fetch(n)
return fetched >= n
return True
# Add {@code n} elements to buffer.
#
# @return The actual number of elements added to the buffer.
#/
def fetch(self, n):
if self.fetchedEOF:
return 0
for i in range(0, n):
t = self.tokenSource.nextToken()
t.tokenIndex = len(self.tokens)
self.tokens.append(t)
if t.type==Token.EOF:
self.fetchedEOF = True
return i + 1
return n
# Get all tokens from start..stop inclusively#/
def getTokens(self, start, stop, types=None):
if start<0 or stop<0:
return None
self.lazyInit()
subset = []
if stop >= len(self.tokens):
stop = len(self.tokens)-1
for i in range(start, stop):
t = self.tokens[i]
if t.type==Token.EOF:
break
if types is None or t.type in types:
subset.append(t)
return subset
def LA(self, i):
return self.LT(i).type
def LB(self, k):
if (self.index-k) < 0:
return None
return self.tokens[self.index-k]
def LT(self, k):
self.lazyInit()
if k==0:
return None
if k < 0:
return self.LB(-k)
i = self.index + k - 1
self.sync(i)
if i >= len(self.tokens): # return EOF token
# EOF must be last token
return self.tokens[len(self.tokens)-1]
return self.tokens[i]
# Allowed derived classes to modify the behavior of operations which change
# the current stream position by adjusting the target token index of a seek
# operation. The default implementation simply returns {@code i}. If an
# exception is thrown in this method, the current stream index should not be
# changed.
#
# <p>For example, {@link CommonTokenStream} overrides this method to ensure that
# the seek target is always an on-channel token.</p>
#
# @param i The target token index.
# @return The adjusted target token index.
def adjustSeekIndex(self, i):
return i
def lazyInit(self):
if self.index == -1:
self.setup()
def setup(self):
self.sync(0)
self.index = self.adjustSeekIndex(0)
# Reset this token stream by setting its token source.#/
def setTokenSource(self, tokenSource):
self.tokenSource = tokenSource
self.tokens = []
self.index = -1
# Given a starting index, return the index of the next token on channel.
# Return i if tokens[i] is on channel. Return -1 if there are no tokens
# on channel between i and EOF.
#/
def nextTokenOnChannel(self, i, channel):
self.sync(i)
if i>=len(self.tokens):
return -1
token = self.tokens[i]
while token.channel!=channel:
if token.type==Token.EOF:
return -1
i += 1
self.sync(i)
token = self.tokens[i]
return i
# Given a starting index, return the index of the previous token on channel.
# Return i if tokens[i] is on channel. Return -1 if there are no tokens
# on channel between i and 0.
def previousTokenOnChannel(self, i, channel):
while i>=0 and self.tokens[i].channel!=channel:
i -= 1
return i
# Collect all tokens on specified channel to the right of
# the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or
# EOF. If channel is -1, find any non default channel token.
def getHiddenTokensToRight(self, tokenIndex, channel=-1):
self.lazyInit()
if tokenIndex<0 or tokenIndex>=len(self.tokens):
raise Exception(str(tokenIndex) + " not in 0.." + str(len(self.tokens)-1))
from antlr4.Lexer import Lexer
nextOnChannel = self.nextTokenOnChannel(tokenIndex + 1, Lexer.DEFAULT_TOKEN_CHANNEL)
from_ = tokenIndex+1
# if none onchannel to right, nextOnChannel=-1 so set to = last token
to = (len(self.tokens)-1) if nextOnChannel==-1 else nextOnChannel
return self.filterForChannel(from_, to, channel)
# Collect all tokens on specified channel to the left of
# the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
# If channel is -1, find any non default channel token.
def getHiddenTokensToLeft(self, tokenIndex, channel=-1):
self.lazyInit()
if tokenIndex<0 or tokenIndex>=len(self.tokens):
raise Exception(str(tokenIndex) + " not in 0.." + str(len(self.tokens)-1))
from antlr4.Lexer import Lexer
prevOnChannel = self.previousTokenOnChannel(tokenIndex - 1, Lexer.DEFAULT_TOKEN_CHANNEL)
if prevOnChannel == tokenIndex - 1:
return None
# if none on channel to left, prevOnChannel=-1 then from=0
from_ = prevOnChannel+1
to = tokenIndex-1
return self.filterForChannel(from_, to, channel)
def filterForChannel(self, left, right, channel):
hidden = []
for i in range(left, right+1):
t = self.tokens[i]
if channel==-1:
from antlr4.Lexer import Lexer
if t.channel!= Lexer.DEFAULT_TOKEN_CHANNEL:
hidden.append(t)
elif t.channel==channel:
hidden.append(t)
if len(hidden)==0:
return None
return hidden
def getSourceName(self):
return self.tokenSource.getSourceName()
# Get the text of all tokens in this buffer.#/
def getText(self, interval=None):
self.lazyInit()
self.fill()
if interval is None:
interval = (0, len(self.tokens)-1)
start = interval[0]
if isinstance(start, Token):
start = start.tokenIndex
stop = interval[1]
if isinstance(stop, Token):
stop = stop.tokenIndex
if start is None or stop is None or start<0 or stop<0:
return ""
if stop >= len(self.tokens):
stop = len(self.tokens)-1
with StringIO() as buf:
for i in range(start, stop+1):
t = self.tokens[i]
if t.type==Token.EOF:
break
buf.write(t.text)
return buf.getvalue()
# Get all tokens from lexer until EOF#/
def fill(self):
self.lazyInit()
while self.fetch(1000)==1000:
pass
|
cocosli/antlr4
|
runtime/Python2/src/antlr4/BufferedTokenStream.py
|
Python
|
bsd-3-clause
| 11,959
|
#!/usr/bin/env python
# GoodFET SPI and SPIFlash Client Library
#
# (C) 2009 Travis Goodspeed <travis at radiantmachines.com>
#
# This code is being rewritten and refactored. You've been warned!
import sys, time, string, cStringIO, struct, glob, os;
from GoodFET import GoodFET;
class GoodFETAVR(GoodFET):
AVRAPP=0x32;
APP=AVRAPP;
AVRVendors={0x1E: "Atmel",
0x00: "Locked",
};
#List imported from http://avr.fenceline.de/device_data.html
AVRDevices={
0x9003: "ATtiny10",
0x9004: "ATtiny11",
0x9005: "ATtiny12",
0x9007: "ATtiny13",
0x9006: "ATtiny15",
0x9106: "ATtiny22",
0x910A: "ATtiny2313",
0x9108: "ATtiny25",
0x9109: "ATtiny26",
0x9107: "ATtiny28",
0x9206: "ATtiny45",
0x930B: "ATtiny85",
0x9304: "AT90C8534",
0x9001: "AT90S1200",
0x9101: "AT90S2313",
0x9102: "AT90S2323",
0x9105: "AT90S2333",
0x9103: "AT90S2343",
0x9201: "AT90S4414",
0x9203: "AT90S4433",
0x9202: "AT90S4434",
0x9301: "AT90S8515",
0x9303: "AT90S8535",
0x9381: "AT90PWM2",
0x9381: "AT90PWM3",
0x9781: "AT90CAN128",
0x9205: "ATmega48",
0x9306: "ATmega8515",
0x9308: "ATmega8535",
0x9307: "ATmega8",
0x930A: "ATmega88",
0x9403: "ATmega16",
0x9401: "ATmega161",
0x9404: "ATmega162",
0x9402: "ATmega163",
0x9407: "ATmega165",
0x9406: "ATmega168",
0x9405: "ATmega169",
0x9502: "ATmega32",
0x958a: "ATmega32U2", #TODO add the other U series.
0x9501: "ATmega323",
0x9503: "ATmega325",
0x9504: "ATmega3250",
0x9503: "ATmega329",
0x9504: "ATmega3290",
0x9507: "ATmega406",
0x9602: "ATmega64",
0x9607: "ATmega640",
0x9603: "ATmega645",
0x9604: "ATmega6450",
0x9603: "ATmega649",
0x9604: "ATmega6490",
0x0101: "ATmega103",
0x9701: "ATmega103",
0x9702: "ATmega128",
0x9703: "ATmega1280",
0x9704: "ATmega1281",
0x9801: "ATmega2560",
0x9802: "ATmega2561",
0x9002: "ATtiny19",
0x9302: "ATmega85",
0x9305: "ATmega83",
0x9601: "ATmega603",
#These are missing from the Fenceline DB.
0x960a: "ATmega644P",
};
def setup(self):
"""Move the FET into the AVR application."""
self.writecmd(self.AVRAPP,0x10,0,self.data); #SPI/SETUP
def trans(self,data):
"""Exchange data by AVR.
Input should probably be 4 bytes."""
self.data=data;
self.writecmd(self.AVRAPP,0x00,len(data),data);
return self.data;
def start(self):
"""Start the connection."""
self.writecmd(self.AVRAPP,0x20,0,None);
def forcestart(self):
"""Forcibly start a connection."""
for i in range(0x880,0xfff):
#self.glitchVoltages(0x880, i);
self.start();
bits=self.lockbits();
print "At %04x, Lockbits: %02x" % (i,bits);
if(bits==0xFF): return;
def erase(self):
"""Erase the target chip."""
self.writecmd(self.AVRAPP,0xF0,0,None);
def lockbits(self):
"""Read the target's lockbits."""
self.writecmd(self.AVRAPP,0x82,0,None);
return ord(self.data[0]);
def setlockbits(self,bits=0x00):
"""Read the target's lockbits."""
self.writecmd(self.AVRAPP,0x92,1,[bits]);
return self.lockbits();
def lock(self):
self.setlockbits(0xFC);
def eeprompeek(self, adr):
"""Read a byte of the target's EEPROM."""
self.writecmd(self.AVRAPP,0x81 ,2,
[ (adr&0xFF), (adr>>8)]
);#little-endian address
return ord(self.data[0]);
def flashpeek(self, adr):
"""Read a byte of the target's Flash memory."""
self.writecmd(self.AVRAPP,0x02 ,2,
[ (adr&0xFF), (adr>>8)]
);#little-endian address
return ord(self.data[0]);
def flashpeekblock(self, adr):
"""Read a byte of the target's Flash memory."""
self.writecmd(self.AVRAPP,0x02 ,4,
[ (adr&0xFF), (adr>>8) &0xFF, 0x80, 0x00]
);
return self.data;
def eeprompoke(self, adr, val):
"""Write a byte of the target's EEPROM."""
self.writecmd(self.AVRAPP,0x91 ,3,
[ (adr&0xFF), (adr>>8), val]
);#little-endian address
return ord(self.data[0]);
def identstr(self):
"""Return an identifying string."""
self.writecmd(self.AVRAPP,0x83,0, None);
vendor=self.AVRVendors.get(ord(self.data[0]));
deviceid=(ord(self.data[1])<<8)+ord(self.data[2]);
device=self.AVRDevices.get(deviceid);
#Return hex if device is unknown.
#They are similar enough that it needn't be known.
if device==None:
device=("0x%04x" % deviceid);
return "%s %s" % (vendor,device);
|
rfmcpherson/killerbee
|
killerbee/GoodFETAVR.py
|
Python
|
bsd-3-clause
| 5,235
|
#!/usr/bin/env python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2017 Intel Corporation.
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lsm303d as sensorObj
def main():
# Instantiate a BMP250E instance using default i2c bus and address
sensor = sensorObj.LSM303D()
## Exit handlers ##
# This function stops python from printing a stacktrace when you
# hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# now output data every 250 milliseconds
while (1):
sensor.update()
data = sensor.getAccelerometer()
print("Accelerometer x:", data[0], end=' ')
print(" y:", data[1], end=' ')
print(" z:", data[2], end=' ')
print(" g")
data = sensor.getMagnetometer()
print("Magnetometer x:", data[0], end=' ')
print(" y:", data[1], end=' ')
print(" z:", data[2], end=' ')
print(" uT")
print("Temperature: ", sensor.getTemperature())
print()
time.sleep(.250)
if __name__ == '__main__':
main()
|
pylbert/upm
|
examples/python/lsm303d.py
|
Python
|
mit
| 2,423
|
#!/usr/bin/env python
# coding: utf-8
import unittest
import sys
import os
PROJECT_PATH = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
ROOT_PATH = os.path.dirname(__file__)
if __name__ == '__main__':
if 'GAE_SDK' in os.environ:
SDK_PATH = os.environ['GAE_SDK']
sys.path.insert(0, SDK_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
sys.path.append(os.path.join(PROJECT_PATH, 'src'))
tests = unittest.TestLoader().discover(ROOT_PATH, "*.py")
result = unittest.TextTestRunner().run(tests)
if not result.wasSuccessful():
sys.exit(1)
|
renzon/blob_app
|
test/testloader.py
|
Python
|
mit
| 630
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!".encode('utf8'))
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://127.0.0.1:9000")
sys.exit(1)
if len(sys.argv) > 2 and sys.argv[2] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WebSocketClientFactory(sys.argv[1],
debug=debug,
debugCodePaths=debug)
factory.protocol = EchoClientProtocol
connectWS(factory)
reactor.run()
|
nucular/AutobahnPython
|
examples/twisted/websocket/echo_variants/client.py
|
Python
|
mit
| 2,386
|
# Django settings for test_remote_project project.
import os.path
import posixpath
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, 'fixtures'),
]
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'db.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'mzdvd*#0=$g(-!v_vj_7^(=zrh3klia(u&cqd3nr7p^khh^ui#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_remote_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_remote_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'cities_light',
'djangorestframework',
'south',
'autocomplete_light',
'remote_autocomplete',
'remote_autocomplete_inline',
'navigation_autocomplete',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
'cities_light': {
'handlers':['console'],
'propagate': True,
'level':'DEBUG',
},
}
}
|
dsanders11/django-autocomplete-light
|
test_remote_project/test_remote_project/settings.py
|
Python
|
mit
| 5,785
|
"""engine.SCons.Tool.f03
Tool-specific initialization for the generic Posix f03 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f03.py 2014/07/05 09:42:21 garyo"
import SCons.Defaults
import SCons.Tool
import SCons.Util
import fortran
from SCons.Tool.FortranCommon import add_all_to_env, add_f03_to_env
compilers = ['f03']
def generate(env):
add_all_to_env(env)
add_f03_to_env(env)
fcomp = env.Detect(compilers) or 'f03'
env['F03'] = fcomp
env['SHF03'] = fcomp
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = fcomp
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Bforartists/scons
|
scons-local/SCons/Tool/f03.py
|
Python
|
mit
| 1,990
|
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Test for the fdt modules
import os
import sys
import tempfile
import unittest
from dtoc import fdt
from dtoc import fdt_util
from dtoc.fdt import FdtScan
from patman import tools
class TestFdt(unittest.TestCase):
@classmethod
def setUpClass(self):
self._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
self._indir = tempfile.mkdtemp(prefix='binmant.')
tools.PrepareOutputDir(self._indir, True)
@classmethod
def tearDownClass(self):
tools._FinaliseForTest()
def TestFile(self, fname):
return os.path.join(self._binman_dir, 'test', fname)
def GetCompiled(self, fname):
return fdt_util.EnsureCompiled(self.TestFile(fname))
def _DeleteProp(self, dt):
node = dt.GetNode('/microcode/update@0')
node.DeleteProp('data')
def testFdtNormal(self):
fname = self.GetCompiled('034_x86_ucode.dts')
dt = FdtScan(fname)
self._DeleteProp(dt)
def testFdtNormalProp(self):
fname = self.GetCompiled('045_prop_test.dts')
dt = FdtScan(fname)
node = dt.GetNode('/binman/intel-me')
self.assertEquals('intel-me', node.name)
val = fdt_util.GetString(node, 'filename')
self.assertEquals(str, type(val))
self.assertEquals('me.bin', val)
prop = node.props['intval']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(3, fdt_util.GetInt(node, 'intval'))
prop = node.props['intarray']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(2, len(prop.value))
self.assertEquals([5, 6],
[fdt_util.fdt32_to_cpu(val) for val in prop.value])
prop = node.props['byteval']
self.assertEquals(fdt.TYPE_BYTE, prop.type)
self.assertEquals(chr(8), prop.value)
prop = node.props['bytearray']
self.assertEquals(fdt.TYPE_BYTE, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(str, type(prop.value[0]))
self.assertEquals(3, len(prop.value))
self.assertEquals([chr(1), '#', '4'], prop.value)
prop = node.props['longbytearray']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray'))
prop = node.props['stringval']
self.assertEquals(fdt.TYPE_STRING, prop.type)
self.assertEquals('message2', fdt_util.GetString(node, 'stringval'))
prop = node.props['stringarray']
self.assertEquals(fdt.TYPE_STRING, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(3, len(prop.value))
self.assertEquals(['another', 'multi-word', 'message'], prop.value)
|
Digilent/u-boot-digilent
|
tools/binman/fdt_test.py
|
Python
|
gpl-2.0
| 2,920
|
import datetime
import gzip
from itertools import count
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import sys
import threading
import time
import urllib
import cherrypy
from cherrypy._cpcompat import next, ntob, quote, xrange
from cherrypy.lib import httputil
gif_bytes = ntob(
'GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00'
'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
'\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;'
)
from cherrypy.test import helper
class CacheTest(helper.CPWebCase):
def setup_server():
@cherrypy.config(**{'tools.caching.on': True})
class Root:
def __init__(self):
self.counter = 0
self.control_counter = 0
self.longlock = threading.Lock()
@cherrypy.expose
def index(self):
self.counter += 1
msg = "visit #%s" % self.counter
return msg
@cherrypy.expose
def control(self):
self.control_counter += 1
return "visit #%s" % self.control_counter
@cherrypy.expose
def a_gif(self):
cherrypy.response.headers[
'Last-Modified'] = httputil.HTTPDate()
return gif_bytes
@cherrypy.expose
def long_process(self, seconds='1'):
try:
self.longlock.acquire()
time.sleep(float(seconds))
finally:
self.longlock.release()
return 'success!'
@cherrypy.expose
def clear_cache(self, path):
cherrypy._cache.store[cherrypy.request.base + path].clear()
@cherrypy.config(**{
'tools.caching.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Vary', 'Our-Varying-Header')
],
})
class VaryHeaderCachingServer(object):
def __init__(self):
self.counter = count(1)
@cherrypy.expose
def index(self):
return "visit #%s" % next(self.counter)
@cherrypy.config(**{
'tools.expires.on': True,
'tools.expires.secs': 60,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
})
class UnCached(object):
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 0})
def force(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
self._cp_config['tools.expires.force'] = True
self._cp_config['tools.expires.secs'] = 0
return "being forceful"
@cherrypy.expose
def dynamic(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
cherrypy.response.headers['Cache-Control'] = 'private'
return "D-d-d-dynamic!"
@cherrypy.expose
def cacheable(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
return "Hi, I'm cacheable."
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 86400})
def specific(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return "I am being specific"
class Foo(object):
pass
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': Foo()})
def wrongtype(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return "Woops"
cherrypy.tree.mount(Root())
cherrypy.tree.mount(UnCached(), "/expires")
cherrypy.tree.mount(VaryHeaderCachingServer(), "/varying_headers")
cherrypy.config.update({'tools.gzip.on': True})
setup_server = staticmethod(setup_server)
def testCaching(self):
elapsed = 0.0
for trial in range(10):
self.getPage("/")
# The response should be the same every time,
# except for the Age response header.
self.assertBody('visit #1')
if trial != 0:
age = int(self.assertHeader("Age"))
self.assert_(age >= elapsed)
elapsed = age
# POST, PUT, DELETE should not be cached.
self.getPage("/", method="POST")
self.assertBody('visit #2')
# Because gzip is turned on, the Vary header should always Vary for
# content-encoding
self.assertHeader('Vary', 'Accept-Encoding')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage("/", method="GET")
self.assertBody('visit #3')
# ...but this request should get the cached copy.
self.getPage("/", method="GET")
self.assertBody('visit #3')
self.getPage("/", method="DELETE")
self.assertBody('visit #4')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertHeader('Vary')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), ntob("visit #5"))
# Now check that a second request gets the gzip header and gzipped body
# This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped
# response body was being gzipped a second time.
self.getPage("/", method="GET", headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), ntob("visit #5"))
# Now check that a third request that doesn't accept gzip
# skips the cache (because the 'Vary' header denies it).
self.getPage("/", method="GET")
self.assertNoHeader('Content-Encoding')
self.assertBody('visit #6')
def testVaryHeader(self):
self.getPage("/varying_headers/")
self.assertStatus("200 OK")
self.assertHeaderItemValue('Vary', 'Our-Varying-Header')
self.assertBody('visit #1')
# Now check that different 'Vary'-fields don't evict each other.
# This test creates 2 requests with different 'Our-Varying-Header'
# and then tests if the first one still exists.
self.getPage("/varying_headers/",
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus("200 OK")
self.assertBody('visit #2')
self.getPage("/varying_headers/",
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus("200 OK")
self.assertBody('visit #2')
self.getPage("/varying_headers/")
self.assertStatus("200 OK")
self.assertBody('visit #1')
def testExpiresTool(self):
# test setting an expires header
self.getPage("/expires/specific")
self.assertStatus("200 OK")
self.assertHeader("Expires")
# test exceptions for bad time values
self.getPage("/expires/wrongtype")
self.assertStatus(500)
self.assertInBody("TypeError")
# static content should not have "cache prevention" headers
self.getPage("/expires/index.html")
self.assertStatus("200 OK")
self.assertNoHeader("Pragma")
self.assertNoHeader("Cache-Control")
self.assertHeader("Expires")
# dynamic content that sets indicators should not have
# "cache prevention" headers
self.getPage("/expires/cacheable")
self.assertStatus("200 OK")
self.assertNoHeader("Pragma")
self.assertNoHeader("Cache-Control")
self.assertHeader("Expires")
self.getPage('/expires/dynamic')
self.assertBody("D-d-d-dynamic!")
# the Cache-Control header should be untouched
self.assertHeader("Cache-Control", "private")
self.assertHeader("Expires")
# configure the tool to ignore indicators and replace existing headers
self.getPage("/expires/force")
self.assertStatus("200 OK")
# This also gives us a chance to test 0 expiry with no other headers
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
# static content should now have "cache prevention" headers
self.getPage("/expires/index.html")
self.assertStatus("200 OK")
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
# the cacheable handler should now have "cache prevention" headers
self.getPage("/expires/cacheable")
self.assertStatus("200 OK")
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
self.getPage('/expires/dynamic')
self.assertBody("D-d-d-dynamic!")
# dynamic sets Cache-Control to private but it should be
# overwritten here ...
self.assertHeader("Pragma", "no-cache")
if cherrypy.server.protocol_version == "HTTP/1.1":
self.assertHeader("Cache-Control", "no-cache, must-revalidate")
self.assertHeader("Expires", "Sun, 28 Jan 2007 00:00:00 GMT")
def testLastModified(self):
self.getPage("/a.gif")
self.assertStatus(200)
self.assertBody(gif_bytes)
lm1 = self.assertHeader("Last-Modified")
# this request should get the cached copy.
self.getPage("/a.gif")
self.assertStatus(200)
self.assertBody(gif_bytes)
self.assertHeader("Age")
lm2 = self.assertHeader("Last-Modified")
self.assertEqual(lm1, lm2)
# this request should match the cached copy, but raise 304.
self.getPage("/a.gif", [('If-Modified-Since', lm1)])
self.assertStatus(304)
self.assertNoHeader("Last-Modified")
if not getattr(cherrypy.server, "using_apache", False):
self.assertHeader("Age")
def test_antistampede(self):
SECONDS = 4
# We MUST make an initial synchronous request in order to create the
# AntiStampedeCache object, and populate its selecting_headers,
# before the actual stampede.
self.getPage("/long_process?seconds=%d" % SECONDS)
self.assertBody('success!')
self.getPage("/clear_cache?path=" +
quote('/long_process?seconds=%d' % SECONDS, safe=''))
self.assertStatus(200)
start = datetime.datetime.now()
def run():
self.getPage("/long_process?seconds=%d" % SECONDS)
# The response should be the same every time
self.assertBody('success!')
ts = [threading.Thread(target=run) for i in xrange(100)]
for t in ts:
t.start()
for t in ts:
t.join()
self.assertEqualDates(start, datetime.datetime.now(),
# Allow a second (two, for slow hosts)
# for our thread/TCP overhead etc.
seconds=SECONDS + 2)
def test_cache_control(self):
self.getPage("/control")
self.assertBody('visit #1')
self.getPage("/control")
self.assertBody('visit #1')
self.getPage("/control", headers=[('Cache-Control', 'no-cache')])
self.assertBody('visit #2')
self.getPage("/control")
self.assertBody('visit #2')
self.getPage("/control", headers=[('Pragma', 'no-cache')])
self.assertBody('visit #3')
self.getPage("/control")
self.assertBody('visit #3')
time.sleep(1)
self.getPage("/control", headers=[('Cache-Control', 'max-age=0')])
self.assertBody('visit #4')
self.getPage("/control")
self.assertBody('visit #4')
|
flakey-bit/plugin.audio.spotify
|
resources/libs/cherrypy/test/test_caching.py
|
Python
|
gpl-3.0
| 12,738
|
import orange
data = orange.ExampleTable("lenses")
print "\nAssociation rules"
rules = orange.AssociationRulesInducer(data, support = 0.3)
for r in rules:
print "%5.3f %5.3f %s" % (r.support, r.confidence, r)
print "\nClassification rules"
rules = orange.AssociationRulesInducer(data, support = 0.3, classificationRules = 1)
for r in rules:
print "%5.3f %5.3f %s" % (r.support, r.confidence, r)
|
yzl0083/orange
|
Orange/testing/regression/tests_20/reference_assoc.py
|
Python
|
gpl-3.0
| 410
|
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
import urlparse
from api.base.settings.defaults import API_BASE
from website.identifiers.model import Identifier
from tests.base import ApiTestCase
from osf_tests.factories import (
RegistrationFactory,
AuthUserFactory,
IdentifierFactory,
NodeFactory,
)
class TestIdentifierDetail(ApiTestCase):
def setUp(self):
super(TestIdentifierDetail, self).setUp()
self.user = AuthUserFactory()
self.registration = RegistrationFactory(creator=self.user, is_public=True)
self.registration_identifier = IdentifierFactory(referent=self.registration)
self.registration_url = '/{}identifiers/{}/'.format(API_BASE, self.registration_identifier._id)
self.node = NodeFactory(creator=self.user, is_public=True)
self.node_identifier = IdentifierFactory(referent=self.node)
self.node_url = '/{}identifiers/{}/'.format(API_BASE, self.node_identifier._id)
self.registration_res = self.app.get(self.registration_url)
self.registration_data = self.registration_res.json['data']
self.node_res = self.app.get(self.node_url)
self.node_data = self.node_res.json['data']
def test_identifier_detail_success_registration(self):
assert_equal(self.registration_res.status_code, 200)
assert_equal(self.registration_res.content_type, 'application/vnd.api+json')
def test_identifier_detail_success_node(self):
assert_equal(self.node_res.status_code, 200)
assert_equal(self.node_res.content_type, 'application/vnd.api+json')
def test_identifier_detail_returns_correct_referent_registration(self):
path = urlparse.urlparse(self.registration_data['relationships']['referent']['links']['related']['href']).path
assert_equal('/{}registrations/{}/'.format(API_BASE, self.registration._id), path)
def test_identifier_detail_returns_correct_referent_node(self):
path = urlparse.urlparse(self.node_data['relationships']['referent']['links']['related']['href']).path
assert_equal('/{}nodes/{}/'.format(API_BASE, self.node._id), path)
def test_identifier_detail_returns_correct_category_registration(self):
assert_equal(self.registration_data['attributes']['category'], self.registration_identifier.category)
def test_identifier_detail_returns_correct_category_node(self):
assert_equal(self.node_data['attributes']['category'], self.node_identifier.category)
def test_identifier_detail_returns_correct_value_registration(self):
assert_equal(self.registration_data['attributes']['value'], self.registration_identifier.value)
def test_identifier_detail_returns_correct_value_node(self):
assert_equal(self.node_data['attributes']['value'], self.node_identifier.value)
|
hmoco/osf.io
|
api_tests/identifiers/views/test_identifier_detail.py
|
Python
|
apache-2.0
| 2,841
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for [N, height, width] numpy arrays representing masks.
Example mask operations that are supported:
* Areas: compute mask areas
* IOU: pairwise intersection-over-union scores
"""
import numpy as np
EPSILON = 1e-7
def area(masks):
"""Computes area of masks.
Args:
masks: Numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N*1] representing mask areas.
Raises:
ValueError: If masks.dtype is not np.uint8
"""
if masks.dtype != np.uint8:
raise ValueError('Masks type should be np.uint8')
return np.sum(masks, axis=(1, 2), dtype=np.float32)
def intersection(masks1, masks2):
"""Compute pairwise intersection areas between masks.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding M masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N*M] representing pairwise intersection area.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
n = masks1.shape[0]
m = masks2.shape[0]
answer = np.zeros([n, m], dtype=np.float32)
for i in np.arange(n):
for j in np.arange(m):
answer[i, j] = np.sum(np.minimum(masks1[i], masks2[j]), dtype=np.float32)
return answer
def iou(masks1, masks2):
"""Computes pairwise intersection-over-union between mask collections.
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
area1 = area(masks1)
area2 = area(masks2)
union = np.expand_dims(area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / np.maximum(union, EPSILON)
def ioa(masks1, masks2):
"""Computes pairwise intersection-over-area between box collections.
Intersection-over-area (ioa) between two masks, mask1 and mask2 is defined as
their intersection area over mask2's area. Note that ioa is not symmetric,
that is, IOA(mask1, mask2) != IOA(mask2, mask1).
Args:
masks1: a numpy array with shape [N, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
masks2: a numpy array with shape [M, height, width] holding N masks. Masks
values are of type np.uint8 and values are in {0,1}.
Returns:
a numpy array with shape [N, M] representing pairwise ioa scores.
Raises:
ValueError: If masks1 and masks2 are not of type np.uint8.
"""
if masks1.dtype != np.uint8 or masks2.dtype != np.uint8:
raise ValueError('masks1 and masks2 should be of type np.uint8')
intersect = intersection(masks1, masks2)
areas = np.expand_dims(area(masks2), axis=0)
return intersect / (areas + EPSILON)
|
jiaphuan/models
|
research/object_detection/utils/np_mask_ops.py
|
Python
|
apache-2.0
| 4,214
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nested structure coding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from google.protobuf import text_format
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.saved_model import nested_structure_coder
class NestedStructureTest(test.TestCase):
def setUp(self):
super(NestedStructureTest, self).setUp()
self._coder = nested_structure_coder.StructureCoder()
def testEncodeDecodeList(self):
structure = [1.5, 2.5, 3.0]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().float64_value = 1.5
expected.list_value.values.add().float64_value = 2.5
expected.list_value.values.add().float64_value = 3.0
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTuple(self):
structure = ("hello", [3, (2, 1)])
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.tuple_value.values.add().string_value = "hello"
list_value = expected.tuple_value.values.add().list_value
list_value.values.add().int64_value = 3
tuple_value = list_value.values.add().tuple_value
tuple_value.values.add().int64_value = 2
tuple_value.values.add().int64_value = 1
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeDict(self):
structure = dict(a=3, b=[7, 2.5])
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.dict_value.fields["a"].int64_value = 3
list_value = expected.dict_value.fields["b"].list_value
list_value.values.add().int64_value = 7
list_value.values.add().float64_value = 2.5
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertIsInstance(decoded["a"], int)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorShape(self):
structure = [tensor_shape.TensorShape([1, 2, 3]), "hello"]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_shape = expected_list.values.add().tensor_shape_value
expected_tensor_shape.dim.add().size = 1
expected_tensor_shape.dim.add().size = 2
expected_tensor_shape.dim.add().size = 3
expected_tensor_shape = expected_list.values.add().string_value = "hello"
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeNamedTuple(self):
named_tuple_type = collections.namedtuple("NamedTuple", ["x", "y"])
named_tuple = named_tuple_type(x=[1, 2], y="hello")
self.assertTrue(self._coder.can_encode(named_tuple))
encoded = self._coder.encode_structure(named_tuple)
expected = struct_pb2.StructuredValue()
expected_named_tuple = expected.named_tuple_value
expected_named_tuple.name = "NamedTuple"
key_value_pair = expected_named_tuple.values.add()
key_value_pair.key = "x"
list_value = key_value_pair.value.list_value
list_value.values.add().int64_value = 1
list_value.values.add().int64_value = 2
key_value_pair = expected_named_tuple.values.add()
key_value_pair.key = "y"
key_value_pair.value.string_value = "hello"
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(named_tuple._asdict(), decoded._asdict())
self.assertEqual(named_tuple.__class__.__name__, decoded.__class__.__name__)
def testNone(self):
structure = [1.0, None]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().float64_value = 1.0
expected.list_value.values.add().none_value.CopyFrom(struct_pb2.NoneValue())
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testBool(self):
structure = [False]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().bool_value = False
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEmptyStructures(self):
structure = [list(), dict(), tuple()]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected.list_value.values.add().list_value.CopyFrom(struct_pb2.ListValue())
expected.list_value.values.add().dict_value.CopyFrom(struct_pb2.DictValue())
expected.list_value.values.add().tuple_value.CopyFrom(
struct_pb2.TupleValue())
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testDtype(self):
structure = [dtypes.int64]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
list_value = expected.list_value.values.add()
list_value.tensor_dtype_value = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorSpec(self):
structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64, "hello")]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = "hello"
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeTensorSpecWithNoName(self):
structure = [tensor_spec.TensorSpec([1, 2, 3], dtypes.int64)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = ""
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeRaggedTensorSpec(self):
structure = [ragged_tensor.RaggedTensorSpec(
[1, 2, 3], dtypes.int64, 2, dtypes.int32)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected_pbtxt = r"""
list_value {
values {
type_spec_value {
type_spec_class: RAGGED_TENSOR_SPEC
type_spec_class_name: 'RaggedTensorSpec'
type_state {
tuple_value {
# spec._shape
values {
tensor_shape_value {
dim { size: 1 }
dim { size: 2 }
dim { size: 3 }
}
}
# spec._dtype
values { tensor_dtype_value: DT_INT64 }
# spec._ragged_rank
values { int64_value: 2 }
# spec._row_splits_dtype
values { tensor_dtype_value: DT_INT32 }
}
}
}
}
}
"""
expected = struct_pb2.StructuredValue()
text_format.Parse(expected_pbtxt, expected)
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeSparseTensorSpec(self):
structure = [sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32)]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected_pbtxt = r"""
list_value {
values {
type_spec_value {
type_spec_class: SPARSE_TENSOR_SPEC
type_spec_class_name: 'SparseTensorSpec'
type_state {
tuple_value {
# spec._shape
values {
tensor_shape_value {
dim { size: 10 }
dim { size: 20 }
}
}
# spec._dtype
values { tensor_dtype_value: DT_FLOAT }
}
}
}
}
}
"""
expected = struct_pb2.StructuredValue()
text_format.Parse(expected_pbtxt, expected)
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testDecodeUnknownTensorSpec(self):
encoded = struct_pb2.StructuredValue()
encoded.type_spec_value.type_spec_class = 0
encoded.type_spec_value.type_spec_class_name = "FutureTensorSpec"
with self.assertRaisesRegex(ValueError,
"The type 'FutureTensorSpec' is not supported"):
self._coder.decode_proto(encoded)
def testEncodeDecodeBoundedTensorSpec(self):
structure = [
tensor_spec.BoundedTensorSpec([1, 2, 3], dtypes.int64, 0, 10,
"hello-0-10")
]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().bounded_tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 1
expected_tensor_spec.shape.dim.add().size = 2
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = "hello-0-10"
expected_tensor_spec.dtype = dtypes.int64.as_datatype_enum
expected_tensor_spec.minimum.CopyFrom(
tensor_util.make_tensor_proto([0], dtype=dtypes.int64, shape=[]))
expected_tensor_spec.maximum.CopyFrom(
tensor_util.make_tensor_proto([10], dtype=dtypes.int64, shape=[]))
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeBoundedTensorSpecNoName(self):
structure = [
tensor_spec.BoundedTensorSpec((28, 28, 3), dtypes.float64, -2,
(1, 1, 20))
]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
expected = struct_pb2.StructuredValue()
expected_list = expected.list_value
expected_tensor_spec = expected_list.values.add().bounded_tensor_spec_value
expected_tensor_spec.shape.dim.add().size = 28
expected_tensor_spec.shape.dim.add().size = 28
expected_tensor_spec.shape.dim.add().size = 3
expected_tensor_spec.name = ""
expected_tensor_spec.dtype = dtypes.float64.as_datatype_enum
expected_tensor_spec.minimum.CopyFrom(
tensor_util.make_tensor_proto([-2], dtype=dtypes.float64, shape=[]))
expected_tensor_spec.maximum.CopyFrom(
tensor_util.make_tensor_proto([1, 1, 20],
dtype=dtypes.float64,
shape=[3]))
self.assertEqual(expected, encoded)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDataSetSpec(self):
structure = [dataset_ops.DatasetSpec(
{"rt": ragged_tensor.RaggedTensorSpec([10, None], dtypes.int32),
"st": sparse_tensor.SparseTensorSpec([10, 20], dtypes.float32),
"t": tensor_spec.TensorSpec([10, 8], dtypes.string)})]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testEncodeDecodeNdarraySpec(self):
structure = [np_arrays.NdarraySpec(
tensor_spec.TensorSpec([4, 2], dtypes.float32))]
self.assertTrue(self._coder.can_encode(structure))
encoded = self._coder.encode_structure(structure)
decoded = self._coder.decode_proto(encoded)
self.assertEqual(structure, decoded)
def testNotEncodable(self):
class NotEncodable(object):
pass
self.assertFalse(self._coder.can_encode([NotEncodable()]))
if __name__ == "__main__":
test.main()
|
freedomtan/tensorflow
|
tensorflow/python/saved_model/nested_structure_coder_test.py
|
Python
|
apache-2.0
| 14,586
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module customizes `test_combinations` for Tensorflow.
Additionally it provides `generate()`, `combine()` and `times()` with Tensorflow
customizations as a default.
"""
import functools
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_combinations
from tensorflow.python.util.tf_export import tf_export
class EagerGraphCombination(test_combinations.TestCombination):
"""Run the test in Graph or Eager mode.
The optional `mode` parameter controls the test's execution mode. Its
accepted values are "graph" or "eager" literals.
"""
def context_managers(self, kwargs):
mode = kwargs.pop("mode", None)
if mode is None:
return []
elif mode == "eager":
return [context.eager_mode()]
elif mode == "graph":
return [ops.Graph().as_default(), context.graph_mode()]
else:
raise ValueError(
"Argument 'mode' must be either 'eager' or 'graph'. "
f"Received: {mode}.")
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("mode")]
class TFVersionCombination(test_combinations.TestCombination):
"""Control the execution of the test in TF1.x and TF2.
If TF2 is enabled then a test with TF1 test is going to be skipped and vice
versa.
Test targets continuously run in TF2 thanks to the tensorflow.v2 TAP target.
A test can be run in TF2 with bazel by passing --test_env=TF2_BEHAVIOR=1.
"""
def should_execute_combination(self, kwargs):
tf_api_version = kwargs.pop("tf_api_version", None)
if tf_api_version == 1 and tf2.enabled():
return (False, "Skipping a TF1.x test when TF2 is enabled.")
elif tf_api_version == 2 and not tf2.enabled():
return (False, "Skipping a TF2 test when TF2 is not enabled.")
return (True, None)
def parameter_modifiers(self):
return [test_combinations.OptionalParameter("tf_api_version")]
generate = functools.partial(
test_combinations.generate,
test_combinations=(EagerGraphCombination(), TFVersionCombination()))
combine = test_combinations.combine
times = test_combinations.times
NamedObject = test_combinations.NamedObject
tf_export("__internal__.test.combinations.generate", v1=[])(generate)
|
tensorflow/tensorflow
|
tensorflow/python/framework/combinations.py
|
Python
|
apache-2.0
| 2,998
|
# ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
parser = argparse.ArgumentParser(
description="Script to move TFLite models from pre-release schema to "
"new schema.")
parser.add_argument(
"input",
type=str,
help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
"output",
type=str,
help="Output json or bin TensorFlow lite model compliant with "
"the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
temporary = tempfile.mkdtemp()
try:
yield temporary
finally:
shutil.rmtree(temporary)
class Converter(object):
"""Converts TensorFlow flatbuffer models from old to new version of schema.
This can convert between any version to the latest version. It uses
an incremental upgrade strategy to go from version to version.
Usage:
converter = Converter()
converter.Convert("a.tflite", "a.json")
converter.Convert("b.json", "b.tflite")
"""
def __init__(self):
# TODO(aselle): make this work in the open source version with better
# path.
paths_to_try = [
"../../../../flatbuffers/flatc", # not bazel
"../../../../external/flatbuffers/flatc" # bazel
]
for p in paths_to_try:
self._flatc_path = resource_loader.get_path_to_datafile(p)
if os.path.exists(self._flatc_path): break
def FindSchema(base_name):
return resource_loader.get_path_to_datafile("%s" % base_name)
# Supported schemas for upgrade.
self._schemas = [
(0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
(1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
(2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
(3, FindSchema("schema_v3.fbs"), False, None) # Non-callable by design.
]
# Ensure schemas are sorted, and extract latest version and upgrade
# dispatch function table.
self._schemas.sort()
self._new_version, self._new_schema = self._schemas[-1][:2]
self._upgrade_dispatch = {
version: dispatch
for version, unused1, unused2, dispatch in self._schemas}
def _Read(self, input_file, schema, raw_binary=False):
"""Read a tflite model assuming the given flatbuffer schema.
If `input_file` is in bin, then we must use flatc to convert the schema
from binary to json.
Args:
input_file: a binary (flatbuffer) or json file to read from. Extension
must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
FlatBuffer JSON.
schema: which schema to use for reading
raw_binary: whether to assume raw_binary (versions previous to v3)
that lacked file_identifier require this.
Raises:
RuntimeError: 1. When flatc cannot be invoked.
2. When json file does not exists.
ValueError: When the extension is not json or bin.
Returns:
A dictionary representing the read tflite model.
"""
raw_binary = ["--raw-binary"] if raw_binary else []
with TemporaryDirectoryResource() as tempdir:
basename = os.path.basename(input_file)
basename_no_extension, extension = os.path.splitext(basename)
if extension in [".bin", ".tflite"]:
# Convert to json using flatc
returncode = subprocess.call([
self._flatc_path,
"-t",
"--strict-json",
"--defaults-json",
] + raw_binary + ["-o", tempdir, schema, "--", input_file])
if returncode != 0:
raise RuntimeError("flatc failed to convert from binary to json.")
json_file = os.path.join(tempdir, basename_no_extension + ".json")
if not os.path.exists(json_file):
raise RuntimeError("Could not find %r" % json_file)
elif extension == ".json":
json_file = input_file
else:
raise ValueError("Invalid extension on input file %r" % input_file)
return json.load(open(json_file))
def _Write(self, data, output_file):
"""Output a json or bin version of the flatbuffer model.
Args:
data: Dict representing the TensorFlow Lite model to write.
output_file: filename to write the converted flatbuffer to. (json,
tflite, or bin extension is required).
Raises:
ValueError: When the extension is not json or bin
RuntimeError: When flatc fails to convert json data to binary.
"""
_, extension = os.path.splitext(output_file)
with TemporaryDirectoryResource() as tempdir:
if extension == ".json":
json.dump(data, open(output_file, "w"), sort_keys=True, indent=2)
elif extension in [".tflite", ".bin"]:
input_json = os.path.join(tempdir, "temp.json")
with open(input_json, "w") as fp:
json.dump(data, fp, sort_keys=True, indent=2)
returncode = subprocess.call([
self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
tempdir, self._new_schema, input_json
])
if returncode != 0:
raise RuntimeError("flatc failed to convert upgraded json to binary.")
shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
else:
raise ValueError("Invalid extension on output file %r" % output_file)
def _Upgrade0To1(self, data):
"""Upgrade data from Version 0 to Version 1.
Changes: Added subgraphs (which contains a subset of formally global
entries).
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
subgraph = {}
for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
subgraph[key_to_promote] = data[key_to_promote]
del data[key_to_promote]
data["subgraphs"] = [subgraph]
def _Upgrade1To2(self, data):
"""Upgrade data from Version 1 to Version 2.
Changes: Rename operators to Conform to NN API.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
Raises:
ValueError: Throws when model builtins are numeric rather than symbols.
"""
def RemapOperator(opcode_name):
"""Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
"""
old_name_to_new_name = {
"CONVOLUTION": "CONV_2D",
"DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
"AVERAGE_POOL": "AVERAGE_POOL_2D",
"MAX_POOL": "MAX_POOL_2D",
"L2_POOL": "L2_POOL_2D",
"SIGMOID": "LOGISTIC",
"L2NORM": "L2_NORMALIZATION",
"LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
"Basic_RNN": "RNN",
}
return (old_name_to_new_name[opcode_name]
if opcode_name in old_name_to_new_name else opcode_name)
def RemapOperatorType(operator_type):
"""Remap operator structs from old names to new names.
Args:
operator_type: String representing the builtin operator data type
string.
(see :schema.fbs).
Raises:
ValueError: When the model has consistency problems.
Returns:
Upgraded builtin operator data type as a string.
"""
old_to_new = {
"PoolOptions": "Pool2DOptions",
"DepthwiseConvolutionOptions": "DepthwiseConv2DOptions",
"ConvolutionOptions": "Conv2DOptions",
"LocalResponseNormOptions": "LocalResponseNormalizationOptions",
"BasicRNNOptions": "RNNOptions",
}
return (old_to_new[operator_type]
if operator_type in old_to_new else operator_type)
for subgraph in data["subgraphs"]:
for ops in subgraph["operators"]:
ops["builtin_options_type"] = RemapOperatorType(
ops["builtin_options_type"])
# Upgrade the operator codes
for operator_code in data["operator_codes"]:
# Check if builtin_code is the appropriate string type
# use type("") instead of str or unicode. for py2and3
if not isinstance(operator_code["builtin_code"], type(u"")):
raise ValueError("builtin_code %r is non-string. this usually means "
"your model has consistency problems." %
(operator_code["builtin_code"]))
operator_code["builtin_code"] = (RemapOperator(
operator_code["builtin_code"]))
def _Upgrade2To3(self, data):
"""Upgrade data from Version 2 to Version 3.
Changed actual read-only tensor data to be in a buffers table instead
of inline with the tensor.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
buffers = [{"data": []}] # Start with 1 empty buffer
for subgraph in data["subgraphs"]:
if "tensors" not in subgraph:
continue
for tensor in subgraph["tensors"]:
if "data_buffer" not in tensor:
tensor["buffer"] = 0
else:
if tensor["data_buffer"]:
tensor[u"buffer"] = len(buffers)
buffers.append({"data": tensor["data_buffer"]})
else:
tensor["buffer"] = 0
del tensor["data_buffer"]
data["buffers"] = buffers
def _PerformUpgrade(self, data):
"""Manipulate the `data` (parsed JSON) based on changes in format.
This incrementally will upgrade from version to version within data.
Args:
data: Dictionary representing the TensorFlow data. This will be upgraded
in place.
"""
while data["version"] < self._new_version:
self._upgrade_dispatch[data["version"]](data)
data["version"] += 1
def Convert(self, input_file, output_file):
"""Perform schema conversion from input_file to output_file.
Args:
input_file: Filename of TensorFlow Lite data to convert from. Must
be `.json` or `.bin` extension files for JSON or Binary forms of
the TensorFlow FlatBuffer schema.
output_file: Filename to write to. Extension also must be `.json`
or `.bin`.
Raises:
RuntimeError: Generated when none of the upgrader supported schemas
matche the `input_file` data.
"""
# Read data in each schema (since they are incompatible). Version is
# always present. Use the read data that matches the version of the
# schema.
for version, schema, raw_binary, _ in self._schemas:
try:
data_candidate = self._Read(input_file, schema, raw_binary)
except RuntimeError:
continue # Skip and hope another schema works
if "version" not in data_candidate: # Assume version 1 if not present.
data_candidate["version"] = 1
elif data_candidate["version"] == 0: # Version 0 doesn't exist in wild.
data_candidate["version"] = 1
if data_candidate["version"] == version:
self._PerformUpgrade(data_candidate)
self._Write(data_candidate, output_file)
return
raise RuntimeError("No schema that the converter understands worked with "
"the data file you provided.")
def main(argv):
del argv
Converter().Convert(FLAGS.input, FLAGS.output)
if __name__ == "__main__":
FLAGS, unparsed = parser.parse_known_args()
tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
tensorflow/tensorflow
|
tensorflow/lite/schema/upgrade_schema.py
|
Python
|
apache-2.0
| 12,866
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import distribution as distribution_lib
from tensorflow.python.ops.distributions import identity_bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"TransformedDistribution",
]
# The following helper functions attempt to statically perform a TF operation.
# These functions make debugging easier since we can do more validation during
# graph construction.
def _static_value(x):
"""Returns the static value of a `Tensor` or `None`."""
return tensor_util.constant_value(ops.convert_to_tensor(x))
def _logical_and(*args):
"""Convenience function which attempts to statically `reduce_all`."""
args_ = [_static_value(x) for x in args]
if any(x is not None and not bool(x) for x in args_):
return constant_op.constant(False)
if all(x is not None and bool(x) for x in args_):
return constant_op.constant(True)
if len(args) == 2:
return math_ops.logical_and(*args)
return math_ops.reduce_all(args)
def _logical_equal(x, y):
"""Convenience function which attempts to statically compute `x == y`."""
x_ = _static_value(x)
y_ = _static_value(y)
if x_ is None or y_ is None:
return math_ops.equal(x, y)
return constant_op.constant(np.array_equal(x_, y_))
def _logical_not(x):
"""Convenience function which attempts to statically apply `logical_not`."""
x_ = _static_value(x)
if x_ is None:
return math_ops.logical_not(x)
return constant_op.constant(np.logical_not(x_))
def _concat_vectors(*args):
"""Convenience function which concatenates input vectors."""
args_ = [_static_value(x) for x in args]
if any(x_ is None for x_ in args_):
return array_ops.concat(args, 0)
return constant_op.constant([x_ for vec_ in args_ for x_ in vec_])
def _pick_scalar_condition(pred, cond_true, cond_false):
"""Convenience function which chooses the condition based on the predicate."""
# Note: This function is only valid if all of pred, cond_true, and cond_false
# are scalars. This means its semantics are arguably more like tf.cond than
# tf.select even though we use tf.select to implement it.
pred_ = _static_value(pred)
if pred_ is None:
return array_ops.where(pred, cond_true, cond_false)
return cond_true if pred_ else cond_false
def _ones_like(x):
"""Convenience function attempts to statically construct `ones_like`."""
# Should only be used for small vectors.
if x.get_shape().is_fully_defined():
return array_ops.ones(x.get_shape().as_list(), dtype=x.dtype)
return array_ops.ones_like(x)
def _ndims_from_shape(shape):
"""Returns `Tensor`'s `rank` implied by a `Tensor` shape."""
if shape.get_shape().ndims not in (None, 1):
raise ValueError("input is not a valid shape: not 1D")
if not shape.dtype.is_integer:
raise TypeError("input is not a valid shape: wrong dtype")
if shape.get_shape().is_fully_defined():
return constant_op.constant(shape.get_shape().as_list()[0])
return array_ops.shape(shape)[0]
def _is_scalar_from_shape(shape):
"""Returns `True` `Tensor` if `Tensor` shape implies a scalar."""
return _logical_equal(_ndims_from_shape(shape), 0)
class TransformedDistribution(distribution_lib.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
We now describe how a `TransformedDistribution` alters the input/outputs of a
`Distribution` associated with a random variable (rv) `X`.
Write `cdf(Y=y)` for an absolutely continuous cumulative distribution function
of random variable `Y`; write the probability density function `pdf(Y=y) :=
d^k / (dy_1,...,dy_k) cdf(Y=y)` for its derivative wrt to `Y` evaluated at
`y`. Assume that `Y = g(X)` where `g` is a deterministic diffeomorphism,
i.e., a non-random, continuous, differentiable, and invertible function.
Write the inverse of `g` as `X = g^{-1}(Y)` and `(J o g)(x)` for the Jacobian
of `g` evaluated at `x`.
A `TransformedDistribution` implements the following operations:
* `sample`
Mathematically: `Y = g(X)`
Programmatically: `bijector.forward(distribution.sample(...))`
* `log_prob`
Mathematically: `(log o pdf)(Y=y) = (log o pdf o g^{-1})(y)
+ (log o abs o det o J o g^{-1})(y)`
Programmatically: `(distribution.log_prob(bijector.inverse(y))
+ bijector.inverse_log_det_jacobian(y))`
* `log_cdf`
Mathematically: `(log o cdf)(Y=y) = (log o cdf o g^{-1})(y)`
Programmatically: `distribution.log_cdf(bijector.inverse(x))`
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tfp.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), axis=-1)),
name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tfp.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(
shift=-1.,
scale_identity_multiplier=2.)
name="NormalTransformedDistribution")
```
A `TransformedDistribution`'s batch- and event-shape are implied by the base
distribution unless explicitly overridden by `batch_shape` or `event_shape`
arguments. Specifying an overriding `batch_shape` (`event_shape`) is
permitted only if the base distribution has scalar batch-shape (event-shape).
The bijector is applied to the distribution as if the distribution possessed
the overridden shape(s). The following example demonstrates how to construct a
multivariate Normal as a `TransformedDistribution`.
```python
ds = tfp.distributions
# We will create two MVNs with batch_shape = event_shape = 2.
mean = [[-1., 0], # batch:0
[0., 1]] # batch:1
chol_cov = [[[1., 0],
[0, 1]], # batch:0
[[1, 0],
[2, 2]]] # batch:1
mvn1 = ds.TransformedDistribution(
distribution=ds.Normal(loc=0., scale=1.),
bijector=ds.bijectors.Affine(shift=mean, scale_tril=chol_cov),
batch_shape=[2], # Valid because base_distribution.batch_shape == [].
event_shape=[2]) # Valid because base_distribution.event_shape == [].
mvn2 = ds.MultivariateNormalTriL(loc=mean, scale_tril=chol_cov)
# mvn1.log_prob(x) == mvn2.log_prob(x)
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
distribution,
bijector=None,
batch_shape=None,
event_shape=None,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`. `None` means `Identity()`.
batch_shape: `integer` vector `Tensor` which overrides `distribution`
`batch_shape`; valid only if `distribution.is_scalar_batch()`.
event_shape: `integer` vector `Tensor` which overrides `distribution`
`event_shape`; valid only if `distribution.is_scalar_event()`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class. Default:
`bijector.name + distribution.name`.
"""
parameters = dict(locals())
name = name or (("" if bijector is None else bijector.name) +
distribution.name)
with ops.name_scope(name, values=[event_shape, batch_shape]) as name:
# For convenience we define some handy constants.
self._zero = constant_op.constant(0, dtype=dtypes.int32, name="zero")
self._empty = constant_op.constant([], dtype=dtypes.int32, name="empty")
if bijector is None:
bijector = identity_bijector.Identity(validate_args=validate_args)
# We will keep track of a static and dynamic version of
# self._is_{batch,event}_override. This way we can do more prior to graph
# execution, including possibly raising Python exceptions.
self._override_batch_shape = self._maybe_validate_shape_override(
batch_shape, distribution.is_scalar_batch(), validate_args,
"batch_shape")
self._is_batch_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_batch_shape), self._zero))
self._is_maybe_batch_override = bool(
tensor_util.constant_value(self._override_batch_shape) is None or
tensor_util.constant_value(self._override_batch_shape).size != 0)
self._override_event_shape = self._maybe_validate_shape_override(
event_shape, distribution.is_scalar_event(), validate_args,
"event_shape")
self._is_event_override = _logical_not(_logical_equal(
_ndims_from_shape(self._override_event_shape), self._zero))
self._is_maybe_event_override = bool(
tensor_util.constant_value(self._override_event_shape) is None or
tensor_util.constant_value(self._override_event_shape).size != 0)
# To convert a scalar distribution into a multivariate distribution we
# will draw dims from the sample dims, which are otherwise iid. This is
# easy to do except in the case that the base distribution has batch dims
# and we're overriding event shape. When that case happens the event dims
# will incorrectly be to the left of the batch dims. In this case we'll
# cyclically permute left the new dims.
self._needs_rotation = _logical_and(
self._is_event_override,
_logical_not(self._is_batch_override),
_logical_not(distribution.is_scalar_batch()))
override_event_ndims = _ndims_from_shape(self._override_event_shape)
self._rotate_ndims = _pick_scalar_condition(
self._needs_rotation, override_event_ndims, 0)
# We'll be reducing the head dims (if at all), i.e., this will be []
# if we don't need to reduce.
self._reduce_event_indices = math_ops.range(
self._rotate_ndims - override_event_ndims, self._rotate_ndims)
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
reparameterization_type=self._distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape_tensor(self):
return self.bijector.forward_event_shape_tensor(
distribution_util.pick_vector(
self._is_event_override,
self._override_event_shape,
self.distribution.event_shape_tensor()))
def _event_shape(self):
# If there's a chance that the event_shape has been overridden, we return
# what we statically know about the `event_shape_override`. This works
# because: `_is_maybe_event_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `event_shape` or we do.
#
# Since the `bijector` may change the `event_shape`, we then forward what we
# know to the bijector. This allows the `bijector` to have final say in the
# `event_shape`.
static_override = tensor_util.constant_value_as_shape(
self._override_event_shape)
return self.bijector.forward_event_shape(
static_override
if self._is_maybe_event_override
else self.distribution.event_shape)
def _batch_shape_tensor(self):
return distribution_util.pick_vector(
self._is_batch_override,
self._override_batch_shape,
self.distribution.batch_shape_tensor())
def _batch_shape(self):
# If there's a chance that the batch_shape has been overridden, we return
# what we statically know about the `batch_shape_override`. This works
# because: `_is_maybe_batch_override` means `static_override` is `None` or a
# non-empty list, i.e., we don't statically know the `batch_shape` or we do.
#
# Notice that this implementation parallels the `_event_shape` except that
# the `bijector` doesn't get to alter the `batch_shape`. Recall that
# `batch_shape` is a property of a distribution while `event_shape` is
# shared between both the `distribution` instance and the `bijector`.
static_override = tensor_util.constant_value_as_shape(
self._override_batch_shape)
return (static_override
if self._is_maybe_batch_override
else self.distribution.batch_shape)
def _sample_n(self, n, seed=None):
sample_shape = _concat_vectors(
distribution_util.pick_vector(self._needs_rotation, self._empty, [n]),
self._override_batch_shape,
self._override_event_shape,
distribution_util.pick_vector(self._needs_rotation, [n], self._empty))
x = self.distribution.sample(sample_shape=sample_shape, seed=seed)
x = self._maybe_rotate_dims(x)
# We'll apply the bijector in the `_call_sample_n` function.
return x
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
# We override `_call_sample_n` rather than `_sample_n` so we can ensure that
# the result of `self.bijector.forward` is not modified (and thus caching
# works).
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
# First, generate samples. We will possibly generate extra samples in the
# event that we need to reinterpret the samples as part of the
# event_shape.
x = self._sample_n(n, seed, **kwargs)
# Next, we reshape `x` into its final form. We do this prior to the call
# to the bijector to ensure that the bijector caching works.
batch_event_shape = array_ops.shape(x)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
x = array_ops.reshape(x, final_shape)
# Finally, we apply the bijector's forward transformation. For caching to
# work, it is imperative that this is the last modification to the
# returned result.
y = self.bijector.forward(x, **kwargs)
y = self._set_sample_static_shape(y, sample_shape)
return y
def _log_prob(self, y):
# For caching to work, it is imperative that the bijector is the first to
# modify the input.
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_log_prob_for_one_fiber(y, x, ildj, event_ndims)
lp_on_fibers = [
self._finish_log_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return math_ops.reduce_logsumexp(array_ops.stack(lp_on_fibers), axis=0)
def _finish_log_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of log_prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
log_prob = self.distribution.log_prob(x)
if self._is_maybe_event_override:
log_prob = math_ops.reduce_sum(log_prob, self._reduce_event_indices)
log_prob += math_ops.cast(ildj, log_prob.dtype)
if self._is_maybe_event_override and isinstance(event_ndims, int):
log_prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return log_prob
def _prob(self, y):
x = self.bijector.inverse(y)
event_ndims = self._maybe_get_static_event_ndims()
ildj = self.bijector.inverse_log_det_jacobian(y, event_ndims=event_ndims)
if self.bijector._is_injective: # pylint: disable=protected-access
return self._finish_prob_for_one_fiber(y, x, ildj, event_ndims)
prob_on_fibers = [
self._finish_prob_for_one_fiber(y, x_i, ildj_i, event_ndims)
for x_i, ildj_i in zip(x, ildj)]
return sum(prob_on_fibers)
def _finish_prob_for_one_fiber(self, y, x, ildj, event_ndims):
"""Finish computation of prob on one element of the inverse image."""
x = self._maybe_rotate_dims(x, rotate_right=True)
prob = self.distribution.prob(x)
if self._is_maybe_event_override:
prob = math_ops.reduce_prod(prob, self._reduce_event_indices)
prob *= math_ops.exp(math_ops.cast(ildj, prob.dtype))
if self._is_maybe_event_override and isinstance(event_ndims, int):
prob.set_shape(
array_ops.broadcast_static_shape(
y.get_shape().with_rank_at_least(1)[:-event_ndims],
self.batch_shape))
return prob
def _log_cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_cdf(x)
def _cdf(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("cdf is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("cdf is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.cdf(x)
def _log_survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("log_survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("log_survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.log_survival_function(x)
def _survival_function(self, y):
if self._is_maybe_event_override:
raise NotImplementedError("survival_function is not implemented when "
"overriding event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("survival_function is not implemented when "
"bijector is not injective.")
x = self.bijector.inverse(y)
return self.distribution.survival_function(x)
def _quantile(self, value):
if self._is_maybe_event_override:
raise NotImplementedError("quantile is not implemented when overriding "
"event_shape")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("quantile is not implemented when "
"bijector is not injective.")
# x_q is the "qth quantile" of X iff q = P[X <= x_q]. Now, since X =
# g^{-1}(Y), q = P[X <= x_q] = P[g^{-1}(Y) <= x_q] = P[Y <= g(x_q)],
# implies the qth quantile of Y is g(x_q).
inv_cdf = self.distribution.quantile(value)
return self.bijector.forward(inv_cdf)
def _entropy(self):
if not self.bijector.is_constant_jacobian:
raise NotImplementedError("entropy is not implemented")
if not self.bijector._is_injective: # pylint: disable=protected-access
raise NotImplementedError("entropy is not implemented when "
"bijector is not injective.")
# Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
# can be shown that:
# H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
# If is_constant_jacobian then:
# E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
# where c can by anything.
entropy = self.distribution.entropy()
if self._is_maybe_event_override:
# H[X] = sum_i H[X_i] if X_i are mutually independent.
# This means that a reduce_sum is a simple rescaling.
entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
dtype=entropy.dtype.base_dtype)
if self._is_maybe_batch_override:
new_shape = array_ops.concat([
_ones_like(self._override_batch_shape),
self.distribution.batch_shape_tensor()
], 0)
entropy = array_ops.reshape(entropy, new_shape)
multiples = array_ops.concat([
self._override_batch_shape,
_ones_like(self.distribution.batch_shape_tensor())
], 0)
entropy = array_ops.tile(entropy, multiples)
dummy = array_ops.zeros(
shape=array_ops.concat(
[self.batch_shape_tensor(), self.event_shape_tensor()],
0),
dtype=self.dtype)
event_ndims = (self.event_shape.ndims if self.event_shape.ndims is not None
else array_ops.size(self.event_shape_tensor()))
ildj = self.bijector.inverse_log_det_jacobian(
dummy, event_ndims=event_ndims)
entropy -= math_ops.cast(ildj, entropy.dtype)
entropy.set_shape(self.batch_shape)
return entropy
def _maybe_validate_shape_override(self, override_shape, base_is_scalar,
validate_args, name):
"""Helper to __init__ which ensures override batch/event_shape are valid."""
if override_shape is None:
override_shape = []
override_shape = ops.convert_to_tensor(override_shape, dtype=dtypes.int32,
name=name)
if not override_shape.dtype.is_integer:
raise TypeError("shape override must be an integer")
override_is_scalar = _is_scalar_from_shape(override_shape)
if tensor_util.constant_value(override_is_scalar):
return self._empty
dynamic_assertions = []
if override_shape.get_shape().ndims is not None:
if override_shape.get_shape().ndims != 1:
raise ValueError("shape override must be a vector")
elif validate_args:
dynamic_assertions += [check_ops.assert_rank(
override_shape, 1,
message="shape override must be a vector")]
if tensor_util.constant_value(override_shape) is not None:
if any(s <= 0 for s in tensor_util.constant_value(override_shape)):
raise ValueError("shape override must have positive elements")
elif validate_args:
dynamic_assertions += [check_ops.assert_positive(
override_shape,
message="shape override must have positive elements")]
is_both_nonscalar = _logical_and(_logical_not(base_is_scalar),
_logical_not(override_is_scalar))
if tensor_util.constant_value(is_both_nonscalar) is not None:
if tensor_util.constant_value(is_both_nonscalar):
raise ValueError("base distribution not scalar")
elif validate_args:
dynamic_assertions += [check_ops.assert_equal(
is_both_nonscalar, False,
message="base distribution not scalar")]
if not dynamic_assertions:
return override_shape
return control_flow_ops.with_dependencies(
dynamic_assertions, override_shape)
def _maybe_rotate_dims(self, x, rotate_right=False):
"""Helper which rolls left event_dims left or right event_dims right."""
needs_rotation_const = tensor_util.constant_value(self._needs_rotation)
if needs_rotation_const is not None and not needs_rotation_const:
return x
ndims = array_ops.rank(x)
n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
return array_ops.transpose(
x, _concat_vectors(math_ops.range(n, ndims), math_ops.range(0, n)))
def _maybe_get_static_event_ndims(self):
if self.event_shape.ndims is not None:
return self.event_shape.ndims
event_ndims = array_ops.size(self.event_shape_tensor())
event_ndims_ = distribution_util.maybe_get_static_value(event_ndims)
if event_ndims_ is not None:
return event_ndims_
return event_ndims
|
jbedorf/tensorflow
|
tensorflow/python/ops/distributions/transformed_distribution.py
|
Python
|
apache-2.0
| 27,637
|
from pyjamas.ui.Sink import Sink, SinkInfo
from pyjamas.ui.Frame import Frame
class Frames(Sink):
def __init__(self):
Sink.__init__(self)
self.frame=Frame(self.baseURL() + "rembrandt/LaMarcheNocturne.html")
self.frame.setWidth("100%")
self.frame.setHeight("48em")
self.initWidget(self.frame)
def init():
text="If you need to include multiple pages of good ol' static HTML, it's easy to do using the <code>Frame</code> class."
return SinkInfo("Frames", text, Frames)
|
spaceone/pyjs
|
examples/kitchensink/sink/Frames.py
|
Python
|
apache-2.0
| 522
|
# $Id: __init__.py 6141 2009-09-25 18:50:30Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `Customizing the Parser`_ below for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source, settings)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
Customizing the Parser
----------------------
Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet. Patches welcome!
When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``. Pass ``rfc2822=1`` to enable an initial
RFC-2822 style header block, parsed as a "field_list" element (with "class"
attribute set to "rfc2822"). Currently this is the only body-level element
which is customizable without subclassing. (Tip: subclass `Parser` and change
its "state_classes" and "initial_state" attributes to refer to new classes.
Contact the author if you need more details.)
The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition. A common extension is the addition of
further implicit hyperlinks, like "RFC 2822". This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass. See `states.Inliner.implicit_inline()` for details. Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
from docutils import frontend, nodes
class Parser(docutils.parsers.Parser):
"""The reStructuredText parser."""
supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
"""Aliases this parser supports."""
settings_spec = (
'reStructuredText Parser Options',
None,
(('Recognize and link to standalone PEP references (like "PEP 258").',
['--pep-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for PEP references '
'(default "http://www.python.org/dev/peps/").',
['--pep-base-url'],
{'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/',
'validator': frontend.validate_url_trailing_slash}),
('Template for PEP file part of URL. (default "pep-%04d")',
['--pep-file-url-template'],
{'metavar': '<URL>', 'default': 'pep-%04d'}),
('Recognize and link to standalone RFC references (like "RFC 822").',
['--rfc-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
['--rfc-base-url'],
{'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
'validator': frontend.validate_url_trailing_slash}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8,
'validator': frontend.validate_nonnegative_int}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Leave spaces before footnote references.',
['--leave-footnote-reference-space'],
{'action': 'store_false', 'dest': 'trim_footnote_reference_space'}),
('Disable directives that insert the contents of external file '
'("include" & "raw"); replaced with a "warning" system message.',
['--no-file-insertion'],
{'action': 'store_false', 'default': 1,
'dest': 'file_insertion_enabled',
'validator': frontend.validate_boolean}),
('Enable directives that insert the contents of external file '
'("include" & "raw"). Enabled by default.',
['--file-insertion-enabled'],
{'action': 'store_true'}),
('Disable the "raw" directives; replaced with a "warning" '
'system message.',
['--no-raw'],
{'action': 'store_false', 'default': 1, 'dest': 'raw_enabled',
'validator': frontend.validate_boolean}),
('Enable the "raw" directive. Enabled by default.',
['--raw-enabled'],
{'action': 'store_true'}),))
config_section = 'restructuredtext parser'
config_section_dependencies = ('parsers',)
def __init__(self, rfc2822=None, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
else:
self.initial_state = 'Body'
self.state_classes = states.state_classes
self.inliner = inliner
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=document.reporter.debug_flag)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=1)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
class DirectiveError(Exception):
"""
Store a message and a system message level.
To be thrown from inside directive code.
Do not instantiate directly -- use `Directive.directive_error()`
instead!
"""
def __init__(self, level, message, source, line):
"""
Initialize with message `message`. `level` is a system message level.
"""
Exception.__init__(self)
self.level = level
self.msg = message
self.source = source
self.line = line
class Directive(object):
"""
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Arguments are normally single whitespace-separated words. The
final argument may contain whitespace and/or newlines if
`final_argument_whitespace` is True.
If the form of the arguments is more complex, specify only one
argument (either required or optional) and set
`final_argument_whitespace` to True; the client code must do any
context-sensitive parsing.
When a directive implementation is being run, the directive class
is instantiated, and the `run()` method is executed. During
instantiation, the following instance variables are set:
- ``name`` is the directive type or name (string).
- ``arguments`` is the list of positional arguments (strings).
- ``options`` is a dictionary mapping option names (strings) to
values (type depends on option conversion functions; see
`option_spec` above).
- ``content`` is a list of strings, the directive content line by line.
- ``lineno`` is the line number of the first line of the directive.
- ``content_offset`` is the line offset of the first line of the content from
the beginning of the current input. Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state which called
the directive function.
Directive functions return a list of nodes which will be inserted
into the document tree at the point where the directive was
encountered. This can be an empty list if there is nothing to
insert.
For ordinary directives, the list must contain body elements or
structural elements. Some directives are intended specifically
for substitution definitions, and must return a list of `Text`
nodes and/or inline elements (suitable for inline insertion, in
place of the substitution reference). Such directives must verify
substitution definition context, typically using code like this::
if not isinstance(state, states.SubstitutionDef):
error = state_machine.reporter.error(
'Invalid context: the "%s" directive can only be used '
'within a substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
"""
# There is a "Creating reStructuredText Directives" how-to at
# <http://docutils.sf.net/docs/howto/rst-directives.html>. If you
# update this docstring, please update the how-to as well.
required_arguments = 0
"""Number of required directive arguments."""
optional_arguments = 0
"""Number of optional arguments after the required arguments."""
final_argument_whitespace = False
"""May the final argument contain whitespace?"""
option_spec = None
"""Mapping of option names to validator functions."""
has_content = False
"""May the directive have content?"""
def __init__(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
self.name = name
self.arguments = arguments
self.options = options
self.content = content
self.lineno = lineno
self.content_offset = content_offset
self.block_text = block_text
self.state = state
self.state_machine = state_machine
def run(self):
raise NotImplementedError('Must override run() is subclass.')
# Directive errors:
def directive_error(self, level, message):
"""
Return a DirectiveError suitable for being thrown as an exception.
Call "raise self.directive_error(level, message)" from within
a directive implementation to return one single system message
at level `level`, which automatically gets the directive block
and the line number added.
You'd often use self.error(message) instead, which will
generate an ERROR-level directive error.
"""
# source = self.state_machine.get_source(self.lineno - 1)
try:
(source, line) = self.state_machine.input_lines.info(self.lineno)
except IndexError:
source = self.state_machine.get_source(self.lineno - 1)
line = self.lineno
return DirectiveError(level, message, source, line)
def debug(self, message):
return self.directive_error(0, message)
def info(self, message):
return self.directive_error(1, message)
def warning(self, message):
return self.directive_error(2, message)
def error(self, message):
return self.directive_error(3, message)
def severe(self, message):
return self.directive_error(4, message)
# Convenience methods:
def assert_has_content(self):
"""
Throw an ERROR-level DirectiveError if the directive doesn't
have contents.
"""
if not self.content:
raise self.error('Content block expected for the "%s" directive; '
'none found.' % self.name)
def convert_directive_function(directive_fn):
"""
Define & return a directive class generated from `directive_fn`.
`directive_fn` uses the old-style, functional interface.
"""
class FunctionalDirective(Directive):
option_spec = getattr(directive_fn, 'options', None)
has_content = getattr(directive_fn, 'content', False)
_argument_spec = getattr(directive_fn, 'arguments', (0, 0, False))
required_arguments, optional_arguments, final_argument_whitespace \
= _argument_spec
def run(self):
return directive_fn(
self.name, self.arguments, self.options, self.content,
self.lineno, self.content_offset, self.block_text,
self.state, self.state_machine)
# Return new-style directive.
return FunctionalDirective
|
edisonlz/fruit
|
web_project/base/site-packages/docutils/parsers/rst/__init__.py
|
Python
|
apache-2.0
| 14,192
|
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Wrappers to get actually replaceable DBAPI2 compliant modules and
database connection whatever the database and client lib used.
Currently support:
- postgresql (pgdb, psycopg, psycopg2, pyPgSQL)
- mysql (MySQLdb)
- sqlite (pysqlite2, sqlite, sqlite3)
just use the `get_connection` function from this module to get a
wrapped connection. If multiple drivers for a database are available,
you can control which one you want to use using the
`set_prefered_driver` function.
Additional helpers are also provided for advanced functionalities such
as listing existing users or databases, creating database... Get the
helper for your database using the `get_adv_func_helper` function.
"""
__docformat__ = "restructuredtext en"
from warnings import warn
warn('this module is deprecated, use logilab.database instead',
DeprecationWarning, stacklevel=1)
from logilab.database import (get_connection, set_prefered_driver,
get_dbapi_compliant_module as _gdcm,
get_db_helper as _gdh)
def get_dbapi_compliant_module(driver, *args, **kwargs):
module = _gdcm(driver, *args, **kwargs)
module.adv_func_helper = _gdh(driver)
return module
|
dbbhattacharya/kitsune
|
vendor/packages/logilab-common/db.py
|
Python
|
bsd-3-clause
| 2,037
|
from __future__ import division, absolute_import
import numpy as np
from .. import EulerDeconv, EulerDeconvEW, EulerDeconvMW, sphere
from ...mesher import Sphere
from ... import utils, gridder
model = None
xp, yp, zp = None, None, None
inc, dec = None, None
struct_ind = None
base = None
pos = None
field, dx, dy, dz = None, None, None, None
precision = 0.01
def setup():
global model, x, y, z, inc, dec, struct_ind, field, dx, dy, dz, base, pos
inc, dec = -30, 50
pos = np.array([1000, 1200, 200])
model = Sphere(pos[0], pos[1], pos[2], 1,
{'magnetization': utils.ang2vec(10000, inc, dec)})
struct_ind = 3
shape = (200, 200)
x, y, z = gridder.regular((0, 3000, 0, 3000), shape, z=-100)
base = 10
field = sphere.tf(x, y, z, [model], inc, dec) + base
# Use finite difference derivatives so that these tests don't depend on the
# performance of the FFT derivatives.
dx = (sphere.tf(x + 1, y, z, [model], inc, dec) -
sphere.tf(x - 1, y, z, [model], inc, dec))/2
dy = (sphere.tf(x, y + 1, z, [model], inc, dec) -
sphere.tf(x, y - 1, z, [model], inc, dec))/2
dz = (sphere.tf(x, y, z + 1, [model], inc, dec) -
sphere.tf(x, y, z - 1, [model], inc, dec))/2
def test_euler_sphere_mag():
"gravmag.EulerDeconv estimates center for sphere model and magnetic data"
euler = EulerDeconv(x, y, z, field, dx, dy, dz, struct_ind).fit()
assert (base - euler.baselevel_) / base <= precision, \
'baselevel: %g estimated: %g' % (base, euler.baselevel_)
assert np.all((pos - euler.estimate_) / pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
# Check if the R^2 metric (how good the fit is) is reasonably high
# (best score is 1)
data = -x*dx - y*dy - z*dz - struct_ind*field
pred = euler.predicted()
u = ((data - pred)**2).sum()
v = ((data - data.mean())**2).sum()
R2 = 1 - u/v
assert R2 >= 0.999, "R^2 too low: {}".format(R2)
def test_euler_expandingwindow_sphere_mag():
"gravmag.EulerDeconvEW estimates center for sphere model and magnetic data"
euler = EulerDeconvEW(x, y, z, field, dx, dy, dz, struct_ind,
center=[1000, 1000],
sizes=np.linspace(100, 2000, 20))
euler.fit()
assert (base - euler.baselevel_) / base <= precision, \
'baselevel: %g estimated: %g' % (base, euler.baselevel_)
assert np.all((pos - euler.estimate_) / pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(euler.estimate_))
def test_euler_movingwindow_sphere_mag():
"gravmag.EulerDeconvMW estimates center for sphere model and magnetic data"
euler = EulerDeconvMW(x, y, z, field, dx, dy, dz, struct_ind,
windows=[10, 10], size=(1000, 1000), keep=0.2)
euler.fit()
for b in euler.baselevel_:
assert (base - b) / base <= precision, \
'baselevel: %g estimated: %g' % (base, b)
for c in euler.estimate_:
assert np.all((pos - c) / pos <= precision), \
'position: %s estimated: %s' % (str(pos), str(c))
|
rafaelmds/fatiando
|
fatiando/gravmag/tests/test_euler.py
|
Python
|
bsd-3-clause
| 3,153
|
# -*- coding: utf-8 -*-
"""
amqp.five
~~~~~~~~~~~
Compatibility implementations of features
only available in newer Python versions.
"""
from __future__ import absolute_import
import io
import sys
try:
from collections import Counter
except ImportError: # pragma: no cover
from collections import defaultdict
def Counter(): # noqa
return defaultdict(int)
try:
buffer_t = buffer
except NameError: # pragma: no cover
# Py3 does not have buffer, only use this for isa checks.
class buffer_t(object): # noqa
pass
bytes_t = bytes
__all__ = ['Counter', 'reload', 'UserList', 'UserDict',
'Queue', 'Empty', 'Full', 'LifoQueue', 'builtins',
'zip_longest', 'map', 'zip', 'string', 'string_t', 'bytes_t',
'long_t', 'text_t', 'int_types', 'module_name_t',
'range', 'items', 'keys', 'values', 'nextfun', 'reraise',
'WhateverIO', 'with_metaclass', 'open_fqdn', 'StringIO',
'THREAD_TIMEOUT_MAX', 'format_d', 'monotonic', 'buffer_t']
# ############# py3k ########################################################
PY3 = sys.version_info[0] == 3
try:
reload = reload # noqa
except NameError: # pragma: no cover
from imp import reload # noqa
try:
from collections import UserList # noqa
except ImportError: # pragma: no cover
from UserList import UserList # noqa
try:
from collections import UserDict # noqa
except ImportError: # pragma: no cover
from UserDict import UserDict # noqa
# ############# time.monotonic #############################################
if sys.version_info < (3, 3):
import platform
SYSTEM = platform.system()
try:
import ctypes
except ImportError: # pragma: no cover
ctypes = None # noqa
if SYSTEM == 'Darwin' and ctypes is not None:
from ctypes.util import find_library
libSystem = ctypes.CDLL(find_library('libSystem.dylib'))
CoreServices = ctypes.CDLL(find_library('CoreServices'),
use_errno=True)
mach_absolute_time = libSystem.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds
absolute_to_nanoseconds.restype = ctypes.c_uint64
absolute_to_nanoseconds.argtypes = [ctypes.c_uint64]
def _monotonic():
return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9
elif SYSTEM == 'Linux' and ctypes is not None:
# from stackoverflow:
# questions/1205722/how-do-i-get-monotonic-time-durations-in-python
import os
CLOCK_MONOTONIC = 1 # see <linux/time.h>
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long),
]
librt = ctypes.CDLL('librt.so.1', use_errno=True)
clock_gettime = librt.clock_gettime
clock_gettime.argtypes = [
ctypes.c_int, ctypes.POINTER(timespec),
]
def _monotonic(): # noqa
t = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0:
errno_ = ctypes.get_errno()
raise OSError(errno_, os.strerror(errno_))
return t.tv_sec + t.tv_nsec * 1e-9
else:
from time import time as _monotonic
try:
from time import monotonic
except ImportError:
monotonic = _monotonic # noqa
# ############# Py3 <-> Py2 #################################################
if PY3: # pragma: no cover
import builtins
from itertools import zip_longest
map = map
zip = zip
string = str
string_t = str
long_t = int
text_t = str
range = range
int_types = (int,)
module_name_t = str
open_fqdn = 'builtins.open'
def items(d):
return d.items()
def keys(d):
return d.keys()
def values(d):
return d.values()
def nextfun(it):
return it.__next__
exec_ = getattr(builtins, 'exec')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
import __builtin__ as builtins # noqa
from itertools import ( # noqa
imap as map,
izip as zip,
izip_longest as zip_longest,
)
string = unicode # noqa
string_t = basestring # noqa
text_t = unicode
long_t = long # noqa
range = xrange
module_name_t = str
int_types = (int, long)
open_fqdn = '__builtin__.open'
def items(d): # noqa
return d.iteritems()
def keys(d): # noqa
return d.iterkeys()
def values(d): # noqa
return d.itervalues()
def nextfun(it): # noqa
return it.next
def exec_(code, globs=None, locs=None): # pragma: no cover
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""")
def with_metaclass(Type, skip_attrs=set(('__dict__', '__weakref__'))):
"""Class decorator to set metaclass.
Works with both Python 2 and Python 3 and it does not add
an extra class in the lookup order like ``six.with_metaclass`` does
(that is -- it copies the original class instead of using inheritance).
"""
def _clone_with_metaclass(Class):
attrs = dict((key, value) for key, value in items(vars(Class))
if key not in skip_attrs)
return Type(Class.__name__, Class.__bases__, attrs)
return _clone_with_metaclass
# ############# threading.TIMEOUT_MAX ########################################
try:
from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX
except ImportError:
THREAD_TIMEOUT_MAX = 1e10 # noqa
# ############# format(int, ',d') ############################################
if sys.version_info >= (2, 7): # pragma: no cover
def format_d(i):
return format(i, ',d')
else: # pragma: no cover
def format_d(i): # noqa
s = '%d' % i
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
StringIO = io.StringIO
_SIO_write = StringIO.write
_SIO_init = StringIO.__init__
class WhateverIO(StringIO):
def __init__(self, v=None, *a, **kw):
_SIO_init(self, v.decode() if isinstance(v, bytes) else v, *a, **kw)
def write(self, data):
_SIO_write(self, data.decode() if isinstance(data, bytes) else data)
|
kalefranz/auxlib
|
auxlib/_vendor/five.py
|
Python
|
isc
| 7,109
|
# -*- coding: utf-8 -*-
from __future__ import division
from qsrlib_qsrs.qsr_qtc_simplified_abstractclass import QSR_QTC_Simplified_Abstractclass
import numpy as np
from qsrlib_io.world_qsr_trace import *
class QSR_QTC_BC_Simplified(QSR_QTC_Simplified_Abstractclass):
"""QTCBC simplified relations.
Values of the abstract properties
* **_unique_id** = "qtcbcs"
* **_all_possible_relations** = ?
* **_dtype** = "points"
Some explanation about the QSR or better link to a separate webpage explaining it. Maybe a reference if it exists.
"""
def __init__(self):
"""Constructor."""
super(QSR_QTC_BC_Simplified, self).__init__()
self._unique_id = "qtcbcs"
"""str: Unique identifier name of the QSR."""
self.qtc_type = "bc"
"""str: QTC specific type."""
self._all_possible_relations = tuple(self.return_all_possible_state_combinations()[0])
"""tuple: All possible relations of the QSR."""
def make_world_qsr_trace(self, world_trace, timestamps, qsr_params, req_params, **kwargs):
"""Compute the world QSR trace from the arguments.
:param world_trace: Input data.
:type world_trace: :class:`World_Trace <qsrlib_io.world_trace.World_Trace>`
:param timestamps: List of sorted timestamps of `world_trace`.
:type timestamps: list
:param qsr_params: QSR specific parameters passed in `dynamic_args`.
:type qsr_params: dict
:param req_params: Dynamic arguments passed with the request.
:type dynamic_args: dict
:param kwargs: kwargs arguments.
:return: Computed world QSR trace.
:rtype: :class:`World_QSR_Trace <qsrlib_io.world_qsr_trace.World_QSR_Trace>`
"""
ret = World_QSR_Trace(qsr_type=self._unique_id)
qtc_sequence = {}
for t, tp in zip(timestamps[1:], timestamps):
world_state_now = world_trace.trace[t]
world_state_previous = world_trace.trace[tp]
if set(world_state_now.objects.keys()) != set(world_state_previous.objects.keys()):
ret.put_empty_world_qsr_state(t)
continue # Objects have to be present in both timestamps
qsrs_for = self._process_qsrs_for(world_state_now.objects.keys(), req_params["dynamic_args"])
for o1_name, o2_name in qsrs_for:
between = str(o1_name) + "," + str(o2_name)
qtc = np.array([], dtype=int)
k = [world_state_previous.objects[o1_name].x,
world_state_previous.objects[o1_name].y,
world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y]
l = [world_state_previous.objects[o2_name].x,
world_state_previous.objects[o2_name].y,
world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y]
qtc = self._create_qtc_representation(
k,
l,
qsr_params["quantisation_factor"]
)
distance = self._get_euclidean_distance(
(world_state_now.objects[o1_name].x,
world_state_now.objects[o1_name].y),
(world_state_now.objects[o2_name].x,
world_state_now.objects[o2_name].y)
)
try:
qtc_sequence[between]["qtc"] = np.append(
qtc_sequence[between]["qtc"],
qtc
).reshape(-1,4)
qtc_sequence[between]["distances"] = np.append(
qtc_sequence[between]["distances"],
distance
)
except KeyError:
qtc_sequence[between] = {
"qtc": qtc,
"distances": np.array([distance])
}
for between, qtcbc in qtc_sequence.items():
qtcbc["qtc"] = self._create_bc_chain(qtcbc["qtc"], qtcbc["distances"], qsr_params["distance_threshold"])
if not qsr_params["no_collapse"]:
qtcbc["qtc"] = self._collapse_similar_states(qtcbc["qtc"])
if qsr_params["validate"]:
qtcbc["qtc"] = self._validate_qtc_sequence(qtcbc["qtc"])
for idx, q in enumerate(qtcbc["qtc"]):
qsr = QSR(
timestamp=idx+1,
between=between,
qsr=self.qtc_to_output_format(q)
)
ret.add_qsr(qsr, idx+1)
return ret
def _create_bc_chain(self, qtc, distances, distance_threshold):
"""
:param qtc:
:type qtc:
:param distances:
:type distances:
:param distance_threshold:
:type distance_threshold:
:return:
:rtype:
"""
ret = np.array([])
if len(qtc.shape) == 1:
qtc = [qtc]
for dist, state in zip(distances, qtc):
if dist > distance_threshold:
ret = np.append(ret, np.append(state[0:2],[np.nan,np.nan]), axis=0)
else:
ret = np.append(ret, state, axis=0)
return ret.reshape(-1,4)
def qtc_to_output_format(self, qtc):
"""Overwrite this for the different QTC variants to select only the parts from the QTCCS tuple that you would
like to return. Example for QTCBS: return `qtc[0:2]`.
:param qtc: Full QTCC tuple [q1,q2,q4,q5].
:type qtc: list or tuple
:return: {"qtcbcs": "q1,q2,q4,q5"}
:rtype: dict
"""
s = self.create_qtc_string(qtc) if not np.isnan(qtc[2]) else self.create_qtc_string(qtc[0:2])
return self._format_qsr(s)
def _get_euclidean_distance(self, p, q):
"""Calculate the Euclidean distance between points `p` and `q`.
:param p: x,y coordinates.
:type p: tuple
:param q: x,y coordinates.
:type q: tuple
:return: Euclidean distance between `p` and `q`.
:rtype: float
"""
return np.sqrt(np.power((float(p[0])-float(q[0])),2)+np.power((float(p[1])-float(q[1])),2))
|
pet1330/strands_qsr_lib
|
qsr_lib/src/qsrlib_qsrs/qsr_qtc_bc_simplified.py
|
Python
|
mit
| 6,319
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Romain Command&
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Tests for factory_boy/MongoEngine interactions."""
import factory
import os
from .compat import unittest
try:
import mongoengine
except ImportError:
mongoengine = None
if os.environ.get('SKIP_MONGOENGINE') == '1':
mongoengine = None
if mongoengine:
from factory.mongoengine import MongoEngineFactory
class Address(mongoengine.EmbeddedDocument):
street = mongoengine.StringField()
class Person(mongoengine.Document):
name = mongoengine.StringField()
address = mongoengine.EmbeddedDocumentField(Address)
class AddressFactory(MongoEngineFactory):
class Meta:
model = Address
street = factory.Sequence(lambda n: 'street%d' % n)
class PersonFactory(MongoEngineFactory):
class Meta:
model = Person
name = factory.Sequence(lambda n: 'name%d' % n)
address = factory.SubFactory(AddressFactory)
@unittest.skipIf(mongoengine is None, "mongoengine not installed.")
class MongoEngineTestCase(unittest.TestCase):
db_name = os.environ.get('MONGO_DATABASE', 'factory_boy_test')
db_host = os.environ.get('MONGO_HOST', 'localhost')
db_port = int(os.environ.get('MONGO_PORT', '27017'))
server_timeout_ms = int(os.environ.get('MONGO_TIMEOUT', '300'))
@classmethod
def setUpClass(cls):
from pymongo import read_preferences as mongo_rp
cls.db = mongoengine.connect(
db=cls.db_name,
host=cls.db_host,
port=cls.db_port,
# PyMongo>=2.1 requires an explicit read_preference.
read_preference=mongo_rp.ReadPreference.PRIMARY,
# PyMongo>=2.1 has a 20s timeout, use 100ms instead
serverselectiontimeoutms=cls.server_timeout_ms,
)
@classmethod
def tearDownClass(cls):
cls.db.drop_database(cls.db_name)
def setUp(self):
mongoengine.connect('factory_boy_test')
def test_build(self):
std = PersonFactory.build()
self.assertEqual('name0', std.name)
self.assertEqual('street0', std.address.street)
self.assertIsNone(std.id)
def test_creation(self):
std1 = PersonFactory.create()
self.assertEqual('name1', std1.name)
self.assertEqual('street1', std1.address.street)
self.assertIsNotNone(std1.id)
|
muhammad-ammar/factory_boy
|
tests/test_mongoengine.py
|
Python
|
mit
| 3,452
|
#!/usr/bin/env python
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <scudette@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" This is an implementation of an ASCII Text UI suitable for producing simple automated reports.
"""
import re, types, textwrap, csv, sys
import pyflag.FlagFramework as FlagFramework
import pyflag.DB as DB
import pyflag.conf
import pyflag.UI as UI
config=pyflag.conf.ConfObject()
import pyflag.Registry as Registry
import cStringIO
class TextObject:
generator = None
class TEXTUI(UI.GenericUI):
""" A simple text UI """
def __init__(self, default= None, query=None):
self.result = ""
self.text_var = ''
self.current_table=None
self.generator = TextObject()
if query:
self.defaults=query
if default:
self.defaults = default.defaults
def display(self):
if self.current_table:
self.end_table()
return self.result
def __str__(self):
return self.result
def heading(self,string):
self.result+=string+"\r\n"+ "-" * len(string) + "\r\n\r\n"
def pre(self,string):
self.result+=string
def start_table(self,**options):
if self.current_table==None:
self.current_table_size=[0,0]
self.current_table=[]
def table(self,sql="select ",columns=[],names=[],links=[],table='',where='',groupby = None,case=None,callbacks={},**opts):
names=list(names)
## Establish the sorting order
try:
self.sort=[list(names).index(self.defaults['order']),'order']
except KeyError:
try:
self.sort=[self.defaults['dorder'],'dorder']
except KeyError:
self.sort=[0,'order']
self.filter_conditions=[]
self.filter_text=[]
try:
if not groupby:
groupby=self.defaults['group_by']
except KeyError:
groupby=None
# Get a new SQL generator for building the table with.
generator,new_query,names,columns,links = self._make_sql(sql=sql,columns=columns,names=names,links=links,table=table,where=where,groupby = groupby,case=case,callbacks=callbacks, query=self.defaults)
output = cStringIO.StringIO()
writer=None
for row in generator:
if not writer:
## Print the headers in a comment field:
output.write("#%s\r\n" % ','.join(row.keys()))
writer=csv.DictWriter(output, row.keys())
writer.writerow(row)
output.seek(0)
self.result+=output.read()
def text(self,*cuts,**options):
self.text_var += "".join(cuts)
try:
if options['wrap']=='full':
for line in self.text_var.splitlines(True):
new_lines = textwrap.wrap(line, config.WRAP)
for i in range(len(new_lines)):
new_line = new_lines[i]
self.result+=new_line
if len(new_line)<len(line) and i<len(new_lines)-1:
self.result += " " * (config.WRAP - len(new_line)) + "\\"
return
except KeyError:
pass
self.result+=self.text_var
self.text_var = ''
def notebook(self,names=[],context="notebook",callbacks=[],descriptions=[]):
""" This text implementation of notebook will only show the page which is currently selected """
print "%r" % self.defaults
query=self.defaults.clone()
try:
context_str=query[context]
cbfunc=callbacks[names.index(context_str)]
except (ValueError,KeyError):
cbfunc=callbacks[0]
context_str=names[0]
result=self.__class__(self)
cbfunc(query,result)
self.result += result.display()
def end_table(self):
for row_index in range(len(self.current_table)):
row=self.current_table[row_index]
temp = []
max_height = 0
for item in row:
width=0
lines = item.splitlines()
if len(lines)>max_height: max_height=len(lines)
for line in lines:
if width<len(line): width=len(line)
#fill the line out to max width:
lines = [ line + " "*(width-len(line)) for line in lines]
temp.append(lines + ["\r\n"] * (max_height - len(lines)))
for i in range(0,max_height):
try:
self.result+="".join([c[i] for c in temp ]) + "\r\n"
except IndexError:
pass
def toolbar(self,cb=None,text=None,icon=None,popup=True,tooltip=None,link=None):
pass
def row(self, *columns, **options):
if self.current_table == None:
self.start_table()
## Add an extra row on the end
self.current_table_size[0]+=1
if self.current_table_size[1]<len(columns):
self.current_table_size[1]=len(columns)
column_widgets=[]
for i in range(len(columns)):
col=columns[i]
if isinstance(col,self.__class__):
col=col.display()
column_widgets.append(col)
##Attach the column to row at the end of the table:
self.current_table.append(column_widgets)
def tree(self,tree_cb = None, pane_cb=None, branch = ('/'), layout="horizontal"):
""" A Text tree implementation """
query = self.defaults
try:
## Get the right part:
branch=FlagFramework.splitpath(query['open_tree'])
except KeyError:
branch=['']
#Start building the tree using the branch.
def draw_branch(depth,tree_array):
#We search through all the items until we find the one
#that matches the branch for this depth, then recurse into
#it.
branch_array=branch[:depth]
path = FlagFramework.joinpath(branch[:depth])
for k,v,t in tree_cb(path):
if not k: continue
if not t: continue
tree_array.append((depth,k,v,t))
try:
if k == branch[depth]:
#Recurse into the next level in the tree
draw_branch(depth+1,tree_array)
except IndexError:
pass
tree_array = []
#The first item in the tree is the first one provided in branch
if not branch[0]:
tree_array.append((0,'/','/','branch'))
else:
tree_array.append((0,branch[0],branch[0],'branch'))
#Build the tree_array
draw_branch(1,tree_array)
left = self.__class__(self)
for depth,k,v,t in tree_array:
icon = '-'
if t=="branch":
icon = '+'
left.text(" "*depth + icon + v.__str__() + "\r\n")
right = self.__class__(self)
path = FlagFramework.joinpath(branch)
pane_cb(path, right)
self.row(left, right)
def refresh(self, int, query):
pass
def link(self,string,target=FlagFramework.query_type(()),**target_options):
pass
def para(self,string,**options):
self.result += string + "\r\n\r\n"
|
backupManager/pyflag
|
src/pyflag/TEXTUI.py
|
Python
|
gpl-2.0
| 8,684
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from sickbeard import helpers, logger
meta_session = helpers.make_session()
def getShowImage(url, imgNum=None):
if url is None:
return None
# if they provided a fanart number try to use it instead
if imgNum is not None:
tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
else:
tempURL = url
logger.log("Fetching image from " + tempURL, logger.DEBUG)
image_data = helpers.getURL(tempURL, session=meta_session, returns='content')
if image_data is None:
logger.log("There was an error trying to retrieve the image, aborting", logger.WARNING)
return
return image_data
|
b0ttl3z/SickRage
|
sickbeard/metadata/helpers.py
|
Python
|
gpl-3.0
| 1,452
|
"""Extract reference documentation from the NumPy source tree.
"""
import inspect
import textwrap
import re
import pydoc
from io import StringIO
from warnings import warn
import collections
class Reader(object):
"""A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
class NumpyDocString(object):
def __init__(self, docstring, config={}):
docstring = textwrap.dedent(docstring).split('\n')
self._doc = Reader(docstring)
self._parsed_data = {
'Signature': '',
'Summary': [''],
'Extended Summary': [],
'Parameters': [],
'Returns': [],
'Raises': [],
'Warns': [],
'Other Parameters': [],
'Attributes': [],
'Methods': [],
'See Also': [],
'Notes': [],
'Warnings': [],
'References': '',
'Examples': '',
'index': {}
}
self._parse()
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
warn("Unknown section %s" % key)
else:
self._parsed_data[key] = val
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith('.. index::'):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i:len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += ['']
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith('..'): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content):
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if ' : ' in header:
arg_name, arg_type = header.split(' : ')[:2]
else:
arg_name, arg_type = header, ''
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
params.append((arg_name, arg_type, desc))
return params
_name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
g = m.groups()
if g[1] is None:
return g[3], None
else:
return g[2], g[1]
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
if not name:
return
name, role = parse_item_name(name)
items.append((name, list(rest), role))
del rest[:]
current_func = None
rest = []
for line in content:
if not line.strip():
continue
m = self._name_rgx.match(line)
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
rest = [line.split(':', 1)[1].strip()]
if not rest[0]:
rest = []
elif not line.startswith(' '):
push_item(current_func, rest)
current_func = None
if ',' in line:
for func in line.split(','):
if func.strip():
push_item(func, [])
elif line.strip():
current_func = line
elif current_func is not None:
rest.append(line.strip())
push_item(current_func, rest)
return items
def _parse_index(self, section, content):
"""
.. index: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split('::')
if len(section) > 1:
out['default'] = strip_each_in(section[1].split(','))[0]
for line in content:
line = line.split(':')
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(','))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
self['Signature'] = summary_str
if not self._is_at_section():
self['Summary'] = self._doc.read_to_next_empty_line()
else:
self['Summary'] = summary
if not self._is_at_section():
self['Extended Summary'] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
for (section, content) in self._read_sections():
if not section.startswith('..'):
section = ' '.join([s.capitalize()
for s in section.split(' ')])
if section in ('Parameters', 'Returns', 'Raises', 'Warns',
'Other Parameters', 'Attributes', 'Methods'):
self[section] = self._parse_param_list(content)
elif section.startswith('.. index::'):
self['index'] = self._parse_index(section, content)
elif section == 'See Also':
self['See Also'] = self._parse_see_also(content)
else:
self[section] = content
# string conversion routines
def _str_header(self, name, symbol='-'):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
if self['Signature']:
return [self['Signature'].replace('*', '\*')] + ['']
else:
return ['']
def _str_summary(self):
if self['Summary']:
return self['Summary'] + ['']
else:
return []
def _str_extended_summary(self):
if self['Extended Summary']:
return self['Extended Summary'] + ['']
else:
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param, param_type, desc in self[name]:
out += ['%s : %s' % (param, param_type)]
out += self._str_indent(desc)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += ['']
return out
def _str_see_also(self, func_role):
if not self['See Also']:
return []
out = []
out += self._str_header("See Also")
last_had_desc = True
for func, desc, role in self['See Also']:
if role:
link = ':%s:`%s`' % (role, func)
elif func_role:
link = ':%s:`%s`' % (func_role, func)
else:
link = "`%s`_" % func
if desc or last_had_desc:
out += ['']
out += [link]
else:
out[-1] += ", %s" % link
if desc:
out += self._str_indent([' '.join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += ['']
return out
def _str_index(self):
idx = self['index']
out = []
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
out += [' :%s: %s' % (section, ', '.join(references))]
return out
def __str__(self, func_role=''):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_section('Warnings')
out += self._str_see_also(func_role)
for s in ('Notes', 'References', 'Examples'):
out += self._str_section(s)
for param_list in ('Attributes', 'Methods'):
out += self._str_param_list(param_list)
out += self._str_index()
return '\n'.join(out)
def indent(str, indent=4):
indent_str = ' ' * indent
if str is None:
return indent_str
lines = str.split('\n')
return '\n'.join(indent_str + l for l in lines)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
def header(text, style='-'):
return text + '\n' + style * len(text) + '\n'
class FunctionDoc(NumpyDocString):
def __init__(self, func, role='func', doc=None, config={}):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ''
NumpyDocString.__init__(self, doc)
if not self['Signature'] and func is not None:
func, func_name = self.get_func()
try:
# try to read signature
argspec = inspect.signature(func)
argspec = inspect.formatargspec(*argspec)
argspec = argspec.replace('*', '\*')
signature = '%s%s' % (func_name, argspec)
except TypeError as e:
signature = '%s()' % func_name
self['Signature'] = signature
def get_func(self):
func_name = getattr(self._f, '__name__', self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, '__call__', self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ''
func, func_name = self.get_func()
signature = self['Signature'].replace('*', '\*')
roles = {'func': 'function',
'meth': 'method'}
if self._role:
if self._role not in roles:
print("Warning: invalid role %s" % self._role)
out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
func_name)
out += super(FunctionDoc, self).__str__(func_role=self._role)
return out
class ClassDoc(NumpyDocString):
extra_public_methods = ['__call__']
def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
config={}):
if not inspect.isclass(cls) and cls is not None:
raise ValueError("Expected a class or None, but got %r" % cls)
self._cls = cls
if modulename and not modulename.endswith('.'):
modulename += '.'
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
if config.get('show_class_members', True):
if not self['Methods']:
self['Methods'] = [(name, '', '')
for name in sorted(self.methods)]
if not self['Attributes']:
self['Attributes'] = [(name, '', '')
for name in sorted(self.properties)]
@property
def methods(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if ((not name.startswith('_')
or name in self.extra_public_methods)
and isinstance(func, collections.Callable))]
@property
def properties(self):
if self._cls is None:
return []
return [name for name, func in inspect.getmembers(self._cls)
if not name.startswith('_') and func is None]
|
vidartf/hyperspy
|
doc/sphinxext/docscrape.py
|
Python
|
gpl-3.0
| 15,590
|
class constant():
folder_name = 'results'
MAX_HELP_POSITION = 27
CURRENT_VERSION = '0.9.1'
output = None
file_logger = None
# jitsi options
jitsi_masterpass = None
# mozilla options
manually = None
path = None
bruteforce = None
specific_path = None
mozilla_software = ''
# ie options
ie_historic = None
# total password found
nbPasswordFound = 0
passwordFound = []
|
theoneandonly-vector/LaZagne
|
Windows/src/LaZagne/config/constant.py
|
Python
|
lgpl-3.0
| 416
|