| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import WebDriverException
try:
import http.client as http_client
except ImportError:
import httplib as http_client
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .remote_connection import SafariRemoteConnection
class WebDriver(RemoteWebDriver):
"""
Controls the SafariDriver and allows you to drive the browser.
"""
def __init__(self, port=0, executable_path="/usr/bin/safaridriver", reuse_service=False,
desired_capabilities=DesiredCapabilities.SAFARI, quiet=False,
keep_alive=True):
"""
Creates a new Safari driver instance and launches or finds a running safaridriver service.
:Args:
- port - The port on which the safaridriver service should listen for new connections. If zero, a free port will be found.
- executable_path - Path to a custom safaridriver executable to be used. If absent, /usr/bin/safaridriver is used.
- reuse_service - If True, do not spawn a safaridriver instance; instead, connect to an already-running service that was launched externally.
         - desired_capabilities - Dictionary object with desired capabilities (can be used to provide various Safari switches).
         - quiet - If True, the driver's stdout and stderr are suppressed.
         - keep_alive - Whether to configure SafariRemoteConnection to use
             HTTP keep-alive. Defaults to True.
"""
self._reuse_service = reuse_service
self.service = Service(executable_path, port=port, quiet=quiet)
if not reuse_service:
self.service.start()
executor = SafariRemoteConnection(remote_server_addr=self.service.service_url,
keep_alive=keep_alive)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=desired_capabilities)
self._is_remote = False
def quit(self):
"""
        Closes the browser and shuts down the safaridriver executable
        that was started when this driver instance was created.
"""
try:
RemoteWebDriver.quit(self)
except http_client.BadStatusLine:
pass
finally:
if not self._reuse_service:
self.service.stop()
# safaridriver extension commands. The canonical command support matrix is here:
# https://developer.apple.com/library/content/documentation/NetworkingInternetWeb/Conceptual/WebDriverEndpointDoc/Commands/Commands.html
# First available in Safari 11.1 and Safari Technology Preview 41.
def set_permission(self, permission, value):
if not isinstance(value, bool):
raise WebDriverException("Value of a session permission must be set to True or False.")
payload = {}
payload[permission] = value
self.execute("SET_PERMISSIONS", {"permissions": payload})
# First available in Safari 11.1 and Safari Technology Preview 41.
def get_permission(self, permission):
payload = self.execute("GET_PERMISSIONS")["value"]
permissions = payload["permissions"]
if not permissions:
return None
if permission not in permissions:
return None
value = permissions[permission]
if not isinstance(value, bool):
return None
return value
# First available in Safari 11.1 and Safari Technology Preview 42.
def debug(self):
self.execute("ATTACH_DEBUGGER")
self.execute_script("debugger;")
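# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes safaridriver has been enabled ("Allow Remote Automation" in Safari's
# Develop menu); the URL and the "getUserMedia" permission name are illustrative.
if __name__ == "__main__":
    driver = WebDriver()  # spawns /usr/bin/safaridriver on a free port
    try:
        driver.get("https://example.com")
        driver.set_permission("getUserMedia", True)   # toggle a session permission
        print(driver.get_permission("getUserMedia"))  # True, or None if unreported
    finally:
        driver.quit()  # also stops the spawned safaridriver service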
| krmahadevan/selenium | py/selenium/webdriver/safari/webdriver.py | Python | apache-2.0 | 4,520 |
"""Get ride details and liveboard details for NMBS (Belgian railway)."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "NMBS"
DEFAULT_ICON = "mdi:train"
DEFAULT_ICON_ALERT = "mdi:alert-octagon"
CONF_STATION_FROM = "station_from"
CONF_STATION_TO = "station_to"
CONF_STATION_LIVE = "station_live"
CONF_EXCLUDE_VIAS = "exclude_vias"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION_FROM): cv.string,
vol.Required(CONF_STATION_TO): cv.string,
vol.Optional(CONF_STATION_LIVE): cv.string,
vol.Optional(CONF_EXCLUDE_VIAS, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
}
)
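# Editor's note: an illustrative configuration dict (the station names are an
# assumption) showing the shape accepted by the schema above; only
# station_from and station_to are required, everything else has a default.
_EXAMPLE_CONFIG = {
    CONF_STATION_FROM: "Brussel-Zuid/Bruxelles-Midi",
    CONF_STATION_TO: "Gent-Sint-Pieters",
    CONF_STATION_LIVE: "Gent-Sint-Pieters",
    CONF_EXCLUDE_VIAS: False,
    CONF_NAME: DEFAULT_NAME,
    CONF_SHOW_ON_MAP: True,
}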
def get_time_until(departure_time=None):
"""Calculate the time between now and a train's departure time."""
if departure_time is None:
return 0
delta = dt_util.utc_from_timestamp(int(departure_time)) - dt_util.now()
return round((delta.total_seconds() / 60))
def get_delay_in_minutes(delay=0):
"""Get the delay in minutes from a delay in seconds."""
return round((int(delay) / 60))
def get_ride_duration(departure_time, arrival_time, delay=0):
"""Calculate the total travel time in minutes."""
duration = dt_util.utc_from_timestamp(
int(arrival_time)
) - dt_util.utc_from_timestamp(int(departure_time))
duration_time = int(round((duration.total_seconds() / 60)))
return duration_time + get_delay_in_minutes(delay)
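# Editor's note: a small worked example of the helpers above (timestamps are
# made up). A ride departing at epoch 1_600_000_000 and arriving 2700 s later,
# with a 300 s departure delay, lasts 45 + 5 = 50 minutes:
#
#     get_delay_in_minutes(300)                             # -> 5
#     get_ride_duration(1_600_000_000, 1_600_002_700, 300)  # -> 50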
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NMBS sensor with iRail API."""
from pyrail import iRail
api_client = iRail()
name = config[CONF_NAME]
show_on_map = config[CONF_SHOW_ON_MAP]
station_from = config[CONF_STATION_FROM]
station_to = config[CONF_STATION_TO]
station_live = config.get(CONF_STATION_LIVE)
excl_vias = config[CONF_EXCLUDE_VIAS]
sensors = [
NMBSSensor(api_client, name, show_on_map, station_from, station_to, excl_vias)
]
if station_live is not None:
sensors.append(NMBSLiveBoard(api_client, station_live))
add_entities(sensors, True)
class NMBSLiveBoard(Entity):
"""Get the next train from a station's liveboard."""
def __init__(self, api_client, live_station):
"""Initialize the sensor for getting liveboard data."""
self._station = live_station
self._api_client = api_client
self._attrs = {}
self._state = None
@property
def name(self):
"""Return the sensor default name."""
return "NMBS Live"
@property
def icon(self):
"""Return the default icon or an alert icon if delays."""
if self._attrs and int(self._attrs["delay"]) > 0:
return DEFAULT_ICON_ALERT
return DEFAULT_ICON
@property
def state(self):
"""Return sensor state."""
return self._state
@property
def device_state_attributes(self):
"""Return the sensor attributes if data is available."""
if self._state is None or not self._attrs:
return None
delay = get_delay_in_minutes(self._attrs["delay"])
departure = get_time_until(self._attrs["time"])
attrs = {
"departure": f"In {departure} minutes",
"extra_train": int(self._attrs["isExtra"]) > 0,
"vehicle_id": self._attrs["vehicle"],
"monitored_station": self._station,
ATTR_ATTRIBUTION: "https://api.irail.be/",
}
if delay > 0:
attrs["delay"] = f"{delay} minutes"
return attrs
def update(self):
"""Set the state equal to the next departure."""
liveboard = self._api_client.get_liveboard(self._station)
next_departure = liveboard["departures"]["departure"][0]
self._attrs = next_departure
self._state = "Track {} - {}".format(
next_departure["platform"], next_departure["station"]
)
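    # Editor's note: the update above only relies on these keys of a liveboard
    # departure entry (the values shown here are illustrative):
    #   {"delay": "0", "time": "1600000000", "isExtra": "0",
    #    "vehicle": "BE.NMBS.IC1234", "platform": "5", "station": "Gent-Sint-Pieters"}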
class NMBSSensor(Entity):
"""Get the the total travel time for a given connection."""
def __init__(
self, api_client, name, show_on_map, station_from, station_to, excl_vias
):
"""Initialize the NMBS connection sensor."""
self._name = name
self._show_on_map = show_on_map
self._api_client = api_client
self._station_from = station_from
self._station_to = station_to
self._excl_vias = excl_vias
self._attrs = {}
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "min"
@property
def icon(self):
"""Return the sensor default icon or an alert icon if any delay."""
if self._attrs:
delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
if delay > 0:
return "mdi:alert-octagon"
return "mdi:train"
@property
def device_state_attributes(self):
"""Return sensor attributes if data is available."""
if self._state is None or not self._attrs:
return None
delay = get_delay_in_minutes(self._attrs["departure"]["delay"])
departure = get_time_until(self._attrs["departure"]["time"])
attrs = {
"departure": f"In {departure} minutes",
"destination": self._station_to,
"direction": self._attrs["departure"]["direction"]["name"],
"platform_arriving": self._attrs["arrival"]["platform"],
"platform_departing": self._attrs["departure"]["platform"],
"vehicle_id": self._attrs["departure"]["vehicle"],
ATTR_ATTRIBUTION: "https://api.irail.be/",
}
if self._show_on_map and self.station_coordinates:
attrs[ATTR_LATITUDE] = self.station_coordinates[0]
attrs[ATTR_LONGITUDE] = self.station_coordinates[1]
if self.is_via_connection and not self._excl_vias:
via = self._attrs["vias"]["via"][0]
attrs["via"] = via["station"]
attrs["via_arrival_platform"] = via["arrival"]["platform"]
attrs["via_transfer_platform"] = via["departure"]["platform"]
attrs["via_transfer_time"] = get_delay_in_minutes(
via["timeBetween"]
) + get_delay_in_minutes(via["departure"]["delay"])
if delay > 0:
attrs["delay"] = f"{delay} minutes"
return attrs
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def station_coordinates(self):
"""Get the lat, long coordinates for station."""
if self._state is None or not self._attrs:
return []
latitude = float(self._attrs["departure"]["stationinfo"]["locationY"])
longitude = float(self._attrs["departure"]["stationinfo"]["locationX"])
return [latitude, longitude]
@property
def is_via_connection(self):
"""Return whether the connection goes through another station."""
if not self._attrs:
return False
return "vias" in self._attrs and int(self._attrs["vias"]["number"]) > 0
def update(self):
"""Set the state to the duration of a connection."""
connections = self._api_client.get_connections(
self._station_from, self._station_to
)
if int(connections["connection"][0]["departure"]["left"]) > 0:
next_connection = connections["connection"][1]
else:
next_connection = connections["connection"][0]
self._attrs = next_connection
if self._excl_vias and self.is_via_connection:
            _LOGGER.debug(
                "Skipping update of NMBSSensor because this connection is a via"
            )
return
duration = get_ride_duration(
next_connection["departure"]["time"],
next_connection["arrival"]["time"],
next_connection["departure"]["delay"],
)
self._state = duration
| joopert/home-assistant | homeassistant/components/nmbs/sensor.py | Python | apache-2.0 | 8,620 |
from redwind import app, db, util
from redwind.models import Post
import itertools
db.engine.execute('alter table post add column historic_path varchar(256)')
db.engine.execute('update post set historic_path = path')
for post in Post.query.all():
print(post.historic_path)
if not post.slug:
post.slug = post.generate_slug()
post.path = '{}/{:02d}/{}'.format(post.published.year,
post.published.month,
post.slug)
db.session.commit()
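# Editor's note (illustrative): a post published on 2014-10-17 with slug
# "hello-world" keeps its old path in historic_path and gets the new path
# "2014/10/hello-world".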
| thedod/redwind | migrations/20141017-permalinks.py | Python | bsd-2-clause | 531 |
# -*- coding: UTF-8 -*-
import haystack
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from conference import models
from conference.templatetags.conference import fare_blob
from collections import defaultdict
from datetime import datetime
from xml.sax.saxutils import escape
class Command(BaseCommand):
"""
"""
@transaction.commit_on_success
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference missing')
partner_events = defaultdict(list)
for f in models.Fare.objects.available(conference=conference).filter(ticket_type='partner'):
try:
date = datetime.strptime(fare_blob(f, 'data').split(',')[0][:-2] + ' 2011', '%B %d %Y').date()
time = datetime.strptime(fare_blob(f, 'departure'), '%H:%M').time()
except ValueError:
continue
partner_events[date].append((f, time))
for sch in models.Schedule.objects.filter(conference=conference):
events = list(models.Event.objects.filter(schedule=sch))
for fare, time in partner_events[sch.date]:
track_id = 'f%s' % fare.id
for e in events:
if track_id in e.get_all_tracks_names():
event = e
break
else:
event = models.Event(schedule=sch, talk=None)
event.track = 'partner-program ' + track_id
event.custom = escape(fare.name)
event.start_time = time
if time.hour < 13:
d = (13 - time.hour) * 60
else:
d = (19 - time.hour) * 60
event.duration = d
event.save()
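# Editor's note: as a standard Django management command this is invoked with
# the conference code as its only positional argument, e.g.
# "python manage.py partner_events ep2011" (the conference code is an
# assumption); without it the command raises CommandError('conference missing').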
| pythonitalia/pycon_site | p3/management/commands/partner_events.py | Python | bsd-2-clause | 1,898 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
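    # Editor's note: forwards() commits the open transaction so the row-by-row
    # update in _forwards() runs outside it, then multiplies every non-null
    # ProjectKey.rate_limit_window by 60 (apparently minutes -> seconds);
    # backwards() divides by 60 again.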
def forwards(self, orm):
db.commit_transaction()
try:
self._forwards(orm)
except Exception:
db.start_transaction()
raise
db.start_transaction()
def _forwards(self, orm):
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
ProjectKey = orm['sentry.ProjectKey']
queryset = ProjectKey.objects.filter(rate_limit_window__isnull=False)
for key in RangeQuerySetWrapperWithProgressBar(queryset):
ProjectKey.objects.filter(pk=key.pk).update(
rate_limit_window=key.rate_limit_window * 60)
def backwards(self, orm):
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
ProjectKey = orm['sentry.ProjectKey']
queryset = ProjectKey.objects.filter(rate_limit_window__isnull=False)
for key in RangeQuerySetWrapperWithProgressBar(queryset):
ProjectKey.objects.filter(pk=key.pk).update(
rate_limit_window=key.rate_limit_window / 60)
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apiapplication': {
'Meta': {'object_name': 'ApiApplication'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "'edca03fca6594a0bbb3bf8d1de291c64b3ec21abb7ed464d84a3e0e1b87a33ce'", 'unique': 'True', 'max_length': '64'}),
'client_secret': ('sentry.db.models.fields.encrypted.EncryptedTextField', [], {'default': "'a91de422beac427ab54ad0a3cab4610cb37a7022173c452fb87e776457612a40'"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Immortal Cow'", 'max_length': '64', 'blank': 'True'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'privacy_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'terms_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.apiauthorization': {
'Meta': {'unique_together': "(('user', 'application'),)", 'object_name': 'ApiAuthorization'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apigrant': {
'Meta': {'object_name': 'ApiGrant'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']"}),
'code': ('django.db.models.fields.CharField', [], {'default': "'69669803fe884840a38d78ab081f2c9b'", 'max_length': '64', 'db_index': 'True'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 8, 31, 0, 0)', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'redirect_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'application': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiApplication']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'expires_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 30, 0, 0)', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'refresh_token': ('django.db.models.fields.CharField', [], {'default': "'2e3f93d366f84c32a20bbaf2536bffad7ff9761de5944afaa0be34690b056b4e'", 'max_length': '64', 'unique': 'True', 'null': 'True'}),
'scope_list': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'5526f80c353b4489bce4e550f7d5d2cb724347c2f4184506bebe2bc3536ff27f'", 'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('sentry.db.models.fields.encrypted.EncryptedJsonField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 7, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'), ('organization_id', 'external_id'))", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '164', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.deploy': {
'Meta': {'object_name': 'Deploy'},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notified': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.distribution': {
'Meta': {'unique_together': "(('release', 'name'),)", 'object_name': 'Distribution'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.dsymapp': {
'Meta': {'unique_together': "(('project', 'platform', 'app_id'),)", 'object_name': 'DSymApp'},
'app_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'sync_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'), ('organization_id', 'name'))", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.featureadoption': {
'Meta': {'unique_together': "(('organization', 'feature_id'),)", 'object_name': 'FeatureAdoption'},
'applicable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feature_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'group_tombstone_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'state': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'state': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'user_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project_id', 'group_id', 'key'),)", 'object_name': 'GroupTagKey'},
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group_id', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project_id', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptombstone': {
'Meta': {'object_name': 'GroupTombstone'},
'actor_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'unique': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.integration': {
'Meta': {'unique_together': "(('provider', 'external_id'),)", 'object_name': 'Integration'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'default_auth_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationIntegration']", 'to': "orm['sentry.Organization']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'integrations'", 'symmetrical': 'False', 'through': "orm['sentry.ProjectIntegration']", 'to': "orm['sentry.Project']"}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationintegration': {
'Meta': {'unique_together': "(('organization', 'integration'),)", 'object_name': 'OrganizationIntegration'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectintegration': {
'Meta': {'unique_together': "(('project', 'integration'),)", 'object_name': 'ProjectIntegration'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Integration']"}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'rate_limit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'rate_limit_window': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'authors': ('sentry.db.models.fields.array.ArrayField', [], {'of': ('django.db.models.fields.TextField', [], {})}),
'commit_count': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'last_deploy_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'total_deploys': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('organization_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'dist': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Distribution']", 'null': 'True'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseheadcommit': {
'Meta': {'unique_together': "(('repository_id', 'release'),)", 'object_name': 'ReleaseHeadCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'integration_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.scheduleddeletion': {
'Meta': {'unique_together': "(('app_label', 'model_name', 'object_id'),)", 'object_name': 'ScheduledDeletion'},
'aborted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'actor_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 9, 30, 0, 0)'}),
'guid': ('django.db.models.fields.CharField', [], {'default': "'239ab0540b5145759a9add392371adeb'", 'unique': 'True', 'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'in_progress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'object_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.scheduledjob': {
'Meta': {'object_name': 'ScheduledJob'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_scheduled': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'payload': ('jsonfield.fields.JSONField', [], {'default': '{}'})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project_id', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project_id', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'HuAc0iXywzxgtn4lIO5hi82ini9PxxLH'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'), ('user', 'organization', 'key'))", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.encrypted.EncryptedPickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_user_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.versiondsymfile': {
'Meta': {'unique_together': "(('dsym_file', 'version', 'build'),)", 'object_name': 'VersionDSymFile'},
'build': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'dsym_app': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymApp']"}),
'dsym_file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProjectDSymFile']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['sentry']
symmetrical = True
|
looker/sentry
|
src/sentry/south_migrations/0348_fix_project_key_rate_limit_window_unit.py
|
Python
|
bsd-3-clause
| 83,050
|
from rest_framework import relations, serializers
import amo
import mkt.carriers
import mkt.regions
from addons.models import Category
from mkt.api.fields import SplitField, TranslationSerializerField
from mkt.api.serializers import URLSerializerMixin
from mkt.collections.serializers import (CollectionSerializer, SlugChoiceField,
SlugModelChoiceField)
from mkt.submit.serializers import PreviewSerializer
from mkt.webapps.api import AppSerializer
from .models import FeedApp, FeedItem
class FeedAppSerializer(URLSerializerMixin, serializers.ModelSerializer):
app = SplitField(relations.PrimaryKeyRelatedField(required=True),
AppSerializer())
description = TranslationSerializerField(required=False)
preview = SplitField(relations.PrimaryKeyRelatedField(required=False),
PreviewSerializer())
pullquote_attribution = TranslationSerializerField(required=False)
pullquote_rating = serializers.IntegerField(required=False)
pullquote_text = TranslationSerializerField(required=False)
class Meta:
fields = ('app', 'description', 'id', 'preview',
'pullquote_attribution', 'pullquote_rating', 'pullquote_text',
'url')
model = FeedApp
url_basename = 'feedapp'
class FeedItemSerializer(URLSerializerMixin, serializers.ModelSerializer):
carrier = SlugChoiceField(required=False,
choices_dict=mkt.carriers.CARRIER_MAP)
region = SlugChoiceField(required=False,
choices_dict=mkt.regions.REGION_LOOKUP)
category = SlugModelChoiceField(required=False,
queryset=Category.objects.filter(type=amo.ADDON_WEBAPP))
item_type = serializers.SerializerMethodField('get_item_type')
# Types of objects that are allowed to be a feed item.
collection = SplitField(relations.PrimaryKeyRelatedField(required=False),
CollectionSerializer())
class Meta:
fields = ('carrier', 'category', 'collection', 'id', 'item_type',
'region', 'url')
item_types = ('collection',)
model = FeedItem
url_basename = 'feeditem'
def validate(self, attrs):
"""
        Ensure that exactly one object type is specified.
"""
item_changed = any(k for k in self.Meta.item_types if k in attrs.keys())
num_defined = sum(1 for item in self.Meta.item_types if attrs.get(item))
if item_changed and num_defined != 1:
message = ('A valid value for exactly one of the following '
'parameters must be defined: %s' % ','.join(
self.Meta.item_types))
raise serializers.ValidationError(message)
return attrs
def get_item_type(self, obj):
for item_type in self.Meta.item_types:
if getattr(obj, item_type):
return item_type
return
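# A rough usage sketch for the serializers above (hypothetical primary key and
# region slug; assumes the usual DRF flow where a view instantiates the
# serializer). It illustrates the validate() contract: when any Meta.item_types
# field is supplied, exactly one of them must resolve.
#
#     serializer = FeedItemSerializer(
#         data={'collection': 123, 'region': 'restofworld'})
#     if serializer.is_valid():
#         feed_item = serializer.save()
#     else:
#         errors = serializer.errors  # e.g. the "exactly one" message above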
|
wagnerand/zamboni
|
mkt/feed/serializers.py
|
Python
|
bsd-3-clause
| 2,948
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD-3-Clause
import copy
import os
import os.path as op
import numpy as np
from ..constants import FIFF
from ..open import fiff_open, _fiff_get_fid, _get_next_fname
from ..meas_info import read_meas_info
from ..tree import dir_tree_find
from ..tag import read_tag, read_tag_info
from ..base import (BaseRaw, _RawShell, _check_raw_compatibility,
_check_maxshield)
from ..utils import _mult_cal_one
from ...annotations import Annotations, _read_annotations_fif
from ...event import AcqParserFIF
from ...utils import (check_fname, logger, verbose, warn, fill_doc, _file_like,
_on_missing, _check_fname)
@fill_doc
class Raw(BaseRaw):
"""Raw data in FIF format.
Parameters
----------
fname : str | file-like
The raw filename to load. For files that have automatically been split,
the split part will be automatically loaded. Filenames not ending with
``raw.fif``, ``raw_sss.fif``, ``raw_tsss.fif``, ``_meg.fif``,
``_eeg.fif``, or ``_ieeg.fif`` (with or without an optional additional
``.gz`` extension) will generate a warning. If a file-like object is
provided, preloading must be used.
.. versionchanged:: 0.18
Support for file-like objects.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(preload)s
%(on_split_missing)s
%(verbose)s
Attributes
----------
%(info_not_none)s
ch_names : list of string
List of channels' names.
n_times : int
Total number of time points in the raw file.
times : ndarray
Time vector in seconds. Starts from 0, independently of `first_samp`
value. Time interval between consecutive time samples is equal to the
inverse of the sampling frequency.
preload : bool
Indicates whether raw data are in memory.
%(verbose)s
"""
@verbose
def __init__(self, fname, allow_maxshield=False, preload=False,
on_split_missing='raise', verbose=None): # noqa: D102
raws = []
do_check_ext = not _file_like(fname)
next_fname = fname
while next_fname is not None:
raw, next_fname, buffer_size_sec = \
self._read_raw_file(next_fname, allow_maxshield,
preload, do_check_ext)
do_check_ext = False
raws.append(raw)
if next_fname is not None:
if not op.exists(next_fname):
msg = (
f'Split raw file detected but next file {next_fname} '
'does not exist. Ensure all files were transferred '
'properly and that split and original files were not '
'manually renamed on disk (split files should be '
'renamed by loading and re-saving with MNE-Python to '
'preserve proper filename linkage).')
_on_missing(on_split_missing, msg, name='on_split_missing')
break
if _file_like(fname):
# avoid serialization error when copying file-like
fname = None # noqa
_check_raw_compatibility(raws)
super(Raw, self).__init__(
copy.deepcopy(raws[0].info), False,
[r.first_samp for r in raws], [r.last_samp for r in raws],
[r.filename for r in raws], [r._raw_extras for r in raws],
raws[0].orig_format, None, buffer_size_sec=buffer_size_sec,
verbose=verbose)
# combine annotations
self.set_annotations(raws[0].annotations, emit_warning=False)
# Add annotations for in-data skips
for extra in self._raw_extras:
mask = [ent is None for ent in extra['ent']]
start = extra['bounds'][:-1][mask]
stop = extra['bounds'][1:][mask] - 1
duration = (stop - start + 1.) / self.info['sfreq']
annot = Annotations(onset=(start / self.info['sfreq']),
duration=duration,
description='BAD_ACQ_SKIP',
orig_time=self.info['meas_date'])
self._annotations += annot
if preload:
self._preload_data(preload)
else:
self.preload = False
# If using a file-like object, fix the filenames to be representative
# strings now instead of the file-like objects
self._filenames = [_get_fname_rep(fname) for fname in self._filenames]
@verbose
def _read_raw_file(self, fname, allow_maxshield, preload,
do_check_ext=True, verbose=None):
"""Read in header information from a raw file."""
logger.info('Opening raw data file %s...' % fname)
# Read in the whole file if preload is on and .fif.gz (saves time)
if not _file_like(fname):
if do_check_ext:
endings = ('raw.fif', 'raw_sss.fif', 'raw_tsss.fif',
'_meg.fif', '_eeg.fif', '_ieeg.fif')
endings += tuple([f'{e}.gz' for e in endings])
check_fname(fname, 'raw', endings)
# filename
fname = _check_fname(fname, 'read', True, 'fname')
ext = os.path.splitext(fname)[1].lower()
whole_file = preload if '.gz' in ext else False
del ext
else:
# file-like
if not preload:
raise ValueError('preload must be used with file-like objects')
whole_file = True
fname_rep = _get_fname_rep(fname)
ff, tree, _ = fiff_open(fname, preload=whole_file)
with ff as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
annotations = _read_annotations_fif(fid, tree)
# Locate the data of interest
raw_node = dir_tree_find(meas, FIFF.FIFFB_RAW_DATA)
if len(raw_node) == 0:
raw_node = dir_tree_find(meas, FIFF.FIFFB_CONTINUOUS_DATA)
if (len(raw_node) == 0):
raw_node = dir_tree_find(meas, FIFF.FIFFB_IAS_RAW_DATA)
if (len(raw_node) == 0):
raise ValueError('No raw data in %s' % fname_rep)
_check_maxshield(allow_maxshield)
with info._unlock():
info['maxshield'] = True
del meas
if len(raw_node) == 1:
raw_node = raw_node[0]
# Process the directory
directory = raw_node['directory']
nent = raw_node['nent']
nchan = int(info['nchan'])
first = 0
first_samp = 0
first_skip = 0
# Get first sample tag if it is there
if directory[first].kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, directory[first].pos)
first_samp = int(tag.data)
first += 1
_check_entry(first, nent)
# Omit initial skip
if directory[first].kind == FIFF.FIFF_DATA_SKIP:
# This first skip can be applied only after we know the bufsize
tag = read_tag(fid, directory[first].pos)
first_skip = int(tag.data)
first += 1
_check_entry(first, nent)
raw = _RawShell()
raw.filename = fname
raw.first_samp = first_samp
if info['meas_date'] is None and annotations is not None:
# we need to adjust annotations.onset as when there is no meas
# date set_annotations considers that the origin of time is the
# first available sample (ignores first_samp)
annotations.onset -= first_samp / info['sfreq']
raw.set_annotations(annotations)
# Go through the remaining tags in the directory
raw_extras = list()
nskip = 0
orig_format = None
for k in range(first, nent):
ent = directory[k]
                # There can be skips in the data (e.g., if the user unclicked
                # and re-clicked the button)
if ent.kind == FIFF.FIFF_DATA_SKIP:
tag = read_tag(fid, ent.pos)
nskip = int(tag.data)
elif ent.kind == FIFF.FIFF_DATA_BUFFER:
# Figure out the number of samples in this buffer
if ent.type == FIFF.FIFFT_DAU_PACK16:
nsamp = ent.size // (2 * nchan)
elif ent.type == FIFF.FIFFT_SHORT:
nsamp = ent.size // (2 * nchan)
elif ent.type == FIFF.FIFFT_FLOAT:
nsamp = ent.size // (4 * nchan)
elif ent.type == FIFF.FIFFT_DOUBLE:
nsamp = ent.size // (8 * nchan)
elif ent.type == FIFF.FIFFT_INT:
nsamp = ent.size // (4 * nchan)
elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
nsamp = ent.size // (8 * nchan)
elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
nsamp = ent.size // (16 * nchan)
else:
raise ValueError('Cannot handle data buffers of type '
'%d' % ent.type)
if orig_format is None:
if ent.type == FIFF.FIFFT_DAU_PACK16:
orig_format = 'short'
elif ent.type == FIFF.FIFFT_SHORT:
orig_format = 'short'
elif ent.type == FIFF.FIFFT_FLOAT:
orig_format = 'single'
elif ent.type == FIFF.FIFFT_DOUBLE:
orig_format = 'double'
elif ent.type == FIFF.FIFFT_INT:
orig_format = 'int'
elif ent.type == FIFF.FIFFT_COMPLEX_FLOAT:
orig_format = 'single'
elif ent.type == FIFF.FIFFT_COMPLEX_DOUBLE:
orig_format = 'double'
# Do we have an initial skip pending?
if first_skip > 0:
first_samp += nsamp * first_skip
raw.first_samp = first_samp
first_skip = 0
# Do we have a skip pending?
if nskip > 0:
raw_extras.append(dict(
ent=None, first=first_samp, nsamp=nskip * nsamp,
last=first_samp + nskip * nsamp - 1))
first_samp += nskip * nsamp
nskip = 0
# Add a data buffer
raw_extras.append(dict(ent=ent, first=first_samp,
last=first_samp + nsamp - 1,
nsamp=nsamp))
first_samp += nsamp
next_fname = _get_next_fname(fid, fname_rep, tree)
# reformat raw_extras to be a dict of list/ndarray rather than
# list of dict (faster access)
raw_extras = {key: [r[key] for r in raw_extras]
for key in raw_extras[0]}
for key in raw_extras:
if key != 'ent': # dict or None
raw_extras[key] = np.array(raw_extras[key], int)
if not np.array_equal(raw_extras['last'][:-1],
raw_extras['first'][1:] - 1):
raise RuntimeError('FIF file appears to be broken')
bounds = np.cumsum(np.concatenate(
[raw_extras['first'][:1], raw_extras['nsamp']]))
raw_extras['bounds'] = bounds
assert len(raw_extras['bounds']) == len(raw_extras['ent']) + 1
# store the original buffer size
buffer_size_sec = np.median(raw_extras['nsamp']) / info['sfreq']
del raw_extras['first']
del raw_extras['last']
del raw_extras['nsamp']
raw.last_samp = first_samp - 1
raw.orig_format = orig_format
# Add the calibration factors
cals = np.zeros(info['nchan'])
for k in range(info['nchan']):
cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']
raw._cals = cals
raw._raw_extras = raw_extras
logger.info(' Range : %d ... %d = %9.3f ... %9.3f secs' % (
raw.first_samp, raw.last_samp,
float(raw.first_samp) / info['sfreq'],
float(raw.last_samp) / info['sfreq']))
raw.info = info
logger.info('Ready.')
return raw, next_fname, buffer_size_sec
@property
def _dtype(self):
"""Get the dtype to use to store data from disk."""
if self._dtype_ is not None:
return self._dtype_
dtype = None
for raw_extra, filename in zip(self._raw_extras, self._filenames):
for ent in raw_extra['ent']:
if ent is not None:
with _fiff_get_fid(filename) as fid:
fid.seek(ent.pos, 0)
tag = read_tag_info(fid)
if tag is not None:
if tag.type in (FIFF.FIFFT_COMPLEX_FLOAT,
FIFF.FIFFT_COMPLEX_DOUBLE):
dtype = np.complex128
else:
dtype = np.float64
if dtype is not None:
break
if dtype is not None:
break
if dtype is None:
raise RuntimeError('bug in reading')
self._dtype_ = dtype
return dtype
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file."""
n_bad = 0
with _fiff_get_fid(self._filenames[fi]) as fid:
bounds = self._raw_extras[fi]['bounds']
ents = self._raw_extras[fi]['ent']
nchan = self._raw_extras[fi]['orig_nchan']
use = (stop > bounds[:-1]) & (start < bounds[1:])
offset = 0
for ei in np.where(use)[0]:
first = bounds[ei]
last = bounds[ei + 1]
nsamp = last - first
ent = ents[ei]
first_pick = max(start - first, 0)
last_pick = min(nsamp, stop - first)
picksamp = last_pick - first_pick
# only read data if it exists
if ent is not None:
one = read_tag(fid, ent.pos,
shape=(nsamp, nchan),
rlims=(first_pick, last_pick)).data
try:
one.shape = (picksamp, nchan)
except AttributeError: # one is None
n_bad += picksamp
else:
_mult_cal_one(data[:, offset:(offset + picksamp)],
one.T, idx, cals, mult)
offset += picksamp
if n_bad:
warn(f'FIF raw buffer could not be read, acquisition error '
f'likely: {n_bad} samples set to zero')
assert offset == stop - start
def fix_mag_coil_types(self):
"""Fix Elekta magnetometer coil types.
Returns
-------
raw : instance of Raw
The raw object. Operates in place.
Notes
-----
This function changes magnetometer coil types 3022 (T1: SQ20483N) and
3023 (T2: SQ20483-A) to 3024 (T3: SQ20950N) in the channel definition
records in the info structure.
Neuromag Vectorview systems can contain magnetometers with two
different coil sizes (3022 and 3023 vs. 3024). The systems
incorporating coils of type 3024 were introduced last and are used at
the majority of MEG sites. At some sites with 3024 magnetometers,
the data files have still defined the magnetometers to be of type
3022 to ensure compatibility with older versions of Neuromag software.
In the MNE software as well as in the present version of Neuromag
software coil type 3024 is fully supported. Therefore, it is now safe
to upgrade the data files to use the true coil type.
.. note:: The effect of the difference between the coil sizes on the
current estimates computed by the MNE software is very small.
Therefore the use of mne_fix_mag_coil_types is not mandatory.
"""
from ...channels import fix_mag_coil_types
fix_mag_coil_types(self.info)
return self
@property
def acqparser(self):
"""The AcqParserFIF for the measurement info.
See Also
--------
mne.AcqParserFIF
"""
if getattr(self, '_acqparser', None) is None:
self._acqparser = AcqParserFIF(self.info)
return self._acqparser
def _get_fname_rep(fname):
if not _file_like(fname):
return fname
else:
return 'File-like'
def _check_entry(first, nent):
"""Sanity check entries."""
if first >= nent:
raise IOError('Could not read data, perhaps this is a corrupt file')
@fill_doc
def read_raw_fif(fname, allow_maxshield=False, preload=False,
on_split_missing='raise', verbose=None):
"""Reader function for Raw FIF data.
Parameters
----------
fname : str | file-like
The raw filename to load. For files that have automatically been split,
the split part will be automatically loaded. Filenames should end
with raw.fif, raw.fif.gz, raw_sss.fif, raw_sss.fif.gz, raw_tsss.fif,
raw_tsss.fif.gz, or _meg.fif. If a file-like object is provided,
preloading must be used.
.. versionchanged:: 0.18
Support for file-like objects.
allow_maxshield : bool | str (default False)
If True, allow loading of data that has been recorded with internal
active compensation (MaxShield). Data recorded with MaxShield should
generally not be loaded directly, but should first be processed using
SSS/tSSS to remove the compensation signals that may also affect brain
activity. Can also be "yes" to load without eliciting a warning.
%(preload)s
%(on_split_missing)s
%(verbose)s
Returns
-------
raw : instance of Raw
A Raw object containing FIF data.
Notes
-----
.. versionadded:: 0.9.0
When reading a FIF file, note that the first N seconds annotated
``BAD_ACQ_SKIP`` are **skipped**. They are removed from ``raw.times`` and
``raw.n_times`` parameters but ``raw.first_samp`` and ``raw.first_time``
are updated accordingly.
"""
return Raw(fname=fname, allow_maxshield=allow_maxshield,
preload=preload, verbose=verbose,
on_split_missing=on_split_missing)
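# A minimal usage sketch for the reader defined above (hypothetical file path;
# the split-file handling, BAD_ACQ_SKIP annotations and coil-type fix are the
# behaviours documented in the docstrings, not extra features):
#
#     raw = read_raw_fif('sample_audvis_raw.fif', preload=True)
#     print(raw.info['sfreq'], raw.n_times, raw.first_samp)
#     data, times = raw[:, :1000]   # first 1000 samples of every channel
#     raw.fix_mag_coil_types()      # upgrade 3022/3023 magnetometer coils to 3024
#     print(raw.annotations)        # includes BAD_ACQ_SKIP entries for in-data skips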
|
wmvanvliet/mne-python
|
mne/io/fiff/raw.py
|
Python
|
bsd-3-clause
| 20,018
|
import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2:
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, str):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, str):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
        # GeoIP2 only takes IP addresses, so resolve any hostname (FQDN) before returning the query to the caller.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
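# A minimal usage sketch (illustrative host name and IP; assumes
# settings.GEOIP_PATH points at a directory containing the GeoLite2 .mmdb
# databases named above):
#
#     g = GeoIP2()                    # or GeoIP2('/path/to/GeoLite2-City.mmdb')
#     g.country('djangoproject.com')  # dict with 'country_code' and 'country_name'
#     g.city('81.2.69.160')           # dict of city-level fields, some may be None
#     g.lat_lon('djangoproject.com')  # (latitude, longitude) tuple
#     g.geos('81.2.69.160')           # django.contrib.gis.geos.Point with srid=4326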
|
mattseymour/django
|
django/contrib/gis/geoip2/base.py
|
Python
|
bsd-3-clause
| 8,991
|
from __future__ import unicode_literals
from math import ceil
from django.db import models, IntegrityError, connection
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipUnlessDBFeature, skipIfDBFeature
from django.utils.six.moves import range
from .models import (R, RChild, S, T, A, M, MR, MRNull,
create_a, get_default_r, User, Avatar, HiddenUser, HiddenUserProfile,
M2MTo, M2MFrom, Parent, Child, Base)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
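    # For context on the workaround above: a DO_NOTHING relation is declared
    # roughly as follows (a sketch; the real field lives in .models). Deleting
    # the referenced R leaves the row untouched, so the database would raise
    # an integrity error unless the FK is rewritten first, which is what the
    # pre_delete handler in test_do_nothing does.
    #
    #     class A(models.Model):
    #         donothing = models.ForeignKey(
    #             R, related_name='donothing_set', null=True,
    #             on_delete=models.DO_NOTHING)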
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(None, a.o2o_setnull)
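# --- Illustrative sketch (added; not part of the test module). The fields exercised
# by OnDeleteTests live in tests/delete/models.py, which is not shown here; they
# roughly correspond to ForeignKeys declared with the different on_delete handlers,
# for example:
#
#     class A(models.Model):
#         name = models.CharField(max_length=30)
#         auto = models.ForeignKey(R)                      # default: CASCADE
#         setnull = models.ForeignKey(R, null=True, on_delete=models.SET_NULL)
#         setvalue = models.ForeignKey(R, on_delete=models.SET(get_default_r))
#         setdefault = models.ForeignKey(R, default=get_default_r,
#                                        on_delete=models.SET_DEFAULT)
#         cascade = models.ForeignKey(R, on_delete=models.CASCADE)
#         protect = models.ForeignKey(R, on_delete=models.PROTECT)
#         donothing = models.ForeignKey(R, on_delete=models.DO_NOTHING)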
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').rel.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
        self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
s = S.objects.create(r=R.objects.create())
for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertEqual(None, obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertEqual(None, a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
T.objects.create(pk=2, s=s2)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create(pk=1, r=r)
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
r.delete()
finally:
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
self.assertEqual(deletions[0], 1)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
a = Avatar.objects.get(pk=u.avatar_id)
# The below doesn't make sense... Why do we need to null out
# user.avatar if we are going to delete the user immediately after it,
# and there are no more cascades.
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
def test_large_delete(self):
TEST_SIZE = 2000
objs = [Avatar() for i in range(0, TEST_SIZE)]
Avatar.objects.bulk_create(objs)
# Calculate the number of queries needed.
batch_size = connection.ops.bulk_batch_size(['pk'], objs)
# The related fetches are done in batches.
batches = int(ceil(float(len(objs)) / batch_size))
# One query for Avatar.objects.all() and then one related fast delete for
# each batch.
fetches_to_mem = 1 + batches
# The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
self.assertNumQueries(queries, Avatar.objects.all().delete)
self.assertFalse(Avatar.objects.exists())
def test_large_delete_related(self):
TEST_SIZE = 2000
s = S.objects.create(r=R.objects.create())
for i in range(TEST_SIZE):
T.objects.create(s=s)
batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
# TEST_SIZE // batch_size (select related `T` instances)
# + 1 (select related `U` instances)
# + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
# + 1 (delete `s`)
expected_num_queries = (ceil(TEST_SIZE // batch_size) +
ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
self.assertNumQueries(expected_num_queries, s.delete)
self.assertFalse(S.objects.exists())
self.assertFalse(T.objects.exists())
class FastDeleteTests(TestCase):
def test_fast_delete_fk(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc='a')
User.objects.create(avatar=a)
u2 = User.objects.create()
expected_queries = 1 if connection.features.update_can_self_select else 2
self.assertNumQueries(expected_queries,
User.objects.filter(avatar__desc='a').delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
# However, this doesn't work as child.parent access creates a query,
# and this means we will be generating extra queries (a lot for large
# querysets). This is not a fast-delete problem.
# self.assertNumQueries(2, c.delete)
c.delete()
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_fast_delete_large_batch(self):
User.objects.bulk_create(User() for i in range(0, 2000))
# No problems here - we aren't going to cascade, so we will fast
# delete the objects in a single query.
self.assertNumQueries(1, User.objects.all().delete)
a = Avatar.objects.create(desc='a')
User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
# We don't hit parameter amount limits for a, so just one query for
# that + fast delete of the related objs.
self.assertNumQueries(2, a.delete)
self.assertEqual(User.objects.count(), 0)
|
doismellburning/django
|
tests/delete/tests.py
|
Python
|
bsd-3-clause
| 15,685
|
"""
Plots the instantaneous drag coefficient between 0 and 3 time-units of flow
simulation and compares with numerical results from
Koumoutsakos and Leonard (1995).
_References:_
* Koumoutsakos, P., & Leonard, A. (1995).
High-resolution simulations of the flow around an impulsively started
cylinder using vortex methods.
Journal of Fluid Mechanics, 296, 1-38.
"""
import os
import pathlib
import numpy
import collections
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
root_dir = os.environ.get('PETIBM_EXAMPLES')
# Wrap the environment value in a Path so the `/` joins below work either way.
root_dir = pathlib.Path(root_dir) if root_dir else simu_dir.parents[1]
data = collections.OrderedDict({})
# Reads forces from file.
label = 'PetIBM'
filepath = simu_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, usecols=(0, 1))
data[label] = {'t': t, 'cd': 2 * fx}
data[label]['kwargs'] = {}
# Reads drag coefficient of Koumoutsakos and Leonard (1995) for Re=3000.
label = 'Koumoutsakos and Leonard (1995)'
filename = 'koumoutsakos_leonard_1995_cylinder_dragCoefficientRe3000.dat'
filepath = root_dir / 'data' / filename
with open(filepath, 'r') as infile:
t, cd = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
data[label] = {'t': 0.5 * t, 'cd': cd}
data[label]['kwargs'] = {'linewidth': 0, 'marker': 'o',
'markerfacecolor': 'none', 'markeredgecolor': 'black'}
pyplot.rc('font', family='serif', size=16)
# Plots the instantaneous drag coefficients.
fig, ax = pyplot.subplots(figsize=(8.0, 6.0))
ax.grid()
ax.set_xlabel('Non-dimensional time')
ax.set_ylabel('Drag coefficient')
for label, subdata in data.items():
ax.plot(subdata['t'], subdata['cd'], label=label, **subdata['kwargs'])
ax.axis((0.0, 3.0, 0.0, 2.0))
ax.legend()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
|
mesnardo/PetIBM
|
examples/ibpm/cylinder2dRe3000_GPU/scripts/plotDragCoefficient.py
|
Python
|
bsd-3-clause
| 2,006
|
from __future__ import print_function
import shutil
import os.path
import tempfile
import cProfile
import pstats
import nineml
from nineml.utils.comprehensive_example import (
instances_of_all_types, v1_safe_docs)
from nineml.serialization import ext_to_format, format_to_serializer
format_to_ext = dict((v, k) for k, v in ext_to_format.items()) # @UndefinedVariable @IgnorePep8
print_serialized = False
printable = ('xml', 'json', 'yaml')
_tmp_dir = tempfile.mkdtemp()
def function():
for version in (1.0, 2.0):
if version == 1.0:
docs = v1_safe_docs
else:
docs = list(instances_of_all_types['NineML'].values())
for format in format_to_serializer: # @ReservedAssignment
try:
ext = format_to_ext[format]
except KeyError:
continue # ones that can't be written to file (e.g. dict)
for i, document in enumerate(docs):
doc = document.clone()
url = os.path.join(
_tmp_dir, 'test{}v{}{}'.format(i, version, ext))
nineml.write(url, doc, format=format, version=version,
indent=2)
if print_serialized and format in printable:
with open(url) as f:
print(f.read())
reread_doc = nineml.read(url, reload=True) # @UnusedVariable
shutil.rmtree(_tmp_dir)
out_file = os.path.join(os.getcwd(), 'serial_profile.out')
cProfile.run('function()', out_file)
p = pstats.Stats(out_file)
p.sort_stats('cumtime').print_stats()
|
INCF/lib9ML
|
test/serialization_profile.py
|
Python
|
bsd-3-clause
| 1,617
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hs_core', '0018_merge'),
]
operations = [
migrations.AddField(
model_name='baseresource',
name='locked_time',
field=models.DateTimeField(null=True, blank=True),
),
]
|
ResearchSoftwareInstitute/MyHPOM
|
hs_core/migrations/0019_baseresource_locked_time.py
|
Python
|
bsd-3-clause
| 410
|
# $Id$
#
# Copyright (C) 2005-2006 greg landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from __future__ import print_function
import copy,struct,sys
from rdkit.six.moves import cPickle
from rdkit.six import iterkeys
from rdkit import six
from rdkit import DataStructs
class VectCollection(object):
"""
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((1,3,5))
>>> vc.AddVect(1,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((6,8))
>>> vc.AddVect(2,bv1)
>>> len(vc)
10
>>> vc.GetNumBits()
10
>>> vc[0]
0
>>> vc[1]
1
>>> vc[9]
0
>>> vc[6]
1
>>> vc.GetBit(6)
1
>>> list(vc.GetOnBits())
[1, 3, 5, 6, 8]
keys must be unique, so adding a duplicate replaces the
previous values:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> len(vc)
10
>>> vc[1]
0
>>> vc[9]
1
>>> vc[6]
1
we can also query the children:
>>> vc.NumChildren()
2
>>> cs = vc.GetChildren()
>>> id,fp = cs[0]
>>> id
1
>>> list(fp.GetOnBits())
[7, 9]
>>> id,fp = cs[1]
>>> id
2
>>> list(fp.GetOnBits())
[6, 8]
attach/detach operations:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsNotMatchingBit(6)
>>> vc.NumChildren()
2
>>> list(vc.GetOnBits())
[5, 6, 8]
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsMatchingBit(6)
>>> vc.NumChildren()
1
>>> list(vc.GetOnBits())
[7, 9]
to copy VectCollections, use the copy module:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> list(vc.GetOnBits())
[5, 6, 7, 9]
>>> vc2 = copy.copy(vc)
>>> vc.DetachVectsNotMatchingBit(6)
>>> list(vc.GetOnBits())
[5, 6]
>>> list(vc2.GetOnBits())
[5, 6, 7, 9]
The Uniquify() method can be used to remove duplicate vectors:
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.AddVect(2,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((2,3,5))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> vc.Uniquify()
>>> vc.NumChildren()
2
"""
def __init__(self):
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset=True
def GetOrVect(self):
if self.__needReset:
self.Reset()
return self.__orVect
orVect = property(GetOrVect)
def AddVect(self,id,vect):
self.__vects[id]=vect
self.__needReset=True
def Reset(self):
if not self.__needReset:
return
self.__orVect=None
if not self.__vects:
return
ks = list(iterkeys(self.__vects))
self.__orVect = copy.copy(self.__vects[ks[0]])
self.__numBits = self.__orVect.GetNumBits()
for i in range(1,len(ks)):
self.__orVect |= self.__vects[ks[i]]
self.__needReset=False
def NumChildren(self):
return len(self.__vects.keys())
def GetChildren(self):
return tuple(self.__vects.items())
def GetBit(self,id):
if self.__needReset:
self.Reset()
return self[id]
def GetNumBits(self):
return len(self)
def GetOnBits(self):
if self.__needReset:
self.Reset()
return self.__orVect.GetOnBits()
def DetachVectsNotMatchingBit(self,bit):
items = list(self.__vects.items())
for k,v in items:
if not v.GetBit(bit):
del(self.__vects[k])
self.__needReset=True
def DetachVectsMatchingBit(self,bit):
items = list(self.__vects.items())
for k,v in items:
if v.GetBit(bit):
del(self.__vects[k])
self.__needReset=True
def Uniquify(self,verbose=False):
obls = {}
for k,v in self.__vects.items():
obls[k] = list(v.GetOnBits())
    # Materialise the key views as lists so index()/remove() below also work on Python 3.
    keys = list(self.__vects.keys())
    nKeys = len(keys)
    keep = list(self.__vects.keys())
for i in range(nKeys):
k1 = keys[i]
if k1 in keep:
obl1 = obls[k1]
idx = keys.index(k1)
for j in range(idx+1,nKeys):
k2 = keys[j]
if k2 in keep:
obl2 = obls[k2]
if obl1==obl2:
keep.remove(k2)
              self.__needReset=True
tmp = {}
for k in keep:
tmp[k] = self.__vects[k]
if verbose: print('uniquify:',len(self.__vects),'->',len(tmp))
self.__vects=tmp
def __len__(self):
if self.__needReset:
self.Reset()
return self.__numBits
def __getitem__(self,id):
if self.__needReset:
self.Reset()
return self.__orVect.GetBit(id)
#
# set up our support for pickling:
#
def __getstate__(self):
pkl = struct.pack('<I',len(self.__vects))
for k,v in self.__vects.items():
pkl += struct.pack('<I',k)
p = v.ToBinary()
l = len(p)
pkl += struct.pack('<I',l)
pkl += struct.pack('%ds'%(l),p)
return pkl
def __setstate__(self,pkl):
if six.PY3 and isinstance(pkl,str):
pkl = bytes(pkl,encoding='Latin1')
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset=True
szI = struct.calcsize('I')
offset = 0
nToRead = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
for i in range(nToRead):
k = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
l = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
sz = struct.calcsize('%ds'%l)
bv = DataStructs.ExplicitBitVect(struct.unpack('%ds'%l,pkl[offset:offset+sz])[0])
offset += sz
self.AddVect(k,bv)
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
|
soerendip42/rdkit
|
rdkit/DataStructs/VectCollection.py
|
Python
|
bsd-3-clause
| 6,300
|
# lots to do:
# __ native drawLines
# __ add native drawCurve method
# __ native rectangle/round rect method
# __ native drawEllipse
# __ native drawArc
# __ drawImage support (work on Pyart side of things)
from __future__ import print_function
import pyart
from rdkit.sping.pid import *
from rdkit.sping.PDF import pdfmetrics
import Fontmapping # helps by mapping pid font classes to Pyart font names
# note for now I'm just going to do the standard PDF fonts & forget the rest
class PyartCanvas(Canvas):
"note the default face is 'times' and is set in Fontmapping.py"
def __init__(self,size=(300,300),name='PyartCanvas.png'):
self._pycan = pyart.Canvas(size[0], size[1], dpi=72)
self.filename = name
Canvas.__init__(self, size, name)
# self.defaultFillColor = transparent
# now we need to setup our tracking of the defaults vs the current state
# see if the __setattr__ approach is any better than the _updateXX strategy
def __setattr__(self, name, value):
if name == 'defaultLineColor':
if value:
# print('setting defaultLineColor to %s, 0x%x' % (value, value.toHexRGB()))
if value != transparent:
self._pycan.gstate.stroke = value.toHexRGB()
self.__dict__[name] = value
elif name == 'defaultFillColor':
if value:
if value != transparent:
self._pycan.gstate.fill = value.toHexRGB()
self.__dict__[name] = value
elif name == 'defaultLineWidth' :
if value:
self._pycan.gstate.stroke_width = value
self.__dict__[name] = value
elif name == 'defaultFont':
if value:
self.__dict__[name] = value
self._setPyartFont(value)
else: # received None so set to default font face & size=12
self.__dict__[name] = Font(face='times')
self._setPyartFont(self.__dict__[name])
else:
self.__dict__[name] = value
## Private methods ##
def _protectArtState(self, bool):
if bool:
self._pycan.gsave()
return bool
def _restoreArtState(self, bool):
if bool:
self._pycan.grestore()
def _setPyartFont(self, fontInstance):
# accounts for "None" option
# does not act on self.defaultFont at all
fontsize = fontInstance.size
self._pycan.gstate.font_size = fontsize
# map pid name for font to Pyart name
pyartname = Fontmapping.getPyartName(fontInstance)
self._pycan.gstate.setfont(pyartname)
# # # # #
### public PID Canvas methods ##
def clear(self):
pass
def flush(self):
pass
def save(self, file=None, format=None):
# fileobj = getFileObject(file)
if not file:
file = self.filename
if isinstance(file, StringType):
self._pycan.save(file)
else:
raise NotImplementedError
def _findExternalFontName(self, font): #copied from piddlePDF by cwl- hack away!
"""Attempts to return proper font name.
PDF uses a standard 14 fonts referred to
by name. Default to self.defaultFont('Helvetica').
The dictionary allows a layer of indirection to
support a standard set of PIDDLE font names."""
piddle_font_map = {
'Times':'Times',
'times':'Times',
'Courier':'Courier',
'courier':'Courier',
'helvetica':'Helvetica',
'Helvetica':'Helvetica',
'symbol':'Symbol',
'Symbol':'Symbol',
'monospaced':'Courier',
'serif':'Times',
'sansserif':'Helvetica',
'ZapfDingbats':'ZapfDingbats',
'zapfdingbats':'ZapfDingbats',
'arial':'Helvetica'
}
        try:
            face = piddle_font_map[font.face.lower()]
        except KeyError:
            return 'Helvetica'
name = face + '-'
if font.bold and face in ['Courier','Helvetica','Times']:
name = name + 'Bold'
if font.italic and face in ['Courier', 'Helvetica']:
name = name + 'Oblique'
elif font.italic and face == 'Times':
name = name + 'Italic'
if name == 'Times-':
name = name + 'Roman'
# symbol and ZapfDingbats cannot be modified!
#trim and return
if name[-1] == '-':
name = name[0:-1]
return name
def stringWidth(self, s, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return pdfmetrics.stringwidth(s, fontname) * font.size * 0.001
def fontAscent(self, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return pdfmetrics.ascent_descent[fontname][0] * 0.001 * font.size
def fontDescent(self, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return -pdfmetrics.ascent_descent[fontname][1] * 0.001 * font.size
def drawLine(self, x1, y1, x2, y2, color=None, width=None):
## standard code ##
color = color or self.defaultLineColor
width = width or self.defaultLineWidth
if color != transparent:
changed = self._protectArtState( (color != self.defaultLineColor) or
(width != self.defaultLineWidth) )
if color != self.defaultLineColor:
self._pycan.gstate.stroke = color.toHexRGB()
# print("color is %s <-> %s" % (color, color.toHexStr()))
if width != self.defaultLineWidth:
self._pycan.gstate.stroke_width = width
###################
# actual drawing
p = pyart.VectorPath(3)
p.moveto_open(x1,y1)
p.lineto(x2,y2)
self._pycan.stroke(p)
## standard code ##
if changed:
self._pycan.grestore()
###################
# def drawLines(self, lineList, color=None, width=None):
# pass
def drawString(self, s, x, y, font=None, color=None, angle=0):
# start w/ the basics
self._pycan.drawString(x,y, s)
def drawPolygon(self, pointlist,
edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
eColor = edgeColor or self.defaultLineColor
fColor = fillColor or self.defaultFillColor
eWidth = edgeWidth or self.defaultLineWidth
changed = self._protectArtState( (eColor != self.defaultLineColor) or
(eWidth != self.defaultLineWidth) or
(fColor != self.defaultFillColor) )
if eColor != self.defaultLineColor:
self._pycan.gstate.stroke = eColor.toHexRGB()
if fColor != self.defaultFillColor:
self._pycan.gstate.fill = fColor.toHexRGB()
if eWidth != self.defaultLineWidth:
self._pycan.gstate.stroke_width = eWidth
path = pyart.VectorPath(len(pointlist)+1)
if closed:
path.moveto_closed(pointlist[0][0], pointlist[0][1])
else:
path.moveto_open(pointlist[0][0], pointlist[0][1])
for pt in pointlist[1:]:
path.lineto(pt[0],pt[1])
if closed:
path.close()
if fColor != transparent and closed:
self._pycan.fill(path)
if eColor != transparent:
self._pycan.stroke(path)
self._restoreArtState(changed)
#def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
# pass
# def drawRoundRect(self, x1,y1, x2,y2, rx=8, ry=8,
# edgeColor=None, edgeWidth=None, fillColor=None):
# pass
# def drawEllipse(self, x1,y1, x2,y2, edgeColor=None, edgeWidth=None,
# fillColor=None):
# pass
# def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, edgeColor=None,
# edgeWidth=None, fillColor=None):
# pass
# def drawFigure(self, partList,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
# pass
# def drawImage(self, image, x1, y1, x2=None,y2=None):
# pass
## basic tests ##
if __name__=='__main__':
import rdkit.sping.tests.pidtest
can = PyartCanvas(size=(300,300), name='basictest.png')
#can.defaultLineColor = Color(0.7, 0.7, 1.0)
#can.drawLine(10,10, 290,290)
#can.drawLine(10,10, 50, 10, color=green, width = 4.5)
rdkit.sping.tests.pidtest.drawBasics(can)
can.save(file='basicTest.png')
print('saving basicTest.png')
can = PyartCanvas(size=(400,400), name='test-strings.png')
rdkit.sping.tests.pidtest.drawStrings(can)
can.save()
|
soerendip42/rdkit
|
rdkit/sping/Pyart/pidPyart.py
|
Python
|
bsd-3-clause
| 9,314
|
from typing import (
Dict,
Optional,
)
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
get_jit_arguments,
)
def generate_online_numba_ewma_func(engine_kwargs: Optional[Dict[str, bool]]):
"""
    Generate a numba jitted online ewma function specified by values
from engine_kwargs.
Parameters
----------
engine_kwargs : dict
dictionary of arguments to be passed into numba.jit
Returns
-------
Numba function
"""
nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
cache_key = (lambda x: x, "online_ewma")
if cache_key in NUMBA_FUNC_CACHE:
return NUMBA_FUNC_CACHE[cache_key]
numba = import_optional_dependency("numba")
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def online_ewma(
values: np.ndarray,
deltas: np.ndarray,
minimum_periods: int,
old_wt_factor: float,
new_wt: float,
old_wt: np.ndarray,
adjust: bool,
ignore_na: bool,
):
"""
Compute online exponentially weighted mean per column over 2D values.
        Takes the first observation as is, then computes the subsequent
        exponentially weighted mean, accounting for minimum periods.
"""
result = np.empty(values.shape)
weighted_avg = values[0]
nobs = (~np.isnan(weighted_avg)).astype(np.int64)
result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
for i in range(1, len(values)):
cur = values[i]
is_observations = ~np.isnan(cur)
nobs += is_observations.astype(np.int64)
for j in numba.prange(len(cur)):
if not np.isnan(weighted_avg[j]):
if is_observations[j] or not ignore_na:
# note that len(deltas) = len(vals) - 1 and deltas[i] is to be
# used in conjunction with vals[i+1]
old_wt[j] *= old_wt_factor ** deltas[j - 1]
if is_observations[j]:
# avoid numerical errors on constant series
if weighted_avg[j] != cur[j]:
weighted_avg[j] = (
(old_wt[j] * weighted_avg[j]) + (new_wt * cur[j])
) / (old_wt[j] + new_wt)
if adjust:
old_wt[j] += new_wt
else:
old_wt[j] = 1.0
elif is_observations[j]:
weighted_avg[j] = cur[j]
result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
return result, old_wt
return online_ewma
class EWMMeanState:
def __init__(self, com, adjust, ignore_na, axis, shape):
alpha = 1.0 / (1.0 + com)
self.axis = axis
self.shape = shape
self.adjust = adjust
self.ignore_na = ignore_na
self.new_wt = 1.0 if adjust else alpha
self.old_wt_factor = 1.0 - alpha
self.old_wt = np.ones(self.shape[self.axis - 1])
self.last_ewm = None
def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func):
result, old_wt = ewm_func(
weighted_avg,
deltas,
min_periods,
self.old_wt_factor,
self.new_wt,
self.old_wt,
self.adjust,
self.ignore_na,
)
self.old_wt = old_wt
self.last_ewm = result[-1]
return result
def reset(self):
self.old_wt = np.ones(self.shape[self.axis - 1])
self.last_ewm = None
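# --- Illustrative usage sketch (added; not part of pandas). Assumes numba is
# installed; the input data below is made up. ---
if __name__ == "__main__":
    values = np.array([[1.0], [2.0], [np.nan], [4.0]])
    state = EWMMeanState(com=0.5, adjust=True, ignore_na=False, axis=0,
                         shape=values.shape)
    ewm_func = generate_online_numba_ewma_func(engine_kwargs=None)
    # len(deltas) == len(values) - 1; all ones means evenly spaced observations.
    deltas = np.ones(len(values) - 1)
    result = state.run_ewm(values, deltas, min_periods=1, ewm_func=ewm_func)
    print(result)          # running EWM mean per column
    print(state.last_ewm)  # carried-over state for the next batch of rows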
|
rs2/pandas
|
pandas/core/window/online.py
|
Python
|
bsd-3-clause
| 3,789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/spf.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher import spf
class SPFTests(testing.KingPhisherTestCase):
@testing.skip_if_offline
def test_spf_check_host(self):
s = spf.SenderPolicyFramework('1.2.3.4', 'king-phisher.com')
check_host_result = s.check_host()
self.assertIsNotNone(check_host_result)
self.assertEqual(check_host_result, 'fail')
self.assertEqual(spf.check_host('1.2.3.4', 'king-phisher.com'), 'fail')
@testing.skip_if_offline
def test_spf_evaluate_mechanism(self):
s = spf.SenderPolicyFramework('1.2.3.4', 'doesnotexist.king-phisher.com')
eval_mech = lambda m, r: s._evaluate_mechanism(s.ip_address, s.domain, s.sender, m, r)
self.assertTrue(eval_mech('all', None))
self.assertTrue(eval_mech('exists', '%{d2}'))
self.assertTrue(eval_mech('ip4', '1.2.3.0/24'))
self.assertTrue(eval_mech('ip4', '1.2.3.4'))
self.assertFalse(eval_mech('ip4', '1.1.1.0/24'))
def test_spf_evaluate_mechanism_permerror(self):
s = spf.SenderPolicyFramework('1.2.3.4', 'doesnotexist.king-phisher.com')
eval_mech = lambda m, r: s._evaluate_mechanism(s.ip_address, s.domain, s.sender, m, r)
with self.assertRaises(spf.SPFPermError):
eval_mech('ip4', 'thisisnotanetwork')
with self.assertRaises(spf.SPFPermError):
eval_mech('ip6', 'thisisnotanetwork')
with self.assertRaises(spf.SPFPermError):
eval_mech('fake', None)
def test_spf_evaluate_mechanism_temperror(self):
s = spf.SenderPolicyFramework('1.2.3.4', 'doesnotexist.king-phisher.com')
eval_mech = lambda m, r: s._evaluate_mechanism(s.ip_address, s.domain, s.sender, m, r)
with self.assertRaises(spf.SPFTempError):
eval_mech('a', None)
with self.assertRaises(spf.SPFTempError):
eval_mech('exists', None)
with self.assertRaises(spf.SPFTempError):
eval_mech('mx', None)
def test_spf_nonexistent_domain(self):
s = spf.SenderPolicyFramework('1.2.3.4', 'doesnotexist.king-phisher.com')
self.assertIsNone(s.check_host())
self.assertIsNone(spf.check_host('1.2.3.4', 'doesnotexist.king-phisher.com'))
def test_spf_rfc7208_macro_expansion(self):
spf_records = [('all', '-', None)]
s = spf.SenderPolicyFramework('192.0.2.3', 'email.example.com', 'strong-bad@email.example.com', spf_records=spf_records)
expand_macro = lambda m: s.expand_macros(m, '192.0.2.3', 'email.example.com', 'strong-bad@email.example.com')
self.assertEqual(expand_macro('%{s}'), 'strong-bad@email.example.com')
self.assertEqual(expand_macro('%{o}'), 'email.example.com')
self.assertEqual(expand_macro('%{d}'), 'email.example.com')
self.assertEqual(expand_macro('%{d4}'), 'email.example.com')
self.assertEqual(expand_macro('%{d3}'), 'email.example.com')
self.assertEqual(expand_macro('%{d2}'), 'example.com')
self.assertEqual(expand_macro('%{d1}'), 'com')
self.assertEqual(expand_macro('%{dr}'), 'com.example.email')
self.assertEqual(expand_macro('%{d2r}'), 'example.email')
self.assertEqual(expand_macro('%{l}'), 'strong-bad')
self.assertEqual(expand_macro('%{l-}'), 'strong.bad')
self.assertEqual(expand_macro('%{lr}'), 'strong-bad')
self.assertEqual(expand_macro('%{lr-}'), 'bad.strong')
self.assertEqual(expand_macro('%{l1r-}'), 'strong')
self.assertEqual(expand_macro('%{ir}.%{v}._spf.%{d2}'), '3.2.0.192.in-addr._spf.example.com')
self.assertEqual(expand_macro('%{lr-}.lp._spf.%{d2}'), 'bad.strong.lp._spf.example.com')
self.assertEqual(expand_macro('%{lr-}.lp.%{ir}.%{v}._spf.%{d2}'), 'bad.strong.lp.3.2.0.192.in-addr._spf.example.com')
self.assertEqual(expand_macro('%{ir}.%{v}.%{l1r-}.lp._spf.%{d2}'), '3.2.0.192.in-addr.strong.lp._spf.example.com')
self.assertEqual(expand_macro('%{d2}.trusted-domains.example.net'), 'example.com.trusted-domains.example.net')
def test_spf_record_unparse(self):
self.assertEqual(spf.record_unparse(('all', '+', None)), 'all')
self.assertEqual(spf.record_unparse(('all', '-', None)), '-all')
self.assertEqual(spf.record_unparse(('include', '+', '_spf.wonderland.com')), 'include:_spf.wonderland.com')
self.assertEqual(spf.record_unparse(('ip4', '+', '10.0.0.0/24')), 'ip4:10.0.0.0/24')
if __name__ == '__main__':
unittest.main()
|
zigitax/king-phisher
|
tests/spf.py
|
Python
|
bsd-3-clause
| 5,698
|
# In this example we will show the difference between a 2-d Sobol sequence
# and sampling uniformly at random in 2 dimensions.
# The Sobol sequence has far lower discrepancy, i.e., the generated samples
# are spread out better in the sampling space.
#
# This example requires matplotlib to generate figures.
import matplotlib.pyplot as plt
import optunity
import random
num_pts = 200 # the number of points to generate
skip = 5000 # the number of initial points of the Sobol sequence to skip
# generate Sobol sequence
res = optunity.solvers.Sobol.i4_sobol_generate(2, num_pts, skip)
x1_sobol, x2_sobol = zip(*res)
# generate uniform points
x1_random = [random.random() for _ in range(num_pts)]
x2_random = [random.random() for _ in range(num_pts)]
# plot results
plt.figure(1)
plt.plot(x1_sobol, x2_sobol, 'o')
plt.title('Sobol sequence')
plt.draw()
plt.figure(2)
plt.plot(x1_random, x2_random, 'ro')
plt.title('Uniform random samples')
plt.show()
|
chrinide/optunity
|
bin/examples/python/sobol_vs_uniform.py
|
Python
|
bsd-3-clause
| 960
|
"""
Based on http://vaig.be/2009/03/getting-client-os-in-django.html
"""
import re
def client_os(user_agent):
'''
Context processor for Django that provides operating system
information base on HTTP user agent.
A user agent looks like (line break added):
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) \
Gecko/2009020409 Iceweasel/3.0.6 (Debian-3.0.6-1)"
'''
# Mozilla/5.0
regex = '(?P<application_name>\w+)/(?P<application_version>[\d\.]+)'
regex += ' \('
# X11
regex += '(?P<compatibility_flag>\w+)'
regex += '; '
# U
if "U;" in user_agent or "MSIE" in user_agent: # some UA strings leave out the U;
regex += '(?P<version_token>[\w .]+)'
regex += '; '
# Linux i686
regex += '(?P<platform_token>[\w ._]+)'
# anything else
regex += '; .*'
result = re.match(regex, user_agent)
if result:
result_dict = result.groupdict()
full_platform = result_dict['platform_token']
platform_values = full_platform.split(' ')
if platform_values[0] in ('Windows', 'Linux', 'Mac'):
platform = platform_values[0]
elif platform_values[1] in ('Mac',):
# Mac is given as "PPC Mac" or "Intel Mac"
platform = platform_values[1]
else:
platform = None
else:
# Total hack to avoid dealing with regex nightmares
if 'mac' in user_agent.lower():
full_platform = "Intel Mac 10.6"
platform = 'Mac'
elif 'windows' in user_agent.lower():
full_platform = "Windows"
platform = 'Windows'
else:
full_platform = None
platform = None
return {
'full_platform': full_platform,
'platform': platform,
}
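# --- Illustrative usage sketch (added; not part of the original module). The user
# agent below is the sample quoted in the docstring above. ---
if __name__ == '__main__':
    ua = ('Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) '
          'Gecko/2009020409 Iceweasel/3.0.6 (Debian-3.0.6-1)')
    # Expected to yield something like:
    # {'full_platform': 'Linux i686', 'platform': 'Linux'}
    print(client_os(ua))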
|
Alwnikrotikz/marinemap
|
lingcod/common/uaparser/clientos.py
|
Python
|
bsd-3-clause
| 1,804
|
import os
import django
TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'tests')
COMPRESS_CACHE_BACKEND = 'locmem://'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = [
'compressor',
'coffin',
'jingo',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(TEST_DIR, 'static')
TEMPLATE_DIRS = (
# Specifically choose a name that will not be considered
# by app_directories loader, to make sure each test uses
# a specific template without considering the others.
os.path.join(TEST_DIR, 'test_templates'),
)
if django.VERSION[:2] < (1, 6):
TEST_RUNNER = 'discover_runner.DiscoverRunner'
SECRET_KEY = "iufoj=mibkpdz*%bob952x(%49rqgv8gg45k36kjcg76&-y5=!"
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
)
|
ramcn/demo3
|
venv/lib/python3.4/site-packages/compressor/test_settings.py
|
Python
|
mit
| 880
|
from stormed.util import WithFields
class Declare(WithFields):
_name = "queue.declare"
_class_id = 50
_method_id = 10
_sync = True
_content = False
_fields = [
('ticket' , 'short'),
('queue' , 'shortstr'),
('passive' , 'bit'),
('durable' , 'bit'),
('exclusive' , 'bit'),
('auto_delete' , 'bit'),
('nowait' , 'bit'),
('arguments' , 'table'),
]
class DeclareOk(WithFields):
_name = "queue.declare-ok"
_class_id = 50
_method_id = 11
_sync = False
_content = False
_fields = [
('queue' , 'shortstr'),
('message_count' , 'long'),
('consumer_count' , 'long'),
]
class Bind(WithFields):
_name = "queue.bind"
_class_id = 50
_method_id = 20
_sync = True
_content = False
_fields = [
('ticket' , 'short'),
('queue' , 'shortstr'),
('exchange' , 'shortstr'),
('routing_key' , 'shortstr'),
('nowait' , 'bit'),
('arguments' , 'table'),
]
class BindOk(WithFields):
_name = "queue.bind-ok"
_class_id = 50
_method_id = 21
_sync = False
_content = False
_fields = [
]
class Purge(WithFields):
_name = "queue.purge"
_class_id = 50
_method_id = 30
_sync = True
_content = False
_fields = [
('ticket' , 'short'),
('queue' , 'shortstr'),
('nowait' , 'bit'),
]
class PurgeOk(WithFields):
_name = "queue.purge-ok"
_class_id = 50
_method_id = 31
_sync = False
_content = False
_fields = [
('message_count' , 'long'),
]
class Delete(WithFields):
_name = "queue.delete"
_class_id = 50
_method_id = 40
_sync = True
_content = False
_fields = [
('ticket' , 'short'),
('queue' , 'shortstr'),
('if_unused' , 'bit'),
('if_empty' , 'bit'),
('nowait' , 'bit'),
]
class DeleteOk(WithFields):
_name = "queue.delete-ok"
_class_id = 50
_method_id = 41
_sync = False
_content = False
_fields = [
('message_count' , 'long'),
]
class Unbind(WithFields):
_name = "queue.unbind"
_class_id = 50
_method_id = 50
_sync = True
_content = False
_fields = [
('ticket' , 'short'),
('queue' , 'shortstr'),
('exchange' , 'shortstr'),
('routing_key' , 'shortstr'),
('arguments' , 'table'),
]
class UnbindOk(WithFields):
_name = "queue.unbind-ok"
_class_id = 50
_method_id = 51
_sync = False
_content = False
_fields = [
]
id2method = {
10: Declare,
11: DeclareOk,
20: Bind,
21: BindOk,
30: Purge,
31: PurgeOk,
40: Delete,
41: DeleteOk,
50: Unbind,
51: UnbindOk,
}
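# --- Illustrative lookup note (added; not part of the original module). A decoded
# queue-class frame carries a method id, which the table above resolves to its
# method class, e.g.:
#
#     id2method[11]          # -> DeclareOk
#     id2method[11]._name    # -> "queue.declare-ok"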
|
bufferx/stormed-amqp
|
stormed/method/codegen/queue.py
|
Python
|
mit
| 3,261
|
"""
Support for the definition of zones.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zone/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_HIDDEN, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME, CONF_LATITUDE,
CONF_LONGITUDE, CONF_ICON)
from homeassistant.helpers import config_per_platform
from homeassistant.helpers.entity import Entity, generate_entity_id
from homeassistant.util.location import distance
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_PASSIVE = 'passive'
ATTR_RADIUS = 'radius'
CONF_PASSIVE = 'passive'
CONF_RADIUS = 'radius'
DEFAULT_NAME = 'Unnamed zone'
DEFAULT_PASSIVE = False
DEFAULT_RADIUS = 100
DOMAIN = 'zone'
ENTITY_ID_FORMAT = 'zone.{}'
ENTITY_ID_HOME = ENTITY_ID_FORMAT.format('home')
ICON_HOME = 'mdi:home'
ICON_IMPORT = 'mdi:import'
STATE = 'zoning'
# The config that zone accepts is the same as if it has platforms.
PLATFORM_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_LATITUDE): cv.latitude,
vol.Required(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): vol.Coerce(float),
vol.Optional(CONF_PASSIVE, default=DEFAULT_PASSIVE): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
})
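# Example configuration.yaml entry matching the schema above (values are
# illustrative; the exact layout may differ between Home Assistant versions):
#
#   zone:
#     name: Work
#     latitude: 32.8753
#     longitude: -117.2494
#     radius: 250
#     icon: mdi:briefcase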
def active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude."""
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
if zone.attributes.get(ATTR_PASSIVE):
continue
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0):
"""Test if given latitude, longitude is in given zone."""
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
def setup(hass, config):
"""Setup zone."""
entities = set()
for _, entry in config_per_platform(config, DOMAIN):
name = entry.get(CONF_NAME)
zone = Zone(hass, name, entry[CONF_LATITUDE], entry[CONF_LONGITUDE],
entry.get(CONF_RADIUS), entry.get(CONF_ICON),
entry.get(CONF_PASSIVE))
zone.entity_id = generate_entity_id(ENTITY_ID_FORMAT, name, entities)
zone.update_ha_state()
entities.add(zone.entity_id)
if ENTITY_ID_HOME not in entities:
zone = Zone(hass, hass.config.location_name,
hass.config.latitude, hass.config.longitude,
DEFAULT_RADIUS, ICON_HOME, False)
zone.entity_id = ENTITY_ID_HOME
zone.update_ha_state()
return True
class Zone(Entity):
"""Representation of a Zone."""
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(self, hass, name, latitude, longitude, radius, icon, passive):
"""Initialize the zone."""
self.hass = hass
self._name = name
self._latitude = latitude
self._longitude = longitude
self._radius = radius
self._icon = icon
self._passive = passive
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state property really does nothing for a zone."""
return STATE
@property
def icon(self):
"""Return the icon if any."""
return self._icon
@property
def state_attributes(self):
"""Return the state attributes of the zone."""
data = {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self._latitude,
ATTR_LONGITUDE: self._longitude,
ATTR_RADIUS: self._radius,
}
if self._passive:
data[ATTR_PASSIVE] = self._passive
return data
|
Smart-Torvy/torvy-home-assistant
|
homeassistant/components/zone.py
|
Python
|
mit
| 4,669
|
import unittest
import threading
from electrum import constants
# Set this locally to make the test suite run faster.
# If set, unit tests that would normally test functions with multiple implementations,
# will only be run once, using the fastest implementation.
# e.g. libsecp256k1 vs python-ecdsa. pycryptodomex vs pyaes.
FAST_TESTS = False
# some unit tests are modifying globals; sorry.
class SequentialTestCase(unittest.TestCase):
test_lock = threading.Lock()
def setUp(self):
super().setUp()
self.test_lock.acquire()
def tearDown(self):
super().tearDown()
self.test_lock.release()
class TestCaseForTestnet(SequentialTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_testnet()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
|
cryptapus/electrum
|
electrum/tests/__init__.py
|
Python
|
mit
| 902
|
# -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2005-2007 Vasco Nunes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import csv
import gtk
import os
import gutils
import db
from plugins.export import Base
class ExportPlugin(Base):
name = "CSV"
description = _("Full CSV list export plugin")
author = "Vasco Nunes"
email = "<vasco.m.nunes@gmail.com>"
version = "0.3"
fields_to_export = ('number', 'o_title', 'title', 'director', 'year', 'classification', 'country',
'genre', 'rating', 'runtime', 'studio', 'seen', 'loaned', 'o_site', 'site', 'trailer',
'plot', 'cast', 'notes', 'image', 'volumes.name', 'collections.name', 'media.name',
'screenplay', 'cameraman', 'barcode', 'color', 'cond', 'layers', 'region',
'media_num', 'vcodecs.name')
def run(self):
basedir = None
if self.config is not None:
basedir = self.config.get('export_dir', None, section='export-csv')
if not basedir:
filename = gutils.file_chooser(_("Export a %s document")%"CSV", action=gtk.FILE_CHOOSER_ACTION_SAVE, \
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK), name='griffith_list.csv')
else:
filename = gutils.file_chooser(_("Export a %s document")%"CSV", action=gtk.FILE_CHOOSER_ACTION_SAVE, \
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE,gtk.RESPONSE_OK), name='griffith_list.csv', folder=basedir)
if filename and filename[0]:
if self.config is not None and filename[1]:
self.config.set('export_dir', filename[1], section='export-csv')
self.config.save()
overwrite = None
if os.path.isfile(filename[0]):
if gutils.question(_("File exists. Do you want to overwrite it?"), self.parent_window):
overwrite = True
else:
overwrite = False
if overwrite or overwrite is None:
movies = self.get_query().execute()
writer = csv.writer(file(filename[0], 'w'), dialect=csv.excel)
# write column header row
writer.writerow(self.fields_to_export)
# write data rows
for movie in movies:
t = []
for s in self.exported_columns:
t.append(movie[s])
writer.writerow(t)
gutils.info(_("%s file has been created.") % "CSV", self.parent_window)
|
FiloSottile/Griffith-mirror
|
lib/plugins/export/PluginExportCSV.py
|
Python
|
gpl-2.0
| 3,429
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gobject
import gtk
import pango
import os
import os.path
import sys
import xml.sax
from zenmapGUI.higwidgets.higdialogs import HIGAlertDialog, HIGDialog
from zenmapGUI.higwidgets.higboxes import HIGVBox, HIGHBox, \
hig_box_space_holder
from zenmapGUI.higwidgets.higlabels import HIGSectionLabel
from zenmapGUI.higwidgets.higtables import HIGTable
from zenmapGUI.higwidgets.higbuttons import HIGButton
from zenmapCore.NmapParser import NmapParser
from zenmapCore.UmitLogging import log
import zenmapCore.I18N
import zenmapCore.Diff
from zenmapGUI.FileChoosers import ResultsFileSingleChooserDialog
# In milliseconds.
NDIFF_CHECK_TIMEOUT = 200
class ScanChooser(HIGVBox):
"""This class allows the selection of scan results from the list of open
tabs or from a file. It emits the "changed" signal when the scan selection
has changed."""
__gsignals__ = {
"changed": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
}
def __init__(self, scans, title):
self.__gobject_init__()
self.title = title
self.scan_dict = {}
# Setting HIGVBox
self.set_border_width(5)
self.set_spacing(6)
self._create_widgets()
self._pack_hbox()
self._attaching_widgets()
self._set_scrolled()
self._set_text_view()
self._set_open_button()
for scan in scans:
self.add_scan(scan.scan_name or scan.get_nmap_command(), scan)
self.combo_scan.connect('changed', self.show_scan)
self.combo_scan.connect('changed', lambda x: self.emit('changed'))
self._pack_noexpand_nofill(self.lbl_scan)
self._pack_expand_fill(self.hbox)
def _create_widgets(self):
self.lbl_scan = HIGSectionLabel(self.title)
self.hbox = HIGHBox()
self.table = HIGTable()
self.list_scan = gtk.ListStore(str)
self.combo_scan = gtk.ComboBoxEntry(self.list_scan, 0)
self.btn_open_scan = gtk.Button(stock=gtk.STOCK_OPEN)
self.exp_scan = gtk.Expander(_("Scan Output"))
self.scrolled = gtk.ScrolledWindow()
self.txt_scan_result = gtk.TextView()
self.txg_tag = gtk.TextTag("scan_style")
def get_buffer(self):
return self.txt_scan_result.get_buffer()
def show_scan(self, widget):
nmap_output = self.get_nmap_output()
if nmap_output is not None:
self.txt_scan_result.get_buffer().set_text(nmap_output)
def normalize_output(self, output):
return "\n".join(output.split("\\n"))
def _pack_hbox(self):
self.hbox._pack_noexpand_nofill(hig_box_space_holder())
self.hbox._pack_expand_fill(self.table)
def _attaching_widgets(self):
self.table.attach(self.combo_scan, 0, 1, 0, 1, yoptions=0)
self.table.attach(
self.btn_open_scan, 1, 2, 0, 1, yoptions=0, xoptions=0)
self.table.attach(self.exp_scan, 0, 2, 1, 2)
def _set_scrolled(self):
self.scrolled.set_border_width(5)
self.scrolled.set_size_request(-1, 130)
# Packing scrolled window into expander
self.exp_scan.add(self.scrolled)
# Packing text view into scrolled window
self.scrolled.add_with_viewport(self.txt_scan_result)
# Setting scrolled window
self.scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
def _set_text_view(self):
self.txg_table = self.txt_scan_result.get_buffer().get_tag_table()
self.txg_table.add(self.txg_tag)
self.txg_tag.set_property("family", "Monospace")
self.txt_scan_result.set_wrap_mode(gtk.WRAP_WORD)
self.txt_scan_result.set_editable(False)
self.txt_scan_result.get_buffer().connect(
"changed", self._text_changed_cb)
def _set_open_button(self):
self.btn_open_scan.connect('clicked', self.open_file)
def open_file(self, widget):
file_chooser = ResultsFileSingleChooserDialog(_("Select Scan Result"))
response = file_chooser.run()
file_chosen = file_chooser.get_filename()
file_chooser.destroy()
if response == gtk.RESPONSE_OK:
try:
parser = NmapParser()
parser.parse_file(file_chosen)
except xml.sax.SAXParseException, e:
alert = HIGAlertDialog(
message_format='<b>%s</b>' % _('Error parsing file'),
secondary_text=_(
"The file is not an Nmap XML output file. "
"The parsing error that occurred was\n%s") % str(e))
alert.run()
alert.destroy()
return False
except Exception, e:
alert = HIGAlertDialog(
message_format='<b>%s</b>' % _(
'Cannot open selected file'),
secondary_text=_("""\
This error occurred while trying to open the file:
%s""") % str(e))
alert.run()
alert.destroy()
return False
scan_name = os.path.split(file_chosen)[-1]
self.add_scan(scan_name, parser)
self.combo_scan.set_active(len(self.list_scan) - 1)
def add_scan(self, scan_name, parser):
scan_id = 1
new_scan_name = scan_name
while new_scan_name in self.scan_dict.keys():
new_scan_name = "%s (%s)" % (scan_name, scan_id)
scan_id += 1
self.list_scan.append([new_scan_name])
self.scan_dict[new_scan_name] = parser
def _text_changed_cb(self, widget):
buff = self.txt_scan_result.get_buffer()
buff.apply_tag(
self.txg_tag, buff.get_start_iter(), buff.get_end_iter())
def get_parsed_scan(self):
"""Return the currently selected scan's parsed output as an NmapParser
object, or None if no valid scan is selected."""
selected_scan = self.combo_scan.child.get_text()
return self.scan_dict.get(selected_scan)
    def get_nmap_output(self):
        """Return the currently selected scan's output as a string, or None if
        no valid scan is selected."""
        parsed_scan = self.get_parsed_scan()
        if parsed_scan is not None:
            return parsed_scan.get_nmap_output()
        return None
nmap_output = property(get_nmap_output)
parsed_scan = property(get_parsed_scan)
class DiffWindow(gtk.Window):
def __init__(self, scans):
gtk.Window.__init__(self)
self.set_title(_("Compare Results"))
self.ndiff_process = None
# We allow the user to start a new diff before the old one has
# finished. We have to keep references to old processes until they
# finish to avoid problems when tearing down the Python interpreter at
# program exit.
self.old_processes = []
self.timer_id = None
self.main_vbox = HIGVBox()
self.diff_view = DiffView()
self.diff_view.set_size_request(-1, 100)
self.hbox_buttons = HIGHBox()
self.progress = gtk.ProgressBar()
self.btn_close = HIGButton(stock=gtk.STOCK_CLOSE)
self.hbox_selection = HIGHBox()
self.scan_chooser_a = ScanChooser(scans, _(u"A Scan"))
self.scan_chooser_b = ScanChooser(scans, _(u"B Scan"))
self._pack_widgets()
self._connect_widgets()
self.set_default_size(-1, 500)
# Initial Size Request
self.initial_size = self.get_size()
def _pack_widgets(self):
self.main_vbox.set_border_width(6)
self.hbox_selection.pack_start(self.scan_chooser_a, True, True)
self.hbox_selection.pack_start(self.scan_chooser_b, True, True)
self.main_vbox.pack_start(self.hbox_selection, False)
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
scroll.add(self.diff_view)
self.main_vbox.pack_start(scroll, True, True)
self.progress.hide()
self.progress.set_no_show_all(True)
self.hbox_buttons.pack_start(self.progress, False)
self.hbox_buttons.pack_end(self.btn_close, False)
self.main_vbox._pack_noexpand_nofill(self.hbox_buttons)
self.add(self.main_vbox)
def _connect_widgets(self):
self.connect("delete-event", self.close)
self.btn_close.connect("clicked", self.close)
self.scan_chooser_a.connect('changed', self.refresh_diff)
self.scan_chooser_b.connect('changed', self.refresh_diff)
def refresh_diff(self, widget):
"""This method is called whenever the diff output might have changed,
such as when a different scan was selected in one of the choosers."""
log.debug("Refresh diff.")
if (self.ndiff_process is not None and
self.ndiff_process.poll() is None):
# Put this in the list of old processes we keep track of.
self.old_processes.append(self.ndiff_process)
self.ndiff_process = None
scan_a = self.scan_chooser_a.parsed_scan
scan_b = self.scan_chooser_b.parsed_scan
if scan_a is None or scan_b is None:
self.diff_view.clear()
else:
try:
self.ndiff_process = zenmapCore.Diff.ndiff(scan_a, scan_b)
except OSError, e:
alert = HIGAlertDialog(
message_format=_("Error running ndiff"),
secondary_text=_(
"There was an error running the ndiff program.\n\n"
) + str(e).decode(sys.getdefaultencoding(), "replace"))
alert.run()
alert.destroy()
else:
self.progress.show()
if self.timer_id is None:
self.timer_id = gobject.timeout_add(
NDIFF_CHECK_TIMEOUT, self.check_ndiff_process)
def check_ndiff_process(self):
"""Check if the ndiff subprocess is done and show the diff if it is.
Also remove any finished processes from the old process list."""
# Check if any old background processes have finished.
for p in self.old_processes[:]:
if p.poll() is not None:
p.close()
self.old_processes.remove(p)
if self.ndiff_process is not None:
# We're running the most recent scan. Check if it's done.
status = self.ndiff_process.poll()
if status is None:
# Keep calling this function on a timer until the process
# finishes.
self.progress.pulse()
return True
if status == 0 or status == 1:
# Successful completion.
try:
diff = self.ndiff_process.get_scan_diff()
except zenmapCore.Diff.NdiffParseException, e:
alert = HIGAlertDialog(
message_format=_("Error parsing ndiff output"),
secondary_text=str(e))
alert.run()
alert.destroy()
else:
self.diff_view.show_diff(diff)
else:
# Unsuccessful completion.
error_text = _(
"The ndiff process terminated with status code %d."
) % status
stderr = self.ndiff_process.stderr.read()
if len(stderr) > 0:
error_text += "\n\n" + stderr
alert = HIGAlertDialog(
message_format=_("Error running ndiff"),
secondary_text=error_text)
alert.run()
alert.destroy()
self.progress.hide()
self.ndiff_process.close()
self.ndiff_process = None
if len(self.old_processes) > 0:
# Keep calling this callback.
return True
else:
# All done.
self.timer_id = None
return False
def close(self, widget=None, extra=None):
self.destroy()
class DiffView(gtk.TextView):
REMOVE_COLOR = "#ffaaaa"
ADD_COLOR = "#ccffcc"
"""A widget displaying a zenmapCore.Diff.ScanDiff."""
def __init__(self):
gtk.TextView.__init__(self)
self.set_editable(False)
buff = self.get_buffer()
# Create text markup tags.
buff.create_tag("=", font="Monospace")
buff.create_tag(
"-", font="Monospace", background=self.REMOVE_COLOR)
buff.create_tag("+", font="Monospace", background=self.ADD_COLOR)
def clear(self):
self.get_buffer().set_text(u"")
def show_diff(self, diff):
self.clear()
buff = self.get_buffer()
for line in diff.splitlines(True):
if line.startswith("-"):
tags = ["-"]
elif line.startswith("+"):
tags = ["+"]
else:
tags = ["="]
buff.insert_with_tags_by_name(buff.get_end_iter(), line, *tags)
if __name__ == "__main__":
from zenmapCore.NmapParser import NmapParser
parsed1 = NmapParser()
parsed2 = NmapParser()
parsed3 = NmapParser()
parsed4 = NmapParser()
parsed1.parse_file("test/xml_test1.xml")
parsed2.parse_file("test/xml_test2.xml")
parsed3.parse_file("test/xml_test3.xml")
parsed4.parse_file("test/xml_test4.xml")
dw = DiffWindow({"Parsed 1": parsed1,
"Parsed 2": parsed2,
"Parsed 3": parsed3,
"Parsed 4": parsed4})
dw.show_all()
dw.connect("delete-event", lambda x, y: gtk.main_quit())
gtk.main()
|
markofu/scripts
|
nmap/nmap/zenmap/zenmapGUI/DiffCompare.py
|
Python
|
gpl-2.0
| 22,982
|
# util/_collections.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import operator
import types
import weakref
from .compat import binary_types
from .compat import collections_abc
from .compat import itertools_filterfalse
from .compat import py2k
from .compat import string_types
from .compat import threading
EMPTY_SET = frozenset()
class AbstractKeyedTuple(tuple):
__slots__ = ()
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return list(self._fields)
class KeyedTuple(AbstractKeyedTuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`_query.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
standard library, however is architected very differently.
    Unlike ``collections.namedtuple()``, :class:`.KeyedTuple` does not
    rely on the creation of custom subtypes in order to represent
    a new series of keys; instead, each :class:`.KeyedTuple` instance
    receives its list of keys in place.  The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`_query.Query` object's use case.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
else:
labels = []
t.__dict__["_labels"] = labels
return t
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple([l for l in self._labels if l is not None])
def __setattr__(self, key, value):
raise AttributeError("Can't set attribute: %s" % key)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
"""
return {key: self.__dict__[key] for key in self.keys()}
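# Illustration only (not part of the original module): KeyedTuple carries its
# labels per instance, so keys(), _fields and _asdict() behave much like
# collections.namedtuple() without generating a new class per label set.
def _demo_keyed_tuple():
    row = KeyedTuple([1, 2], labels=["id", "name"])
    assert row.id == 1 and row[1] == 2       # attribute and index access
    assert row.keys() == ["id", "name"]
    assert row._asdict() == {"id": 1, "name": 2}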
class _LW(AbstractKeyedTuple):
__slots__ = ()
def __new__(cls, vals):
return tuple.__new__(cls, vals)
def __reduce__(self):
# for pickling, degrade down to the regular
# KeyedTuple, thus avoiding anonymous class pickling
# difficulties
return KeyedTuple, (list(self), self._real_fields)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary."""
d = dict(zip(self._real_fields, self))
d.pop(None, None)
return d
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self),)
def union(self, d):
if not d:
return self
elif not self:
if isinstance(d, immutabledict):
return d
else:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
__slots__ = ("_data",)
def __init__(self, data):
object.__setattr__(self, "_data", data)
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(list(self._data.values()))
def __dir__(self):
return dir(super(Properties, self)) + [
str(k) for k in self._data.keys()
]
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, obj):
self._data[key] = obj
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, obj):
self._data[key] = obj
def __getstate__(self):
return {"_data": self._data}
def __setstate__(self, state):
object.__setattr__(self, "_data", state["_data"])
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return list(self._data)
def values(self):
return list(self._data.values())
def items(self):
return list(self._data.items())
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
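# Illustration only (not part of the original module): Properties exposes the
# same underlying dict through attribute access and item access.
def _demo_properties():
    props = Properties({})
    props.foo = "bar"                        # same effect as props["foo"] = "bar"
    assert props["foo"] == "bar"
    assert props.get("missing", 1) == 1
    assert "foo" in props and props.keys() == ["foo"]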
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
__slots__ = ()
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
__slots__ = ()
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
__slots__ = ("_list",)
def __reduce__(self):
return OrderedDict, (self.items(),)
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, "keys"):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def keys(self):
return list(self)
def values(self):
return [self[key] for key in self._list]
def items(self):
return [(key, self[key]) for key in self._list]
if py2k:
def itervalues(self):
return iter(self.values())
def iterkeys(self):
return iter(self)
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, obj):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, obj)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
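# Illustration only (not part of the original module): this OrderedDict
# predates the stdlib ordering guarantee and tracks insertion order in a
# side list, so keys()/items() come back in insertion order.
def _demo_ordered_dict():
    od = OrderedDict()
    od["b"] = 1
    od["a"] = 2
    assert od.keys() == ["b", "a"]
    od.pop("b")
    assert od.items() == [("a", 2)]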
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self._list = unique_list(d)
set.update(self, self._list)
else:
self._list = []
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
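# Illustration only (not part of the original module): OrderedSet keeps set
# semantics while preserving the order in which elements were first added,
# and set operators return OrderedSet instances with that order intact.
def _demo_ordered_set():
    s = OrderedSet([3, 1, 3, 2])
    assert list(s) == [3, 1, 2]
    s.add(1)                                 # already present, order unchanged
    assert list(s | OrderedSet([4])) == [3, 1, 2, 4]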
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
    This strategy has edge cases for builtin types - it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
def __init__(self, iterable=None):
self._members = dict()
if iterable:
self.update(iterable)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError("pop from an empty set")
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError("cannot compare sets using cmp()")
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = self.__class__(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(
other._members.__contains__, iter(self._members.keys())
):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = self.__class__(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(
self._members.__contains__, iter(other._members.keys())
):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = self.__class__()
members = self._members
result._members.update(members)
result._members.update((id(obj), obj) for obj in iterable)
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members.update((id(obj), obj) for obj in iterable)
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update(
((k, v) for k, v in members.items() if k not in other)
)
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj) for obj in iterable}
result._members.update(
(k, v) for k, v in members.items() if k in other
)
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = self.__class__()
members = self._members
other = {id(obj): obj for obj in iterable}
result._members.update(
((k, v) for k, v in members.items() if k not in other)
)
result._members.update(
((k, v) for k, v in other.items() if k not in members)
)
return result
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
        self.symmetric_difference_update(other)
return self
def copy(self):
return type(self)(iter(self._members.values()))
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return iter(self._members.values())
def __hash__(self):
raise TypeError("set objects are unhashable")
def __repr__(self):
return "%s(%r)" % (type(self).__name__, list(self._members.values()))
class WeakSequence(object):
def __init__(self, __elements=()):
# adapted from weakref.WeakKeyDictionary, prevent reference
# cycles in the collection itself
def _remove(item, selfref=weakref.ref(self)):
self = selfref()
if self is not None:
self._storage.remove(item)
self._remove = _remove
self._storage = [
weakref.ref(element, _remove) for element in __elements
]
def append(self, item):
self._storage.append(weakref.ref(item, self._remove))
def __len__(self):
return len(self._storage)
def __iter__(self):
return (
obj for obj in (ref() for ref in self._storage) if obj is not None
)
def __getitem__(self, index):
try:
obj = self._storage[index]
except KeyError:
raise IndexError("Index %s out of range" % index)
else:
return obj()
class OrderedIdentitySet(IdentitySet):
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
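# Illustration only (not part of the original module): unlike
# collections.defaultdict, the creator receives the missing key, so the
# generated value can depend on it; results are memoized in the dict.
def _demo_populate_dict():
    squares = PopulateDict(lambda key: key * key)
    assert squares[4] == 16                  # created on first access
    assert squares == {4: 16}                # and stored for later lookups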
class WeakPopulateDict(dict):
"""Like PopulateDict, but assumes a self + a method and does not create
a reference cycle.
"""
def __init__(self, creator_method):
self.creator = creator_method.__func__
weakself = creator_method.__self__
self.weakself = weakref.ref(weakself)
def __missing__(self, key):
self[key] = val = self.creator(self.weakself(), key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
_getters = PopulateDict(operator.itemgetter)
_property_getters = PopulateDict(
lambda idx: property(operator.itemgetter(idx))
)
def unique_list(seq, hashfunc=None):
seen = set()
seen_add = seen.add
if not hashfunc:
return [x for x in seq if x not in seen and not seen_add(x)]
else:
return [
x
for x in seq
if hashfunc(x) not in seen and not seen_add(hashfunc(x))
]
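# Illustration only (not part of the original module): unique_list
# de-duplicates while preserving the first occurrence of each element;
# hashfunc lets unhashable items (here, lists) participate.
def _demo_unique_list():
    assert unique_list([3, 1, 3, 2, 1]) == [3, 1, 2]
    assert unique_list([[1], [1], [2]], hashfunc=tuple) == [[1], [2]]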
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
    Additional appends() of the same object are ignored.  Membership is
    determined by identity (``is``), not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, "append"):
self._data_appender = data.append
elif hasattr(data, "add"):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
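# Illustration only (not part of the original module): repeated appends of the
# same object are dropped, since uniqueness is tracked by id().
def _demo_unique_appender():
    out = []
    appender = UniqueAppender(out)
    item = "x"
    appender.append(item)
    appender.append(item)                    # ignored: same object identity
    assert out == ["x"]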
def coerce_generator_arg(arg):
if len(arg) == 1 and isinstance(arg[0], types.GeneratorType):
return list(arg[0])
else:
return arg
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, collections_abc.Iterable) or isinstance(
x, string_types + binary_types
):
return [x]
elif isinstance(x, list):
return x
else:
return list(x)
def has_intersection(set_, iterable):
r"""return True if any items of set\_ are present in iterable.
Goes through special effort to ensure __hash__ is not called
on items in iterable that don't support it.
"""
# TODO: optimize, write in C, etc.
return bool(set_.intersection([i for i in iterable if i.__hash__]))
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, str) and hasattr(elem, "__iter__"):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
    Note that either get() or [] should be used here, but
    generally it's not safe to do an "in" check first, as the dictionary
    can change subsequent to that call.
"""
__slots__ = "capacity", "threshold", "size_alert", "_counter", "_mutex"
def __init__(self, capacity=100, threshold=0.5, size_alert=None):
self.capacity = capacity
self.threshold = threshold
self.size_alert = size_alert
self._counter = 0
self._mutex = threading.Lock()
def _inc_counter(self):
self._counter += 1
return self._counter
def get(self, key, default=None):
item = dict.get(self, key, default)
if item is not default:
item[2] = self._inc_counter()
return item[1]
else:
return default
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
@property
def size_threshold(self):
return self.capacity + self.capacity * self.threshold
def _manage_size(self):
if not self._mutex.acquire(False):
return
try:
size_alert = bool(self.size_alert)
while len(self) > self.capacity + self.capacity * self.threshold:
if size_alert:
size_alert = False
self.size_alert(self)
by_counter = sorted(
dict.values(self), key=operator.itemgetter(2), reverse=True
)
for item in by_counter[self.capacity :]:
try:
del self[item[0]]
except KeyError:
# deleted elsewhere; skip
continue
finally:
self._mutex.release()
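# Illustration only (not part of the original module): items are trimmed only
# once the size exceeds capacity * (1 + threshold), and the trim keeps the
# ``capacity`` most recently used entries.
def _demo_lru_cache():
    cache = LRUCache(capacity=2, threshold=0.5)
    cache["a"] = 1
    cache["b"] = 2
    cache["c"] = 3                           # 3 items <= 2 * 1.5, nothing evicted yet
    assert cache["a"] == 1                   # touching "a" marks it recently used
    cache["d"] = 4                           # 4 items > 3: trim down to the 2 newest
    assert len(cache) == 2 and "a" in cache and "d" in cache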
_lw_tuples = LRUCache(100)
def lightweight_named_tuple(name, fields):
hash_ = (name,) + tuple(fields)
tp_cls = _lw_tuples.get(hash_)
if tp_cls:
return tp_cls
tp_cls = type(
name,
(_LW,),
dict(
[
(field, _property_getters[idx])
for idx, field in enumerate(fields)
if field is not None
]
+ [("__slots__", ())]
),
)
tp_cls._real_fields = fields
tp_cls._fields = tuple([f for f in fields if f is not None])
_lw_tuples[hash_] = tp_cls
return tp_cls
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value for the current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
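# Illustration only (not part of the original module): with a constant scope
# function every call returns the same object; a per-thread scope function
# (e.g. one based on the current thread id) would give one object per thread.
def _demo_scoped_registry():
    registry = ScopedRegistry(createfunc=list, scopefunc=lambda: "only-scope")
    assert registry() is registry()          # created once, then reused
    registry.clear()
    assert not registry.has()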
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def has_dupes(sequence, target):
"""Given a sequence and search object, return True if there's more
than one, False if zero or one of them.
"""
# compare to .index version below, this version introduces less function
# overhead and is usually the same speed. At 15000 items (way bigger than
# a relationship-bound collection in memory usually is) it begins to
# fall behind the other version only by microseconds.
c = 0
for item in sequence:
if item is target:
c += 1
if c > 1:
return True
return False
# .index version. the two __contains__ calls as well
# as .index() and isinstance() slow this down.
# def has_dupes(sequence, target):
# if target not in sequence:
# return False
# elif not isinstance(sequence, collections_abc.Sequence):
# return False
#
# idx = sequence.index(target)
# return target in sequence[idx + 1:]
|
gltn/stdm
|
stdm/third_party/sqlalchemy/util/_collections.py
|
Python
|
gpl-2.0
| 29,219
|
# $Id: icmp.py,v 1.1.1.1 2005/10/29 18:20:48 provos Exp $
from dpkt import Packet, in_cksum as _icmp_cksum
import ip
# Types (icmp_type) and codes (icmp_code) -
# http://www.iana.org/assignments/icmp-parameters
ICMP_CODE_NONE = 0 # for types without codes
ICMP_ECHOREPLY = 0 # echo reply
ICMP_UNREACH = 3 # dest unreachable, codes:
ICMP_UNREACH_NET = 0 # bad net
ICMP_UNREACH_HOST = 1 # bad host
ICMP_UNREACH_PROTO = 2 # bad protocol
ICMP_UNREACH_PORT = 3 # bad port
ICMP_UNREACH_NEEDFRAG = 4 # IP_DF caused drop
ICMP_UNREACH_SRCFAIL = 5 # src route failed
ICMP_UNREACH_NET_UNKNOWN = 6 # unknown net
ICMP_UNREACH_HOST_UNKNOWN = 7 # unknown host
ICMP_UNREACH_ISOLATED = 8 # src host isolated
ICMP_UNREACH_NET_PROHIB = 9 # for crypto devs
ICMP_UNREACH_HOST_PROHIB = 10 # ditto
ICMP_UNREACH_TOSNET = 11 # bad tos for net
ICMP_UNREACH_TOSHOST = 12 # bad tos for host
ICMP_UNREACH_FILTER_PROHIB = 13 # prohibited access
ICMP_UNREACH_HOST_PRECEDENCE = 14 # precedence error
ICMP_UNREACH_PRECEDENCE_CUTOFF = 15 # precedence cutoff
ICMP_SRCQUENCH = 4 # packet lost, slow down
ICMP_REDIRECT = 5 # shorter route, codes:
ICMP_REDIRECT_NET = 0 # for network
ICMP_REDIRECT_HOST = 1 # for host
ICMP_REDIRECT_TOSNET = 2 # for tos and net
ICMP_REDIRECT_TOSHOST = 3 # for tos and host
ICMP_ALTHOSTADDR = 6 # alternate host address
ICMP_ECHO = 8 # echo service
ICMP_RTRADVERT = 9 # router advertise, codes:
ICMP_RTRADVERT_NORMAL = 0 # normal
ICMP_RTRADVERT_NOROUTE_COMMON = 16 # selective routing
ICMP_RTRSOLICIT = 10 # router solicitation
ICMP_TIMEXCEED = 11 # time exceeded, code:
ICMP_TIMEXCEED_INTRANS = 0 # ttl==0 in transit
ICMP_TIMEXCEED_REASS = 1 # ttl==0 in reass
ICMP_PARAMPROB = 12 # ip header bad
ICMP_PARAMPROB_ERRATPTR = 0 # req. opt. absent
ICMP_PARAMPROB_OPTABSENT = 1 # req. opt. absent
ICMP_PARAMPROB_LENGTH = 2 # bad length
ICMP_TSTAMP = 13 # timestamp request
ICMP_TSTAMPREPLY = 14 # timestamp reply
ICMP_INFO = 15 # information request
ICMP_INFOREPLY = 16 # information reply
ICMP_MASK = 17 # address mask request
ICMP_MASKREPLY = 18 # address mask reply
ICMP_TRACEROUTE = 30 # traceroute
ICMP_DATACONVERR = 31 # data conversion error
ICMP_MOBILE_REDIRECT = 32 # mobile host redirect
ICMP_IP6_WHEREAREYOU = 33 # IPv6 where-are-you
ICMP_IP6_IAMHERE = 34 # IPv6 i-am-here
ICMP_MOBILE_REG = 35 # mobile registration req
ICMP_MOBILE_REGREPLY = 36 # mobile registration reply
ICMP_DNS = 37 # domain name request
ICMP_DNSREPLY = 38 # domain name reply
ICMP_SKIP = 39 # SKIP
ICMP_PHOTURIS = 40 # Photuris
ICMP_PHOTURIS_UNKNOWN_INDEX = 0 # unknown sec index
ICMP_PHOTURIS_AUTH_FAILED = 1 # auth failed
ICMP_PHOTURIS_DECOMPRESS_FAILED = 2 # decompress failed
ICMP_PHOTURIS_DECRYPT_FAILED = 3 # decrypt failed
ICMP_PHOTURIS_NEED_AUTHN = 4 # no authentication
ICMP_PHOTURIS_NEED_AUTHZ = 5 # no authorization
ICMP_TYPE_MAX = 40
class ICMP(Packet):
"""Internet Control Message Protocol."""
__hdr__ = (
('type', 'B', 8),
('code', 'B', 0),
('sum', 'H', 0)
)
class Echo(Packet):
__hdr__ = (('id', 'H', 0), ('seq', 'H', 0))
class Quote(Packet):
__hdr__ = (('pad', 'I', 0),)
def unpack(self, buf):
Packet.unpack(self, buf)
self.data = self.ip = ip.IP(self.data)
class Unreach(Quote):
__hdr__ = (('pad', 'H', 0), ('mtu', 'H', 0))
class Quench(Quote):
pass
class Redirect(Quote):
__hdr__ = (('gw', 'I', 0),)
class ParamProbe(Quote):
__hdr__ = (('ptr', 'B', 0), ('pad1', 'B', 0), ('pad2', 'H', 0))
class TimeExceed(Quote):
pass
_typesw = { 0:Echo, 3:Unreach, 4:Quench, 5:Redirect, 8:Echo,
11:TimeExceed }
def unpack(self, buf):
Packet.unpack(self, buf)
try:
self.data = self._typesw[self.type](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except:
self.data = buf
def __str__(self):
if not self.sum:
self.sum = _icmp_cksum(Packet.__str__(self))
return Packet.__str__(self)
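# Hedged usage sketch (not part of the original module; assumes the dpkt
# Packet base class accepts header fields and ``data`` as keyword arguments,
# as it does in this dpkt lineage, and that the module runs under Python 2
# like the rest of the file): build an echo request and let __str__ fill in
# the checksum during serialization.
def _demo_echo_request():
    echo = ICMP(type=ICMP_ECHO, code=ICMP_CODE_NONE,
                data=ICMP.Echo(id=0x1234, seq=1, data='ping'))
    raw = str(echo)                  # 4-byte ICMP header + 4-byte echo header + payload
    assert ord(raw[0]) == ICMP_ECHO  # first byte is the type field
    assert len(raw) == 4 + 4 + 4     # header sizes plus len('ping')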
|
Banjong1990/honey
|
dpkt/dpkt/icmp.py
|
Python
|
gpl-2.0
| 4,112
|
'''OpenGL extension NV.pixel_data_range
This module customises the behaviour of the
OpenGL.raw.GL.NV.pixel_data_range to provide a more
Python-friendly API
Overview (from the spec)
The vertex array range extension is intended to improve the
efficiency of OpenGL vertex arrays. OpenGL vertex arrays' coherency
model and ability to access memory from arbitrary locations in memory
prevented implementations from using DMA (Direct Memory Access)
operations.
Many image-intensive applications, such as those that use dynamically
generated textures, face similar problems. These applications would
like to be able to sustain throughputs of hundreds of millions of
pixels per second through DrawPixels and hundreds of millions of
texels per second through TexSubImage.
However, the same restrictions that limited vertex throughput also
limit pixel throughput.
By the time that any pixel operation that reads data from user memory
returns, OpenGL requires that it must be safe for the application to
start using that memory for a different purpose. This coherency
model prevents asynchronous DMA transfers directly out of the user's
buffer.
There are also no restrictions on the pointer provided to pixel
operations or on the size of the data. To facilitate DMA
implementations, the driver needs to know in advance what region of
the address space to lock down.
Vertex arrays faced both of these restrictions already, but pixel
operations have one additional complicating factor -- they are
bidirectional. Vertex array data is always being transferred from the
application to the driver and the HW, whereas pixel operations
sometimes transfer data to the application from the driver and HW.
Note that the types of memory that are suitable for DMA for reading
and writing purposes are often different. For example, on many PC
platforms, DMA pulling is best accomplished with write-combined
(uncached) AGP memory, while pushing data should use cached memory so
that the application can read the data efficiently once it has been
read back over the AGP bus.
This extension defines an API where an application can specify two
pixel data ranges, which are analogous to vertex array ranges, except
that one is for operations where the application is reading data
(e.g. glReadPixels) and one is for operations where the application
is writing data (e.g. glDrawPixels, glTexSubImage2D, etc.). Each
pixel data range has a pointer to its start and a length in bytes.
When the pixel data range is enabled, and if the pointer specified
as the argument to a pixel operation is inside the corresponding
pixel data range, the implementation may choose to asynchronously
pull data from the pixel data range or push data to the pixel data
range. Data pulled from outside the pixel data range is undefined,
while pushing data to outside the pixel data range produces undefined
results.
The application may synchronize with the hardware in one of two ways:
by flushing the pixel data range (or causing an implicit flush) or by
using the NV_fence extension to insert fences in the command stream.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/pixel_data_range.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.NV.pixel_data_range import *
### END AUTOGENERATED SECTION
|
D4wN/brickv
|
src/build_data/windows/OpenGL/GL/NV/pixel_data_range.py
|
Python
|
gpl-2.0
| 3,489
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.module_utils._text import to_text, to_native
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.utils.display import Display
display = Display()
__all__ = ['Playbook']
class Playbook:
def __init__(self, loader):
# Entries in the datastructure of a playbook may
# be either a play or an include statement
self._entries = []
self._basedir = to_text(os.getcwd(), errors='surrogate_or_strict')
self._loader = loader
self._file_name = None
@staticmethod
def load(file_name, variable_manager=None, loader=None):
pb = Playbook(loader=loader)
pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
return pb
def _load_playbook_data(self, file_name, variable_manager, vars=None):
if os.path.isabs(file_name):
self._basedir = os.path.dirname(file_name)
else:
self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
# set the loaders basedir
cur_basedir = self._loader.get_basedir()
self._loader.set_basedir(self._basedir)
self._file_name = file_name
try:
ds = self._loader.load_from_file(os.path.basename(file_name))
except UnicodeDecodeError as e:
raise AnsibleParserError("Could not read playbook (%s) due to encoding issues: %s" % (file_name, to_native(e)))
# check for errors and restore the basedir in case this error is caught and handled
if not ds:
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("Empty playbook, nothing to do", obj=ds)
elif not isinstance(ds, list):
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("A playbook must be a list of plays, got a %s instead" % type(ds), obj=ds)
# Parse the playbook entries. For plays, we simply parse them
# using the Play() object, and includes are parsed using the
# PlaybookInclude() object
for entry in ds:
if not isinstance(entry, dict):
# restore the basedir in case this error is caught and handled
self._loader.set_basedir(cur_basedir)
raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
if any(action in entry for action in ('import_playbook', 'include')):
if 'include' in entry:
display.deprecated("'include' for playbook includes. You should use 'import_playbook' instead", version="2.12")
pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
if pb is not None:
self._entries.extend(pb._entries)
else:
which = entry.get('import_playbook', entry.get('include', entry))
display.display("skipping playbook '%s' due to conditional test failure" % which, color=C.COLOR_SKIP)
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader, vars=vars)
self._entries.append(entry_obj)
# we're done, so restore the old basedir in the loader
self._loader.set_basedir(cur_basedir)
def get_loader(self):
return self._loader
def get_plays(self):
return self._entries[:]
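# Hedged usage sketch (illustrative only, not part of the original module):
# assumes ansible.parsing.dataloader.DataLoader is importable, as elsewhere in
# this code base, and that a 'site.yml' playbook exists in the current
# directory; both names are only examples.
def _demo_load_playbook():
    from ansible.parsing.dataloader import DataLoader
    playbook = Playbook.load('site.yml', loader=DataLoader())
    for play in playbook.get_plays():
        display.display("play: %s" % play.get_name())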
|
alxgu/ansible
|
lib/ansible/playbook/__init__.py
|
Python
|
gpl-3.0
| 4,467
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import glob
import imp
import inspect
import os
import os.path
import sys
from ansible import constants as C
from ansible.utils.display import Display
from ansible import errors
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
_basedirs = []
def push_basedir(basedir):
# avoid pushing the same absolute dir more than once
basedir = os.path.realpath(basedir)
if basedir not in _basedirs:
_basedirs.insert(0, basedir)
def get_all_plugin_loaders():
return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)]
class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of
play basedirs, configured paths, and the python path.
The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases={}, required_base_class=None):
self.class_name = class_name
self.base_class = required_base_class
self.package = package
self.config = config
self.subdir = subdir
self.aliases = aliases
if not class_name in MODULE_CACHE:
MODULE_CACHE[class_name] = {}
if not class_name in PATH_CACHE:
PATH_CACHE[class_name] = None
if not class_name in PLUGIN_PATH_CACHE:
PLUGIN_PATH_CACHE[class_name] = {}
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._extra_dirs = []
self._searched_paths = set()
def __setstate__(self, data):
'''
Deserializer.
'''
class_name = data.get('class_name')
package = data.get('package')
config = data.get('config')
subdir = data.get('subdir')
aliases = data.get('aliases')
base_class = data.get('base_class')
PATH_CACHE[class_name] = data.get('PATH_CACHE')
PLUGIN_PATH_CACHE[class_name] = data.get('PLUGIN_PATH_CACHE')
self.__init__(class_name, package, config, subdir, aliases, base_class)
self._extra_dirs = data.get('_extra_dirs', [])
self._searched_paths = data.get('_searched_paths', set())
def __getstate__(self):
'''
Serializer.
'''
return dict(
class_name = self.class_name,
base_class = self.base_class,
package = self.package,
config = self.config,
subdir = self.subdir,
aliases = self.aliases,
_extra_dirs = self._extra_dirs,
_searched_paths = self._searched_paths,
PATH_CACHE = PATH_CACHE[self.class_name],
PLUGIN_PATH_CACHE = PLUGIN_PATH_CACHE[self.class_name],
)
def print_paths(self):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in self._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root,x))
return results
def _get_package_paths(self):
''' Gets the path of a Python package '''
paths = []
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
paths.extend(self._all_directories(self.package_path))
return paths
def _get_paths(self):
''' Return a list of paths to search for plugins in '''
if self._paths is not None:
return self._paths
ret = self._extra_dirs[:]
for basedir in _basedirs:
fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
if os.path.isdir(fullpath):
files = glob.glob("%s/*" % fullpath)
# allow directories to be two levels deep
files2 = glob.glob("%s/*/*" % fullpath)
if files2 is not None:
files.extend(files2)
for file in files:
if os.path.isdir(file) and file not in ret:
ret.append(file)
if fullpath not in ret:
ret.append(fullpath)
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
configured_paths = self.config.split(os.pathsep)
for path in configured_paths:
path = os.path.realpath(os.path.expanduser(path))
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
if os.path.isdir(c) and c not in ret:
ret.append(c)
if path not in ret:
ret.append(path)
# look for any plugins installed in the package subtree
ret.extend(self._get_package_paths())
# cache and return the result
self._paths = ret
return ret
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._paths = None
def find_plugin(self, name, suffixes=None):
''' Find a plugin named name '''
if not suffixes:
if self.class_name:
suffixes = ['.py']
else:
suffixes = ['.py', '']
potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
for full_name in potential_names:
if full_name in self._plugin_path_cache:
return self._plugin_path_cache[full_name]
found = None
for path in [p for p in self._get_paths() if p not in self._searched_paths]:
if os.path.isdir(path):
try:
full_paths = (os.path.join(path, f) for f in os.listdir(path))
                except OSError as e:
                    d = Display()
                    d.warning("Error accessing plugin paths: %s" % str(e))
                    # skip this directory if it cannot be listed; otherwise the
                    # generator below would reference an undefined full_paths
                    continue
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):
full_name = os.path.basename(full_path)
break
else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
continue
if full_name not in self._plugin_path_cache:
self._plugin_path_cache[full_name] = full_path
self._searched_paths.add(path)
for full_name in potential_names:
if full_name in self._plugin_path_cache:
return self._plugin_path_cache[full_name]
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
for alias_name in ('_%s' % n for n in potential_names):
# We've already cached all the paths at this point
if alias_name in self._plugin_path_cache:
return self._plugin_path_cache[alias_name]
return None
def has_plugin(self, name):
''' Checks if a plugin named name exists '''
return self.find_plugin(name) is not None
__contains__ = has_plugin
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
if name in self.aliases:
name = self.aliases[name]
path = self.find_plugin(name)
if path is None:
return None
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
return None
return obj
def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''
for i in self._get_paths():
matches = glob.glob(os.path.join(i, "*.py"))
matches.sort()
for path in matches:
name, ext = os.path.splitext(os.path.basename(path))
if name.startswith("_"):
continue
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
if kwargs.get('class_only', False):
obj = getattr(self._module_cache[path], self.class_name)
else:
obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
if self.base_class and self.base_class not in [base.__name__ for base in obj.__class__.__bases__]:
continue
# set extra info on the module, in case we want it later
setattr(obj, '_original_path', path)
yield obj
action_loader = PluginLoader(
'ActionModule',
'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins',
required_base_class='ActionBase',
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins',
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins',
)
connection_loader = PluginLoader(
'Connection',
'ansible.plugins.connections',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'},
required_base_class='ConnectionBase',
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library',
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins',
required_base_class='LookupBase',
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins',
)
filter_loader = PluginLoader(
'FilterModule',
'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins',
)
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.utils.module_docs_fragments',
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
'',
)
strategy_loader = PluginLoader(
'StrategyModule',
'ansible.plugins.strategies',
None,
'strategy_plugins',
required_base_class='StrategyBase',
)
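# Usage sketch (illustrative only, not part of the original module): the
# loaders defined above can be queried directly; 'file' is just an example
# plugin name.
def _demo_plugin_lookup():
    # Directories searched by the lookup plugin loader, joined with os.pathsep.
    print(lookup_loader.print_paths())
    # Full path of the 'file' lookup plugin, or None if it cannot be found.
    print(lookup_loader.find_plugin('file'))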
|
wehkamp/ansible
|
lib/ansible/plugins/__init__.py
|
Python
|
gpl-3.0
| 12,794
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for user handling.
"""
import tempfile
from unittest import TestCase as UnitTestCase
from django.test import TestCase
from unittest import SkipTest
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser, User, Group
from django.core import mail
from django.test.utils import override_settings
from django.core.management import call_command
from django.http import HttpRequest, HttpResponseRedirect
from weblate.accounts.models import (
Profile,
notify_merge_failure,
notify_new_string,
notify_new_suggestion,
notify_new_comment,
notify_new_translation,
notify_new_contributor,
notify_new_language,
)
from weblate.accounts.captcha import (
hash_question, unhash_question, MathCaptcha
)
from weblate.accounts import avatar
from weblate.accounts.middleware import RequireLoginMiddleware
from weblate.accounts.models import VerifiedEmail
from weblate.trans.tests.test_views import ViewTestCase, RegistrationTestMixin
from weblate.trans.tests.utils import get_test_file
from weblate.trans.tests import OverrideSettings
from weblate.trans.models.unitdata import Suggestion, Comment
from weblate.lang.models import Language
REGISTRATION_DATA = {
'username': 'username',
'email': 'noreply@weblate.org',
'first_name': 'First Last',
'captcha_id': '00',
'captcha': '9999'
}
class RegistrationTest(TestCase, RegistrationTestMixin):
clear_cookie = False
def assert_registration(self, match=None):
url = self.assert_registration_mailbox(match)
if self.clear_cookie:
del self.client.cookies['sessionid']
# Confirm account
response = self.client.get(url, follow=True)
self.assertRedirects(
response,
reverse('password')
)
@OverrideSettings(REGISTRATION_CAPTCHA=True)
def test_register_captcha(self):
# Enable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
self.assertContains(
response,
'Please check your math and try again.'
)
@OverrideSettings(REGISTRATION_OPEN=False)
def test_register_closed(self):
# Disable registration
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
self.assertContains(
response,
'Sorry, but registrations on this site are disabled.'
)
@OverrideSettings(REGISTRATION_CAPTCHA=False)
def test_register(self):
# Disable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
# Check we did succeed
self.assertRedirects(response, reverse('email-sent'))
# Confirm account
self.assert_registration()
# Set password
response = self.client.post(
reverse('password'),
{
'password1': 'password',
'password2': 'password',
}
)
self.assertRedirects(response, reverse('profile'))
# Check we can access home (was redirected to password change)
response = self.client.get(reverse('home'))
self.assertContains(response, 'First Last')
user = User.objects.get(username='username')
# Verify user is active
self.assertTrue(user.is_active)
# Verify stored first/last name
self.assertEqual(user.first_name, 'First Last')
@OverrideSettings(REGISTRATION_CAPTCHA=False)
def test_register_missing(self):
# Disable captcha
response = self.client.post(
reverse('register'),
REGISTRATION_DATA
)
# Check we did succeed
self.assertRedirects(response, reverse('email-sent'))
# Confirm account
url = self.assert_registration_mailbox()
# Remove session ID from URL
url = url.split('&id=')[0]
# Delete session ID from cookies
del self.client.cookies['sessionid']
# Confirm account
response = self.client.get(url, follow=True)
self.assertRedirects(response, reverse('login'))
self.assertContains(response, 'Failed to verify your registration')
def test_reset(self):
'''
Test for password reset.
'''
User.objects.create_user('testuser', 'test@example.com', 'x')
response = self.client.post(
reverse('password_reset'),
{
'email': 'test@example.com'
}
)
self.assertRedirects(response, reverse('email-sent'))
self.assert_registration('[Weblate] Password reset on Weblate')
def test_wrong_username(self):
data = REGISTRATION_DATA.copy()
data['username'] = ''
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'This field is required.',
)
def test_wrong_mail(self):
data = REGISTRATION_DATA.copy()
data['email'] = 'x'
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'Enter a valid email address.'
)
def test_spam(self):
data = REGISTRATION_DATA.copy()
data['content'] = 'x'
response = self.client.post(
reverse('register'),
data
)
self.assertContains(
response,
'Invalid value'
)
def test_add_mail(self):
# Create user
self.test_register()
mail.outbox.pop()
# Check adding email page
response = self.client.get(
reverse('email_login')
)
self.assertContains(response, 'Register email')
# Add email account
response = self.client.post(
reverse('social:complete', kwargs={'backend': 'email'}),
{'email': 'second@example.net'},
follow=True,
)
self.assertRedirects(response, reverse('email-sent'))
# Verify confirmation mail
url = self.assert_registration_mailbox()
response = self.client.get(url, follow=True)
self.assertRedirects(
response, '{0}#auth'.format(reverse('profile'))
)
# Check database models
user = User.objects.get(username='username')
self.assertEqual(
VerifiedEmail.objects.filter(social__user=user).count(), 2
)
self.assertTrue(
VerifiedEmail.objects.filter(
social__user=user, email='second@example.net'
).exists()
)
class NoCookieRegistrationTest(RegistrationTest):
clear_cookie = True
class CommandTest(TestCase):
'''
Tests for management commands.
'''
def test_createadmin(self):
call_command('createadmin')
user = User.objects.get(username='admin')
self.assertEqual(user.first_name, 'Weblate Admin')
self.assertEqual(user.last_name, '')
self.assertFalse(user.check_password('admin'))
def test_createadmin_password(self):
call_command('createadmin', password='admin')
user = User.objects.get(username='admin')
self.assertEqual(user.first_name, 'Weblate Admin')
self.assertEqual(user.last_name, '')
self.assertTrue(user.check_password('admin'))
def test_setupgroups(self):
call_command('setupgroups')
group = Group.objects.get(name='Users')
self.assertTrue(
group.permissions.filter(
codename='save_translation'
).exists()
)
call_command('setupgroups', move=True)
def test_importusers(self):
# First import
call_command('importusers', get_test_file('users.json'))
# Test that second import does not change anything
user = User.objects.get(username='weblate')
user.first_name = 'Weblate test user'
user.save()
call_command('importusers', get_test_file('users.json'))
user2 = User.objects.get(username='weblate')
self.assertEqual(user.first_name, user2.first_name)
def test_importdjangousers(self):
# First import
call_command('importusers', get_test_file('users-django.json'))
self.assertEqual(User.objects.count(), 2)
def test_userdata(self):
# Create test user
user = User.objects.create_user('testuser', 'test@example.com', 'x')
profile = Profile.objects.create(user=user)
profile.translated = 1000
profile.save()
with tempfile.NamedTemporaryFile() as output:
call_command('dumpuserdata', output.name)
call_command('importuserdata', output.name)
profile = Profile.objects.get(user__username='testuser')
self.assertEqual(profile.translated, 2000)
class ViewTest(TestCase):
'''
Test for views.
'''
def get_user(self):
user = User.objects.create_user(
username='testuser',
password='testpassword'
)
user.first_name = 'First Second'
user.email = 'noreply@weblate.org'
user.save()
Profile.objects.get_or_create(user=user)
return user
def test_contact(self):
'''
Test for contact form.
'''
# Basic get
response = self.client.get(reverse('contact'))
self.assertContains(response, 'id="id_message"')
# Sending message
response = self.client.post(
reverse('contact'),
{
'name': 'Test',
'email': 'noreply@weblate.org',
'subject': 'Message from dark side',
'message': 'Hi\n\nThis app looks really cool!',
}
)
self.assertRedirects(response, reverse('home'))
# Verify message
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] Message from dark side'
)
@OverrideSettings(OFFER_HOSTING=False)
def test_hosting_disabled(self):
'''
Test for hosting form with disabled hosting
'''
self.get_user()
self.client.login(username='testuser', password='testpassword')
response = self.client.get(reverse('hosting'))
self.assertRedirects(response, reverse('home'))
@OverrideSettings(OFFER_HOSTING=True)
def test_hosting(self):
'''
Test for hosting form with enabled hosting.
'''
self.get_user()
self.client.login(username='testuser', password='testpassword')
response = self.client.get(reverse('hosting'))
self.assertContains(response, 'id="id_message"')
# Sending message
response = self.client.post(
reverse('hosting'),
{
'name': 'Test',
'email': 'noreply@weblate.org',
'project': 'HOST',
'url': 'http://example.net',
'repo': 'git://github.com/nijel/weblate.git',
'mask': 'po/*.po',
'message': 'Hi\n\nI want to use it!',
}
)
self.assertRedirects(response, reverse('home'))
# Verify message
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] Hosting request for HOST'
)
def test_contact_subject(self):
# With set subject
response = self.client.get(
reverse('contact'),
{'subject': 'Weblate test message'}
)
self.assertContains(response, 'Weblate test message')
def test_contact_user(self):
self.get_user()
# Login
self.client.login(username='testuser', password='testpassword')
response = self.client.get(
reverse('contact'),
)
self.assertContains(response, 'value="First Second"')
self.assertContains(response, 'noreply@weblate.org')
def test_user(self):
'''
Test user pages.
'''
# Setup user
user = self.get_user()
# Login as user
self.client.login(username='testuser', password='testpassword')
# Get public profile
response = self.client.get(
reverse('user_page', kwargs={'user': user.username})
)
self.assertContains(response, '="/activity/')
def test_login(self):
self.get_user()
# Login
response = self.client.post(
reverse('login'),
{'username': 'testuser', 'password': 'testpassword'}
)
self.assertRedirects(response, reverse('home'))
# Login redirect
response = self.client.get(reverse('login'))
self.assertRedirects(response, reverse('profile'))
# Logout
response = self.client.get(reverse('logout'))
self.assertRedirects(response, reverse('login'))
def test_removal(self):
# Create user
self.get_user()
# Login
self.client.login(username='testuser', password='testpassword')
response = self.client.post(
reverse('remove')
)
self.assertRedirects(response, reverse('home'))
self.assertFalse(
User.objects.filter(username='testuser').exists()
)
def test_password(self):
# Create user
self.get_user()
# Login
self.client.login(username='testuser', password='testpassword')
# Change without data
response = self.client.post(
reverse('password')
)
self.assertContains(response, 'This field is required.')
# Change with wrong password
response = self.client.post(
reverse('password'),
{
'password': '123456',
'password1': '123456',
'password2': '123456'
}
)
self.assertContains(response, 'You have entered an invalid password.')
# Change
response = self.client.post(
reverse('password'),
{
'password': 'testpassword',
'password1': '123456',
'password2': '123456'
}
)
self.assertRedirects(response, reverse('profile'))
self.assertTrue(
User.objects.get(username='testuser').check_password('123456')
)
class ProfileTest(ViewTestCase):
def test_profile(self):
# Get profile page
response = self.client.get(reverse('profile'))
self.assertContains(response, 'action="/accounts/profile/"')
# Save profile
response = self.client.post(
reverse('profile'),
{
'language': 'cs',
'languages': Language.objects.get(code='cs').id,
'secondary_languages': Language.objects.get(code='cs').id,
'first_name': 'First Last',
'email': 'noreply@weblate.org',
'username': 'testik',
}
)
self.assertRedirects(response, reverse('profile'))
class NotificationTest(ViewTestCase):
def setUp(self):
super(NotificationTest, self).setUp()
self.user.email = 'noreply@weblate.org'
self.user.save()
profile = Profile.objects.get(user=self.user)
profile.subscribe_any_translation = True
profile.subscribe_new_string = True
profile.subscribe_new_suggestion = True
profile.subscribe_new_contributor = True
profile.subscribe_new_comment = True
profile.subscribe_new_language = True
profile.subscribe_merge_failure = True
profile.subscriptions.add(self.project)
profile.languages.add(
Language.objects.get(code='cs')
)
profile.save()
def second_user(self):
user = User.objects.create_user(
username='seconduser',
password='secondpassword'
)
Profile.objects.create(user=user)
return user
def test_notify_merge_failure(self):
notify_merge_failure(
self.subproject,
'Failed merge',
'Error\nstatus'
)
# Check mail (second one is for admin)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] Merge failure in Test/Test'
)
# Add project owner
self.subproject.project.owners.add(self.second_user())
notify_merge_failure(
self.subproject,
'Failed merge',
'Error\nstatus'
)
# Check mail (second one is for admin)
self.assertEqual(len(mail.outbox), 5)
def test_notify_new_string(self):
notify_new_string(self.get_translation())
# Check mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New string to translate in Test/Test - Czech'
)
def test_notify_new_translation(self):
unit = self.get_unit()
unit2 = self.get_translation().unit_set.get(
source='Thank you for using Weblate.'
)
notify_new_translation(
unit,
unit2,
self.second_user()
)
# Check mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New translation in Test/Test - Czech'
)
def test_notify_new_language(self):
second_user = self.second_user()
notify_new_language(
self.subproject,
Language.objects.filter(code='de'),
second_user
)
# Check mail (second one is for admin)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New language request in Test/Test'
)
# Add project owner
self.subproject.project.owners.add(second_user)
notify_new_language(
self.subproject,
Language.objects.filter(code='de'),
second_user,
)
# Check mail (second one is for admin)
self.assertEqual(len(mail.outbox), 5)
def test_notify_new_contributor(self):
unit = self.get_unit()
notify_new_contributor(
unit,
self.second_user()
)
# Check mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New contributor in Test/Test - Czech'
)
def test_notify_new_suggestion(self):
unit = self.get_unit()
notify_new_suggestion(
unit,
Suggestion.objects.create(
contentsum=unit.contentsum,
project=unit.translation.subproject.project,
language=unit.translation.language,
target='Foo'
),
self.second_user()
)
# Check mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New suggestion in Test/Test - Czech'
)
def test_notify_new_comment(self):
unit = self.get_unit()
notify_new_comment(
unit,
Comment.objects.create(
contentsum=unit.contentsum,
project=unit.translation.subproject.project,
language=unit.translation.language,
comment='Foo'
),
self.second_user(),
''
)
# Check mail
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New comment in Test/Test'
)
def test_notify_new_comment_report(self):
unit = self.get_unit()
notify_new_comment(
unit,
Comment.objects.create(
contentsum=unit.contentsum,
project=unit.translation.subproject.project,
language=None,
comment='Foo'
),
self.second_user(),
'noreply@weblate.org'
)
# Check mail
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(
mail.outbox[0].subject,
'[Weblate] New comment in Test/Test'
)
self.assertEqual(
mail.outbox[1].subject,
'[Weblate] New comment in Test/Test'
)
class CaptchaTest(UnitTestCase):
def test_decode(self):
question = '1 + 1'
timestamp = 1000
hashed = hash_question(question, timestamp)
self.assertEqual(
(question, timestamp),
unhash_question(hashed)
)
def test_tamper(self):
hashed = hash_question('', 0) + '00'
self.assertRaises(
ValueError,
unhash_question,
hashed
)
def test_invalid(self):
self.assertRaises(
ValueError,
unhash_question,
''
)
def test_object(self):
captcha = MathCaptcha('1 * 2')
self.assertFalse(
captcha.validate(1)
)
self.assertTrue(
captcha.validate(2)
)
restored = MathCaptcha.from_hash(captcha.hashed)
self.assertEqual(
captcha.question,
restored.question
)
self.assertRaises(
ValueError,
MathCaptcha.from_hash,
captcha.hashed[:40]
)
def test_generate(self):
'''
Test captcha generation for every operator.
'''
captcha = MathCaptcha()
for operator in MathCaptcha.operators:
captcha.operators = (operator,)
self.assertIn(operator, captcha.generate_question())
class MiddlewareTest(TestCase):
def view_method(self):
return 'VIEW'
def test_disabled(self):
middleware = RequireLoginMiddleware()
request = HttpRequest()
self.assertIsNone(
middleware.process_view(request, self.view_method, (), {})
)
@override_settings(LOGIN_REQUIRED_URLS=(r'/project/(.*)$',))
def test_protect_project(self):
middleware = RequireLoginMiddleware()
request = HttpRequest()
request.user = User()
request.META['SERVER_NAME'] = 'server'
request.META['SERVER_PORT'] = '80'
# No protection for not protected path
self.assertIsNone(
middleware.process_view(request, self.view_method, (), {})
)
request.path = '/project/foo/'
# No protection for protected path and logged in user
self.assertIsNone(
middleware.process_view(request, self.view_method, (), {})
)
# Protection for protected path and not logged in user
request.user = AnonymousUser()
self.assertIsInstance(
middleware.process_view(request, self.view_method, (), {}),
HttpResponseRedirect
)
# No protection for login and not logged in user
request.path = '/accounts/login/'
self.assertIsNone(
middleware.process_view(request, self.view_method, (), {})
)
class AvatarTest(ViewTestCase):
def setUp(self):
super(AvatarTest, self).setUp()
self.user.email = 'test@example.com'
self.user.save()
def assert_url(self):
url = avatar.avatar_for_email(self.user.email)
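# 55502f40dc8b7c769880b10874abc9d0 is the MD5 hex digest of 'test@example.com';
# libravatar (like gravatar) keys avatars on that hash of the e-mail address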
self.assertEqual(
'https://seccdn.libravatar.org/avatar/'
'55502f40dc8b7c769880b10874abc9d0',
url.split('?')[0]
)
def test_avatar_for_email_own(self):
backup = avatar.HAS_LIBRAVATAR
try:
avatar.HAS_LIBRAVATAR = False
self.assert_url()
finally:
avatar.HAS_LIBRAVATAR = backup
def test_avatar_for_email_libravatar(self):
if not avatar.HAS_LIBRAVATAR:
raise SkipTest('Libravatar not installed')
self.assert_url()
def test_avatar(self):
# Real user
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': self.user.username, 'size': 32}
)
)
self.assertPNG(response)
# Test caching
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': self.user.username, 'size': 32}
)
)
self.assertPNG(response)
def test_anonymous_avatar(self):
anonymous = User.objects.get(username='anonymous')
# Anonymous user
response = self.client.get(
reverse(
'user_avatar',
kwargs={'user': anonymous.username, 'size': 32}
)
)
self.assertPNG(response)
|
electrolinux/weblate
|
weblate/accounts/tests.py
|
Python
|
gpl-3.0
| 26,044
|
#!/usr/bin/env python
"""
dirac-rss-list-status
Script that dumps the DB information for the elements to standard output.
It returns information concerning the StatusType and Status attributes.
Usage:
dirac-rss-list-status
--element= Element family to be Synchronized ( Site, Resource or Node )
--elementType= ElementType narrows the search; None if default
--name= ElementName; None if default
--tokenOwner= Owner of the token; None if default
--statusType= StatusType; None if default
--status= Status; None if default
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
from DIRAC import gLogger, exit as DIRACExit, version
from DIRAC.Core.Base import Script
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.Core.Utilities.PrettyPrint import printTable
__RCSID__ = '$Id:$'
subLogger = None
switchDict = {}
def registerSwitches():
'''
Registers all switches that can be used while calling the script from the
command line interface.
'''
switches = (
( 'element=', 'Element family to be Synchronized ( Site, Resource or Node )' ),
( 'elementType=', 'ElementType narrows the search; None if default' ),
( 'name=', 'ElementName; None if default' ),
( 'tokenOwner=', 'Owner of the token; None if default' ),
( 'statusType=', 'StatusType; None if default' ),
( 'status=', 'Status; None if default' ),
)
for switch in switches:
Script.registerSwitch( '', switch[ 0 ], switch[ 1 ] )
def registerUsageMessage():
'''
Takes the script __doc__ and adds the DIRAC version to it
'''
hLine = ' ' + '='*78 + '\n'
usageMessage = hLine
usageMessage += ' DIRAC %s\n' % version
usageMessage += __doc__
usageMessage += '\n' + hLine
Script.setUsageMessage( usageMessage )
def parseSwitches():
'''
Parses the arguments passed by the user
'''
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if args:
subLogger.error( "Found the following positional args '%s', but we only accept switches" % args )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
switches = dict( Script.getUnprocessedSwitches() )
# Default values
switches.setdefault( 'elementType', None )
switches.setdefault( 'name', None )
switches.setdefault( 'tokenOwner', None )
switches.setdefault( 'statusType', None )
switches.setdefault( 'status', None )
if 'element' not in switches:
subLogger.error( "element Switch missing" )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
if not switches[ 'element' ] in ( 'Site', 'Resource', 'Node' ):
subLogger.error( "Found %s as element switch" % switches[ 'element' ] )
subLogger.error( "Please, check documentation below" )
Script.showHelp()
DIRACExit( 1 )
subLogger.debug( "The switches used are:" )
map( subLogger.debug, switches.iteritems() )
return switches
#...............................................................................
def getElements():
'''
Given the switches, gets a list of elements with their respective statustype
and status attributes.
'''
rssClient = ResourceStatusClient.ResourceStatusClient()
meta = { 'columns' : [] }
for key in ( 'Name', 'StatusType', 'Status', 'ElementType', 'TokenOwner' ):
# Convert the column name (e.g. 'StatusType') to its lowerCamelCase switch key ('statusType')
if switchDict[ key[0].lower() + key[1:] ] is None:
meta[ 'columns' ].append( key )
elements = rssClient.selectStatusElement(
switchDict[ 'element' ], 'Status',
name = switchDict[ 'name' ].split(',') if switchDict['name'] else None,
statusType = switchDict[ 'statusType' ].split(',') if switchDict['statusType'] else None,
status = switchDict[ 'status' ].split(',') if switchDict['status'] else None,
elementType = switchDict[ 'elementType' ].split(',') if switchDict['elementType'] else None,
tokenOwner = switchDict[ 'tokenOwner' ].split(',') if switchDict['tokenOwner'] else None,
meta = meta )
return elements
def tabularPrint( elementsList ):
'''
Prints the list of elements as a table
'''
subLogger.notice( '' )
subLogger.notice( 'Selection parameters:' )
subLogger.notice( ' %s: %s' % ( 'element'.ljust( 15 ), switchDict[ 'element' ] ) )
titles = []
for key in ( 'Name', 'StatusType', 'Status', 'ElementType', 'TokenOwner' ):
# Convert the column name (e.g. 'StatusType') to its lowerCamelCase switch key ('statusType')
keyT = key[0].lower() + key[1:]
if switchDict[ keyT ] is None:
titles.append( key )
else:
subLogger.notice( ' %s: %s' % ( key.ljust( 15 ), switchDict[ keyT ] ) )
subLogger.notice( '' )
subLogger.notice( printTable( titles, elementsList, printOut = False,
numbering = False, columnSeparator = ' | ' ) )
#...............................................................................
def run():
'''
Main function of the script
'''
elements = getElements()
if not elements[ 'OK' ]:
subLogger.error( elements )
DIRACExit( 1 )
elements = elements[ 'Value' ]
tabularPrint( elements )
#...............................................................................
if __name__ == "__main__":
subLogger = gLogger.getSubLogger( __file__ )
#Script initialization
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
#Run script
run()
#Bye
DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
andresailer/DIRAC
|
ResourceStatusSystem/scripts/dirac-rss-list-status.py
|
Python
|
gpl-3.0
| 5,951
|
def sv_main(num_verts=20, radius=5, num_rings=3, rotation=0.3, mdist=0.3):
# in boilerplate, could be less verbose
in_sockets = [
['s', 'num_verts', num_verts],
['s', 'radius', radius],
['s', 'num_rings', num_rings],
['s', 'rotation', rotation],
['s', 'distance', mdist]
]
from math import sin, cos, pi
import mathutils
from mathutils import Vector
TWO_PI = 2 * pi
r = radius
angle = TWO_PI / num_verts
v = []
e = []
# create vertices
for j in range(num_rings):
radial_offset = rotation * j
for i in range(num_verts):
theta = (angle * i) + radial_offset
tr = r + (0.5 * j)
v.append([cos(theta) * tr, sin(theta) * tr, 0])
# make kd tree
# documentation/blender_python_api_2_70_release/mathutils.kdtree.html
size = len(v)
kd = mathutils.kdtree.KDTree(size)
for i, vtx in enumerate(v):
kd.insert(Vector(vtx), i)
kd.balance()
# make edges
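# each vertex is linked to at most three neighbours found within `mdist`;
# a vertex matching itself in the range query is skipped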
for i, vtx in enumerate(v):
num_edges = 0
for (co, index, dist) in kd.find_range(vtx, mdist):
if i == index or (num_edges > 2):
continue
e.append([i, index])
num_edges += 1
# out boilerplate
out_sockets = [
['v', 'Vecs', [v]],
['s', 'Edges', e]
]
return in_sockets, out_sockets
|
kilon/sverchok
|
node_scripts/templates/zeffii/vert_edges_kdtree_range.py
|
Python
|
gpl-3.0
| 1,409
|
import os
import sys
import math
import time
import json
from rpc_client import RPC_Client
ROOT = os.path.dirname(os.path.realpath(sys.argv[0]))
DBPATH = os.path.join(ROOT, 'build.json')
MAXGAS = hex(int(math.pi*1e6))
def get_db():
with open(DBPATH) as dbfile:
return json.load(dbfile)
def save_db(db):
with open(DBPATH, 'w') as dbfile:
json.dump(db, dbfile, sort_keys=True, indent=4)
def confirmed_send(
to=None, sender=None, gas=MAXGAS,
data=None, value=None, blocktime=12,
rpc=None):
if rpc is None:
rpc = RPC_Client()
response = rpc.eth_sendTransaction({'to':to,
'from':sender,
'gas':gas,
'data':data,
'value':value})
assert 'error' not in response, json.dumps(response, indent=4, sort_keys=True)
txhash = response['result']
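# poll until the transaction is mined: eth_getTransactionReceipt returns a
# null result until a receipt is available, so retry every `blocktime` seconds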
while True:
receipt = rpc.eth_getTransactionReceipt(txhash)
if receipt['result']:
return receipt
time.sleep(blocktime)
|
kustomzone/augur-core
|
pyrpctools/__init__.py
|
Python
|
gpl-3.0
| 1,125
|
# Copyright (c) Microsoft Corporation 2015
from z3 import *
x = Real('x')
y = Real('y')
s = Solver()
s.add(x + y > 5, x > 1, y > 1)
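# check() should report sat; model() then returns one satisfying assignment,
# e.g. x = 4, y = 2 (the exact values may vary between Z3 versions)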
print(s.check())
print(s.model())
|
sccblom/vercors
|
deps/z3/4.4.1/Windows NT/intel/bin/example.py
|
Python
|
mpl-2.0
| 178
|
# -*- coding: utf-8 -*-
# The Hazard Library
# Copyright (C) 2013-2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports
:class:`EdwardsFah2013Alpine10Bars`,
:class:`EdwardsFah2013Alpine20Bars`,
:class:`EdwardsFah2013Alpine30Bars`,
:class:`EdwardsFah2013Alpine50Bars`,
:class:`EdwardsFah2013Alpine60Bars`,
:class:`EdwardsFah2013Alpine75Bars`,
:class:`EdwardsFah2013Alpine90Bars`,
:class:`EdwardsFah2013Alpine120Bars`.
"""
from __future__ import division
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGV, PGA, SA
from openquake.hazardlib.gsim.edwards_fah_2013a_coeffs import (
COEFFS_ALPINE_60Bars,
COEFFS_ALPINE_10Bars,
COEFFS_ALPINE_20Bars,
COEFFS_ALPINE_30Bars,
COEFFS_ALPINE_50Bars,
COEFFS_ALPINE_75Bars,
COEFFS_ALPINE_90Bars,
COEFFS_ALPINE_120Bars
)
from openquake.hazardlib.gsim.utils_swiss_gmpe import (
_compute_phi_ss,
_compute_C1_term
)
class EdwardsFah2013Alpine10Bars(GMPE):
"""
This class implements the GMPE developed by Ben Edwards and Donat Fäh
and published as "A Stochastic Ground-Motion Model for Switzerland"
Bulletin of the Seismological Society of America,
Vol. 103, No. 1, pp. 78–98, February 2013.
The GMPE was parametrized by Carlo Cauzzi to be implemented in OpenQuake.
It implements the equations for 'Alpine' and 'Foreland' - two
tectonic regionalizations defined for Switzerland -
therefore this GMPE is region specific.
@ implemented by laurentiu.danciu@sed.ethz.zh
"""
#: Supported tectonic region type is ALPINE which
#: is a sub-region of Active Shallow Crust.
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are peak ground velocity, peak ground
#: acceleration and spectral acceleration, see tables 3 and 4, pages 227 and 228.
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGV,
PGA,
SA
])
#: Supported intensity measure component is the geometric mean of two
#: horizontal components
#: :attr:`~openquake.hazardlib.const.IMC.AVERAGE_HORIZONTAL`
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation type is total,
#: Carlo Cauzzi - Personal Communication
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: Required site parameter is only Vs30 (used to distinguish rock
#: and deep soil).
REQUIRES_SITES_PARAMETERS = set(('vs30', ))
#: Required rupture parameters: magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag', 'rake'))
#: Required distance measure is Rrup
REQUIRES_DISTANCES = set(('rrup', ))
#: Vs30 value representing typical rock conditions in Switzerland.
#: confirmed by the Swiss GMPE group
ROCK_VS30 = 1105
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
COEFFS = self.COEFFS[imt]
R = self._compute_term_r(COEFFS, rup.mag, dists.rrup)
mean = 10 ** (self._compute_mean(COEFFS, rup.mag, R))
# Convert units to g,
# but only for PGA and SA (not PGV):
if isinstance(imt, (PGA, SA)):
mean = np.log(mean / (g*100.))
else:
# PGV:
mean = np.log(mean)
c1_rrup = _compute_C1_term(COEFFS, dists.rrup)
log_phi_ss = 1.00
stddevs = self._get_stddevs(
COEFFS, stddev_types, sites.vs30.shape[0], rup.mag, c1_rrup,
log_phi_ss, COEFFS['mean_phi_ss']
)
return mean, stddevs
def _get_stddevs(self, C, stddev_types, num_sites, mag, c1_rrup,
log_phi_ss, mean_phi_ss):
"""
Return standard deviations
"""
phi_ss = _compute_phi_ss(C, mag, c1_rrup, log_phi_ss, mean_phi_ss)
stddevs = []
for stddev_type in stddev_types:
assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(
C['tau'] * C['tau'] +
phi_ss * phi_ss) +
np.zeros(num_sites))
elif stddev_type == const.StdDev.INTRA_EVENT:
stddevs.append(phi_ss + np.zeros(num_sites))
elif stddev_type == const.StdDev.INTER_EVENT:
stddevs.append(C['tau'] + np.zeros(num_sites))
return stddevs
def _compute_term_r(self, C, mag, rrup):
"""
Compute distance term
d = log10(max(R,rmin));
"""
if mag > self.M1:
rrup_min = 0.55
elif mag > self.M2:
rrup_min = -2.80 * mag + 14.55
else:
rrup_min = -0.295 * mag + 2.65
R = np.maximum(rrup, rrup_min)
return np.log10(R)
def _compute_term_1(self, C, mag):
"""
Compute term 1
a1 + a2.*M + a3.*M.^2 + a4.*M.^3 + a5.*M.^4 + a6.*M.^5 + a7.*M.^6
"""
return (
C['a1'] + C['a2'] * mag + C['a3'] *
np.power(mag, 2) + C['a4'] * np.power(mag, 3)
+ C['a5'] * np.power(mag, 4) + C['a6'] *
np.power(mag, 5) + C['a7'] * np.power(mag, 6)
)
def _compute_term_2(self, C, mag, R):
"""
(a8 + a9.*M + a10.*M.*M + a11.*M.*M.*M).*d(r)
"""
return (
(C['a8'] + C['a9'] * mag + C['a10'] * np.power(mag, 2) +
C['a11'] * np.power(mag, 3)) * R
)
def _compute_term_3(self, C, mag, R):
"""
(a12 + a13.*M + a14.*M.*M + a15.*M.*M.*M).*(d(r).^2)
"""
return (
(C['a12'] + C['a13'] * mag + C['a14'] * np.power(mag, 2) +
C['a15'] * np.power(mag, 3)) * np.power(R, 2)
)
def _compute_term_4(self, C, mag, R):
"""
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
"""
return (
(C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) +
C['a19'] * np.power(mag, 3)) * np.power(R, 3)
)
def _compute_term_5(self, C, mag, R):
"""
(a20 + a21.*M + a22.*M.*M + a23.*M.*M.*M).*(d(r).^4)
"""
return (
(C['a20'] + C['a21'] * mag + C['a22'] * np.power(mag, 2) +
C['a23'] * np.power(mag, 3)) * np.power(R, 4)
)
def _compute_mean(self, C, mag, term_dist_r):
"""
compute mean
"""
return (self._compute_term_1(C, mag) +
self._compute_term_2(C, mag, term_dist_r) +
self._compute_term_3(C, mag, term_dist_r) +
self._compute_term_4(C, mag, term_dist_r) +
self._compute_term_5(C, mag, term_dist_r))
#: Fixed magnitude terms
M1 = 5.00
M2 = 4.70
COEFFS = COEFFS_ALPINE_10Bars
class EdwardsFah2013Alpine20Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 20Bars Model :class:`EdwardsFah2013Alpine20Bars`
"""
COEFFS = COEFFS_ALPINE_20Bars
class EdwardsFah2013Alpine30Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 30Bars Model :class:`EdwardsFah2013Alpine30Bars`
"""
COEFFS = COEFFS_ALPINE_30Bars
class EdwardsFah2013Alpine50Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 50Bars Model :class:`EdwardsFah2013Alpine50Bars`
"""
COEFFS = COEFFS_ALPINE_50Bars
class EdwardsFah2013Alpine60Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 60Bars Model :class:`EdwardsFah2013Alpine60Bars`
"""
COEFFS = COEFFS_ALPINE_60Bars
class EdwardsFah2013Alpine75Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 75Bars Model :class:`EdwardsFah2013Alpine75Bars`
"""
COEFFS = COEFFS_ALPINE_75Bars
class EdwardsFah2013Alpine90Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 90Bars Model :class:`EdwardsFah2013Alpine90Bars`
"""
COEFFS = COEFFS_ALPINE_90Bars
class EdwardsFah2013Alpine120Bars(EdwardsFah2013Alpine10Bars):
"""
This class extends :class:`EdwardsFah2013Alpine10Bars`
and implements the 120Bars Model :class:`EdwardsFah2013Alpine120Bars`
"""
COEFFS = COEFFS_ALPINE_120Bars
|
mmpagani/oq-hazardlib
|
openquake/hazardlib/gsim/edwards_fah_2013a.py
|
Python
|
agpl-3.0
| 9,481
|
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import datetime
from dateutil.relativedelta import relativedelta
from decimal import Decimal
from weboob.deprecated.browser import Page
from weboob.deprecated.browser.parsers.csvparser import CsvParser
from weboob.capabilities.bank import Account, AccountNotFound
from .accounthistory import Transaction, AccountHistory
class RedirectPage(Page):
pass
class HistoryParser(CsvParser):
FMTPARAMS = {'delimiter': ';'}
class ProAccountsList(Page):
ACCOUNT_TYPES = {u'Comptes épargne': Account.TYPE_SAVINGS,
u'Comptes courants': Account.TYPE_CHECKING,
}
def get_accounts_list(self):
for table in self.document.xpath('//div[@class="comptestabl"]/table'):
try:
account_type = self.ACCOUNT_TYPES[table.xpath('./caption/text()')[0].strip()]
except (IndexError,KeyError):
account_type = Account.TYPE_UNKNOWN
for tr in table.xpath('./tbody/tr'):
cols = tr.findall('td')
link = cols[0].find('a')
if link is None:
continue
a = Account()
a.type = account_type
a.id, a.label = map(unicode, link.attrib['title'].split(' ', 1))
tmp_balance = self.parser.tocleanstring(cols[1])
a.currency = a.get_currency(tmp_balance)
a.balance = Decimal(Transaction.clean_amount(tmp_balance))
a._card_links = []
a._link_id = link.attrib['href']
yield a
def get_account(self, id):
for account in self.get_accounts_list():
if account.id == id:
return account
raise AccountNotFound('Unable to find account: %s' % id)
class ProAccountHistory(Page):
def on_loaded(self):
link = self.document.xpath('//a[contains(@href, "telechargercomptes.ea")]/@href')[0]
self.browser.location(link)
class ProAccountHistoryDownload(Page):
def on_loaded(self):
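# request a CSV export of the account history covering the last 11 months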
self.browser.select_form(name='telechargement')
self.browser['dateDebutPeriode'] = (datetime.date.today() - relativedelta(months=11)).strftime('%d/%m/%Y')
self.browser.submit()
class ProAccountHistoryCSV(AccountHistory):
def get_next_link(self):
return False
def get_history(self, deferred=False):
for line in self.document.rows:
if len(line) < 4 or line[0] == 'Date':
continue
t = Transaction()
t.parse(raw=line[1], date=line[0])
t.set_amount(line[2])
t._coming = False
yield t
|
sputnick-dev/weboob
|
modules/bp/pages/pro.py
|
Python
|
agpl-3.0
| 3,390
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class SaleOrder(models.Model):
_inherit = 'sale.order'
@api.model
def default_get(self, fields_list):
default_vals = super(SaleOrder, self).default_get(fields_list)
if "sale_order_template_id" in fields_list and not default_vals.get("sale_order_template_id"):
company_id = default_vals.get('company_id', False)
company = self.env["res.company"].browse(company_id) if company_id else self.env.company
default_vals['sale_order_template_id'] = company.sale_order_template_id.id
return default_vals
sale_order_template_id = fields.Many2one(
'sale.order.template', 'Quotation Template',
readonly=True, check_company=True,
states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
sale_order_option_ids = fields.One2many(
'sale.order.option', 'order_id', 'Optional Products Lines',
copy=True, readonly=True,
states={'draft': [('readonly', False)], 'sent': [('readonly', False)]})
@api.constrains('company_id', 'sale_order_option_ids')
def _check_optional_product_company_id(self):
for order in self:
companies = order.sale_order_option_ids.product_id.company_id
if companies and companies != order.company_id:
bad_products = order.sale_order_option_ids.product_id.filtered(lambda p: p.company_id and p.company_id != order.company_id)
raise ValidationError(_(
"Your quotation contains products from company %(product_company)s whereas your quotation belongs to company %(quote_company)s. \n Please change the company of your quotation or remove the products from other companies (%(bad_products)s).",
product_company=', '.join(companies.mapped('display_name')),
quote_company=order.company_id.display_name,
bad_products=', '.join(bad_products.mapped('display_name')),
))
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if self.sale_order_template_id and self.sale_order_template_id.number_of_days > 0:
default = dict(default or {})
default['validity_date'] = fields.Date.context_today(self) + timedelta(self.sale_order_template_id.number_of_days)
return super(SaleOrder, self).copy(default=default)
@api.onchange('partner_id')
def onchange_partner_id(self):
super(SaleOrder, self).onchange_partner_id()
template = self.sale_order_template_id.with_context(lang=self.partner_id.lang)
self.note = template.note or self.note
def _compute_line_data_for_template_change(self, line):
return {
'display_type': line.display_type,
'name': line.name,
'state': 'draft',
}
def _compute_option_data_for_template_change(self, option):
price = option.product_id.lst_price
discount = 0
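# with a 'without_discount' pricelist policy the list price is kept and the
# pricelist difference is expressed as a discount percentage; otherwise the
# pricelist price is used directly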
if self.pricelist_id:
pricelist_price = self.pricelist_id.with_context(uom=option.uom_id.id).get_product_price(option.product_id, 1, False)
if self.pricelist_id.discount_policy == 'without_discount' and price:
discount = max(0, (price - pricelist_price) * 100 / price)
else:
price = pricelist_price
return {
'product_id': option.product_id.id,
'name': option.name,
'quantity': option.quantity,
'uom_id': option.uom_id.id,
'price_unit': price,
'discount': discount
}
def update_prices(self):
self.ensure_one()
res = super().update_prices()
for line in self.sale_order_option_ids:
line.price_unit = self.pricelist_id.get_product_price(line.product_id, line.quantity, self.partner_id, uom_id=line.uom_id.id)
return res
@api.onchange('sale_order_template_id')
def onchange_sale_order_template_id(self):
if not self.sale_order_template_id:
self.require_signature = self._get_default_require_signature()
self.require_payment = self._get_default_require_payment()
return
template = self.sale_order_template_id.with_context(lang=self.partner_id.lang)
# --- first, process the list of products from the template
order_lines = [(5, 0, 0)]
for line in template.sale_order_template_line_ids:
data = self._compute_line_data_for_template_change(line)
if line.product_id:
price = line.product_id.lst_price
discount = 0
if self.pricelist_id:
pricelist_price = self.pricelist_id.with_context(uom=line.product_uom_id.id).get_product_price(line.product_id, 1, False)
if self.pricelist_id.discount_policy == 'without_discount' and price:
discount = max(0, (price - pricelist_price) * 100 / price)
else:
price = pricelist_price
data.update({
'price_unit': price,
'discount': discount,
'product_uom_qty': line.product_uom_qty,
'product_id': line.product_id.id,
'product_uom': line.product_uom_id.id,
'customer_lead': self._get_customer_lead(line.product_id.product_tmpl_id),
})
order_lines.append((0, 0, data))
self.order_line = order_lines
self.order_line._compute_tax_id()
# then, process the list of optional products from the template
option_lines = [(5, 0, 0)]
for option in template.sale_order_template_option_ids:
data = self._compute_option_data_for_template_change(option)
option_lines.append((0, 0, data))
self.sale_order_option_ids = option_lines
if template.number_of_days > 0:
self.validity_date = fields.Date.context_today(self) + timedelta(template.number_of_days)
self.require_signature = template.require_signature
self.require_payment = template.require_payment
if template.note:
self.note = template.note
def action_confirm(self):
res = super(SaleOrder, self).action_confirm()
for order in self:
if order.sale_order_template_id and order.sale_order_template_id.mail_template_id:
self.sale_order_template_id.mail_template_id.send_mail(order.id)
return res
def get_access_action(self, access_uid=None):
""" Instead of the classic form view, redirect to the online quote if it exists. """
self.ensure_one()
user = access_uid and self.env['res.users'].sudo().browse(access_uid) or self.env.user
if not self.sale_order_template_id or (not user.share and not self.env.context.get('force_website')):
return super(SaleOrder, self).get_access_action(access_uid)
return {
'type': 'ir.actions.act_url',
'url': self.get_portal_url(),
'target': 'self',
'res_id': self.id,
}
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
_description = "Sales Order Line"
sale_order_option_ids = fields.One2many('sale.order.option', 'line_id', 'Optional Products Lines')
# Take the description on the order template if the product is present in it
@api.onchange('product_id')
def product_id_change(self):
domain = super(SaleOrderLine, self).product_id_change()
if self.product_id and self.order_id.sale_order_template_id:
for line in self.order_id.sale_order_template_id.sale_order_template_line_ids:
if line.product_id == self.product_id:
self.name = line.with_context(lang=self.order_id.partner_id.lang).name + self._get_sale_order_line_multiline_description_variants()
break
return domain
class SaleOrderOption(models.Model):
_name = "sale.order.option"
_description = "Sale Options"
_order = 'sequence, id'
is_present = fields.Boolean(string="Present on Quotation",
help="This field will be checked if the option line's product is "
"already present in the quotation.",
compute="_compute_is_present", search="_search_is_present")
order_id = fields.Many2one('sale.order', 'Sales Order Reference', ondelete='cascade', index=True)
line_id = fields.Many2one('sale.order.line', ondelete="set null", copy=False)
name = fields.Text('Description', required=True)
product_id = fields.Many2one('product.product', 'Product', required=True, domain=[('sale_ok', '=', True)])
price_unit = fields.Float('Unit Price', required=True, digits='Product Price')
discount = fields.Float('Discount (%)', digits='Discount')
uom_id = fields.Many2one('uom.uom', 'Unit of Measure ', required=True, domain="[('category_id', '=', product_uom_category_id)]")
product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id', readonly=True)
quantity = fields.Float('Quantity', required=True, digits='Product Unit of Measure', default=1)
sequence = fields.Integer('Sequence', help="Gives the sequence order when displaying a list of optional products.")
@api.depends('line_id', 'order_id.order_line', 'product_id')
def _compute_is_present(self):
# NOTE: this field cannot be stored as the line_id is usually removed
# through cascade deletion, which means the compute would be false
for option in self:
option.is_present = bool(option.order_id.order_line.filtered(lambda l: l.product_id == option.product_id))
def _search_is_present(self, operator, value):
if (operator, value) in [('=', True), ('!=', False)]:
return [('line_id', '=', False)]
return [('line_id', '!=', False)]
@api.onchange('product_id', 'uom_id', 'quantity')
def _onchange_product_id(self):
if not self.product_id:
return
product = self.product_id.with_context(
lang=self.order_id.partner_id.lang,
partner=self.order_id.partner_id,
quantity=self.quantity,
date=self.order_id.date_order,
pricelist=self.order_id.pricelist_id.id,
uom=self.uom_id.id,
fiscal_position=self.env.context.get('fiscal_position')
)
self.name = product.get_product_multiline_description_sale()
self.uom_id = self.uom_id or product.uom_id
# To compute the discount a so line is created in cache
values = self._get_values_to_add_to_order()
new_sol = self.env['sale.order.line'].new(values)
new_sol._onchange_discount()
self.discount = new_sol.discount
if self.order_id.pricelist_id and self.order_id.partner_id:
self.price_unit = new_sol._get_display_price(product)
def button_add_to_order(self):
self.add_option_to_order()
def add_option_to_order(self):
self.ensure_one()
sale_order = self.order_id
if sale_order.state not in ['draft', 'sent']:
raise UserError(_('You cannot add options to a confirmed order.'))
values = self._get_values_to_add_to_order()
order_line = self.env['sale.order.line'].create(values)
order_line._compute_tax_id()
self.write({'line_id': order_line.id})
if sale_order:
sale_order.add_option_to_order_with_taxcloud()
def _get_values_to_add_to_order(self):
self.ensure_one()
return {
'order_id': self.order_id.id,
'price_unit': self.price_unit,
'name': self.name,
'product_id': self.product_id.id,
'product_uom_qty': self.quantity,
'product_uom': self.uom_id.id,
'discount': self.discount,
'company_id': self.order_id.company_id.id,
}
|
rven/odoo
|
addons/sale_management/models/sale_order.py
|
Python
|
agpl-3.0
| 12,391
|
import models
import pombola.core.models
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404, redirect
def in_place(request, slug):
place = get_object_or_404( pombola.core.models.Place, slug=slug)
projects = place.project_set
return render_to_response(
'projects/in_place.html',
{
'place': place,
'projects': projects,
},
context_instance=RequestContext(request)
)
|
patricmutwiri/pombola
|
pombola/projects/views.py
|
Python
|
agpl-3.0
| 504
|
# -*- coding: utf-8 -*-
from . import test_product_margin_classification
|
acsone/sale-workflow
|
product_margin_classification/tests/__init__.py
|
Python
|
agpl-3.0
| 74
|
# -*- encoding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import tools
from openerp.osv import fields, orm
class hr_language(orm.Model):
_name = 'hr.language'
_columns = {
'name': fields.selection(
tools.scan_languages(),
'Language',
required=True,
),
'description': fields.char(
'Description',
size=64,
required=True,
translate=True,
),
'employee_id': fields.many2one(
'hr.employee',
'Employee',
required=True,
),
'read': fields.boolean(
'Read',
),
'write': fields.boolean(
'Write',
),
'speak': fields.boolean(
'Speak',
),
}
_defaults = {
'read': True,
'write': True,
'speak': True,
}
class hr_employee(orm.Model):
_inherit = 'hr.employee'
_columns = {
'language_ids': fields.one2many(
'hr.language',
'employee_id',
'Languages',
),
}
|
macopedia/hr
|
__unported__/hr_language/hr_language.py
|
Python
|
agpl-3.0
| 2,057
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_exchange_rates_adjustment
import wizard
|
sysadminmatmoz/odoo-clearcorp
|
account_exchange_rates_adjustment/__init__.py
|
Python
|
agpl-3.0
| 1,086
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) Camptocamp SA
# Author: Arnaud Wüst
#
#
# This file is part of the c2c_report_tools module.
#
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name" : "c2c Reporting Tools. A library that provide a new way to create clean reports efficiently",
"version" : "5.0",
"author" : "Camptocamp",
"category" : "Generic Modules/Reporting",
"description": """ This module offer a growing collection of objects to create simple and advanced reports in a new way of doing.
You can create powerful reports with a few lines of python code and nothing else. (no sxw, rml or xml)
This module follow multiple goals:
- To accelerate report creation by creating reusable pieces of code (one line of code to create standard header and footer)
- To accelerate report generation (processing) by getting ride of uncecessary parsing and transformations (direct python to pdf generation)
- To improve reporting capabilities by getting ride of uncomplete parsers and limited middle technologies
- To make reports designs more uniform
For exemples of use, have a look at c2c_planning_management. Our first module based on this tool.
""",
"website": "http://www.camptocamp.com",
"depends" : [],
"init_xml" : [
],
"data" : [
],
"active": False,
"installable": True
}
|
VitalPet/c2c-rd-addons
|
c2c_reporting_tools_chricar/__terp__.py
|
Python
|
agpl-3.0
| 2,752
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Canonical
#
# Authors:
# Michael Vogt
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import GObject, Gtk, Gdk
import datetime
import gettext
import logging
import os
import json
import sys
import tempfile
import time
import threading
# py3
try:
from urllib.request import urlopen
urlopen # pyflakes
from queue import Queue
Queue # pyflakes
except ImportError:
# py2 fallbacks
from urllib import urlopen
from Queue import Queue
from gettext import gettext as _
from softwarecenter.backend.ubuntusso import get_ubuntu_sso_backend
import piston_mini_client
from softwarecenter.paths import SOFTWARE_CENTER_CONFIG_DIR
from softwarecenter.enums import Icons, SOFTWARE_CENTER_NAME_KEYRING
from softwarecenter.config import get_config
from softwarecenter.distro import get_distro, get_current_arch
from softwarecenter.backend.login_sso import get_sso_backend
from softwarecenter.backend.reviews import Review
from softwarecenter.db.database import Application
from softwarecenter.gwibber_helper import GwibberHelper, GwibberHelperMock
from softwarecenter.i18n import get_language
from softwarecenter.ui.gtk3.SimpleGtkbuilderApp import SimpleGtkbuilderApp
from softwarecenter.ui.gtk3.dialogs import SimpleGtkbuilderDialog
from softwarecenter.ui.gtk3.widgets.stars import ReactiveStar
from softwarecenter.utils import make_string_from_list, utf8
from softwarecenter.backend.piston.rnrclient import RatingsAndReviewsAPI
from softwarecenter.backend.piston.rnrclient_pristine import ReviewRequest
# get current distro and set default server root
distro = get_distro()
SERVER_ROOT = distro.REVIEWS_SERVER
# server status URL
SERVER_STATUS_URL = SERVER_ROOT + "/server-status/"
class UserCancelException(Exception):
""" user pressed cancel """
pass
TRANSMIT_STATE_NONE = "transmit-state-none"
TRANSMIT_STATE_INPROGRESS = "transmit-state-inprogress"
TRANSMIT_STATE_DONE = "transmit-state-done"
TRANSMIT_STATE_ERROR = "transmit-state-error"
class GRatingsAndReviews(GObject.GObject):
""" Access ratings&reviews API as a gobject """
__gsignals__ = {
# send when a transmit is started
"transmit-start": (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_PYOBJECT, ),
),
# send when a transmit was successful
"transmit-success": (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_PYOBJECT, ),
),
# send when a transmit failed
"transmit-failure": (GObject.SIGNAL_RUN_LAST,
GObject.TYPE_NONE,
(GObject.TYPE_PYOBJECT, str),
),
}
def __init__(self, token):
super(GRatingsAndReviews, self).__init__()
# piston worker thread
self.worker_thread = Worker(token)
self.worker_thread.start()
GObject.timeout_add(500,
self._check_thread_status,
None)
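# _check_thread_status polls the worker every 500 ms and turns its
# transmit state into "transmit-success" / "transmit-failure" signals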
def submit_review(self, review):
self.emit("transmit-start", review)
self.worker_thread.pending_reviews.put(review)
def report_abuse(self, review_id, summary, text):
self.emit("transmit-start", review_id)
self.worker_thread.pending_reports.put((int(review_id), summary, text))
def submit_usefulness(self, review_id, is_useful):
self.emit("transmit-start", review_id)
self.worker_thread.pending_usefulness.put((int(review_id), is_useful))
def modify_review(self, review_id, review):
self.emit("transmit-start", review_id)
self.worker_thread.pending_modify.put((int(review_id), review))
def delete_review(self, review_id):
self.emit("transmit-start", review_id)
self.worker_thread.pending_delete.put(int(review_id))
def server_status(self):
self.worker_thread.pending_server_status()
def shutdown(self):
self.worker_thread.shutdown()
# internal
def _check_thread_status(self, data):
if self.worker_thread._transmit_state == TRANSMIT_STATE_DONE:
self.emit("transmit-success", "")
self.worker_thread._transmit_state = TRANSMIT_STATE_NONE
elif self.worker_thread._transmit_state == TRANSMIT_STATE_ERROR:
self.emit("transmit-failure", "",
self.worker_thread._transmit_error_str)
self.worker_thread._transmit_state = TRANSMIT_STATE_NONE
return True
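# Hedged usage sketch (handler bodies are illustrative, not part of this
# module).  The wrapper is driven through its signals and the submit_*
# methods; the token dict must carry the OAuth fields the Worker expects
# ("token", "token_secret", "consumer_key", "consumer_secret"):
#
#   api = GRatingsAndReviews(oauth_token_dict)
#   api.connect("transmit-success", lambda api, data: logging.debug("sent"))
#   api.connect("transmit-failure", lambda api, data, err: logging.debug(err))
#   api.submit_usefulness(review_id=1234, is_useful=True)
#   api.shutdown()
#
# BaseApp._create_gratings_api() further down wires these signals up for the
# real dialogs.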
class Worker(threading.Thread):
def __init__(self, token):
# init parent
threading.Thread.__init__(self)
self.pending_reviews = Queue()
self.pending_reports = Queue()
self.pending_usefulness = Queue()
self.pending_modify = Queue()
self.pending_delete = Queue()
self.pending_server_status = Queue()
self._shutdown = False
# FIXME: instead of a binary value we need the state associated
# with each request from the queue
self._transmit_state = TRANSMIT_STATE_NONE
self._transmit_error_str = ""
self.display_name = "No display name"
auth = piston_mini_client.auth.OAuthAuthorizer(token["token"],
token["token_secret"], token["consumer_key"],
token["consumer_secret"])
# change default server to the SSL one
distro = get_distro()
service_root = distro.REVIEWS_SERVER
self.rnrclient = RatingsAndReviewsAPI(service_root=service_root,
auth=auth)
def run(self):
"""Main thread run interface, logs into launchpad and waits
for commands
"""
logging.debug("worker thread run")
# loop
self._wait_for_commands()
def shutdown(self):
"""Request shutdown"""
self._shutdown = True
def _wait_for_commands(self):
"""internal helper that waits for commands"""
while True:
#logging.debug("worker: _wait_for_commands")
self._submit_reviews_if_pending()
self._submit_reports_if_pending()
self._submit_usefulness_if_pending()
self._submit_modify_if_pending()
self._submit_delete_if_pending()
time.sleep(0.2)
if (self._shutdown and
self.pending_reviews.empty() and
self.pending_usefulness.empty() and
self.pending_reports.empty() and
self.pending_modify.empty() and
self.pending_delete.empty()):
return
def _submit_usefulness_if_pending(self):
""" the actual usefulness function """
while not self.pending_usefulness.empty():
logging.debug("POST usefulness")
self._transmit_state = TRANSMIT_STATE_INPROGRESS
(review_id, is_useful) = self.pending_usefulness.get()
try:
res = self.rnrclient.submit_usefulness(
review_id=review_id, useful=str(is_useful))
self._transmit_state = TRANSMIT_STATE_DONE
sys.stdout.write(json.dumps(res))
except Exception as e:
logging.exception("submit_usefulness failed")
err_str = self._get_error_messages(e)
self._transmit_error_str = err_str
self._write_exception_html_log_if_needed(e)
self._transmit_state = TRANSMIT_STATE_ERROR
self.pending_usefulness.task_done()
def _submit_modify_if_pending(self):
""" the actual modify function """
while not self.pending_modify.empty():
logging.debug("_modify_review")
self._transmit_state = TRANSMIT_STATE_INPROGRESS
(review_id, review) = self.pending_modify.get()
summary = review['summary']
review_text = review['review_text']
rating = review['rating']
try:
res = self.rnrclient.modify_review(review_id=review_id,
summary=summary,
review_text=review_text,
rating=rating)
self._transmit_state = TRANSMIT_STATE_DONE
sys.stdout.write(json.dumps(vars(res)))
except Exception as e:
logging.exception("modify_review")
err_str = self._get_error_messages(e)
self._write_exception_html_log_if_needed(e)
self._transmit_state = TRANSMIT_STATE_ERROR
self._transmit_error_str = err_str
self.pending_modify.task_done()
def _submit_delete_if_pending(self):
""" the actual deletion """
while not self.pending_delete.empty():
logging.debug("POST delete")
self._transmit_state = TRANSMIT_STATE_INPROGRESS
review_id = self.pending_delete.get()
try:
res = self.rnrclient.delete_review(review_id=review_id)
self._transmit_state = TRANSMIT_STATE_DONE
sys.stdout.write(json.dumps(res))
except Exception as e:
logging.exception("delete_review failed")
self._write_exception_html_log_if_needed(e)
self._transmit_error_str = _("Failed to delete review")
self._transmit_state = TRANSMIT_STATE_ERROR
self.pending_delete.task_done()
def _submit_reports_if_pending(self):
""" the actual report function """
while not self.pending_reports.empty():
logging.debug("POST report")
self._transmit_state = TRANSMIT_STATE_INPROGRESS
(review_id, summary, text) = self.pending_reports.get()
try:
res = self.rnrclient.flag_review(review_id=review_id,
reason=summary,
text=text)
self._transmit_state = TRANSMIT_STATE_DONE
sys.stdout.write(json.dumps(res))
except Exception as e:
logging.exception("flag_review failed")
err_str = self._get_error_messages(e)
self._transmit_error_str = err_str
self._write_exception_html_log_if_needed(e)
self._transmit_state = TRANSMIT_STATE_ERROR
self.pending_reports.task_done()
def _write_exception_html_log_if_needed(self, e):
# write out a "oops.html"
if type(e) is piston_mini_client.APIError:
f = tempfile.NamedTemporaryFile(
prefix="sc_submit_oops_", suffix=".html", delete=False)
            # newer piston-mini-client exposes only the body of the returned
            # data; older versions push everything into one big string
            if hasattr(e, "body") and e.body:
                f.write(e.body)
            else:
                f.write(str(e))
            f.close()
# reviews
def queue_review(self, review):
""" queue a new review for sending to LP """
logging.debug("queue_review %s" % review)
self.pending_reviews.put(review)
def _submit_reviews_if_pending(self):
""" the actual submit function """
while not self.pending_reviews.empty():
logging.debug("_submit_review")
self._transmit_state = TRANSMIT_STATE_INPROGRESS
review = self.pending_reviews.get()
piston_review = ReviewRequest()
piston_review.package_name = review.app.pkgname
piston_review.app_name = review.app.appname
piston_review.summary = review.summary
piston_review.version = review.package_version
piston_review.review_text = review.text
piston_review.date = str(review.date)
piston_review.rating = review.rating
piston_review.language = review.language
piston_review.arch_tag = get_current_arch()
piston_review.origin = review.origin
piston_review.distroseries = distro.get_codename()
try:
res = self.rnrclient.submit_review(review=piston_review)
self._transmit_state = TRANSMIT_STATE_DONE
# output the resulting ReviewDetails object as json so
# that the parent can read it
sys.stdout.write(json.dumps(vars(res)))
except Exception as e:
logging.exception("submit_review")
err_str = self._get_error_messages(e)
self._write_exception_html_log_if_needed(e)
self._transmit_state = TRANSMIT_STATE_ERROR
self._transmit_error_str = err_str
self.pending_reviews.task_done()
def _get_error_messages(self, e):
if type(e) is piston_mini_client.APIError:
try:
logging.warning(e.body)
error_msg = json.loads(e.body)['errors']
errs = error_msg["__all__"]
err_str = _("Server's response was:")
for err in errs:
err_str = _("%s\n%s") % (err_str, err)
except:
err_str = _("Unknown error communicating with server. "
"Check your log and consider raising a bug report "
"if this problem persists")
logging.warning(e)
else:
err_str = _("Unknown error communicating with server. Check "
"your log and consider raising a bug report if this "
"problem persists")
logging.warning(e)
return err_str
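    # Illustrative note: the APIError body parsed above is assumed to be JSON
    # shaped like {"errors": {"__all__": ["<message>", ...]}}; anything that
    # does not match that shape falls back to the generic "Unknown error"
    # string.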
def verify_server_status(self):
""" verify that the server we want to talk to can be reached
this method should be overriden if clients talk to a different
server than rnr
"""
try:
resp = urlopen(SERVER_STATUS_URL).read()
if resp != "ok":
return False
except Exception as e:
logging.error("exception from '%s': '%s'" % (SERVER_STATUS_URL, e))
return False
return True
class BaseApp(SimpleGtkbuilderApp):
def __init__(self, datadir, uifile):
SimpleGtkbuilderApp.__init__(
self, os.path.join(datadir, "ui/gtk3", uifile), "software-center")
# generic data
self.token = None
self.display_name = None
self._login_successful = False
self._whoami_token_reset_nr = 0
#persistent config
configfile = os.path.join(
SOFTWARE_CENTER_CONFIG_DIR, "submit_reviews.cfg")
self.config = get_config(configfile)
# status spinner
self.status_spinner = Gtk.Spinner()
self.status_spinner.set_size_request(32, 32)
self.login_spinner_vbox.pack_start(self.status_spinner, False, False,
0)
self.login_spinner_vbox.reorder_child(self.status_spinner, 0)
self.status_spinner.show()
#submit status spinner
self.submit_spinner = Gtk.Spinner()
self.submit_spinner.set_size_request(*Gtk.icon_size_lookup(
Gtk.IconSize.SMALL_TOOLBAR)[:2])
#submit error image
self.submit_error_img = Gtk.Image()
self.submit_error_img.set_from_stock(Gtk.STOCK_DIALOG_ERROR,
Gtk.IconSize.SMALL_TOOLBAR)
#submit success image
self.submit_success_img = Gtk.Image()
self.submit_success_img.set_from_stock(Gtk.STOCK_APPLY,
Gtk.IconSize.SMALL_TOOLBAR)
#submit warn image
self.submit_warn_img = Gtk.Image()
self.submit_warn_img.set_from_stock(Gtk.STOCK_DIALOG_INFO,
Gtk.IconSize.SMALL_TOOLBAR)
#label size to prevent image or spinner from resizing
self.label_transmit_status.set_size_request(-1,
Gtk.icon_size_lookup(Gtk.IconSize.SMALL_TOOLBAR)[1])
def _get_parent_xid_for_login_window(self):
# no get_xid() yet in gir world
#return self.submit_window.get_window().get_xid()
return ""
def run(self):
# initially display a 'Connecting...' page
self.main_notebook.set_current_page(0)
self.login_status_label.set_markup(_(u"Signing in\u2026"))
self.status_spinner.start()
self.submit_window.show()
# now run the loop
self.login()
def quit(self, exitcode=0):
sys.exit(exitcode)
def _add_spellcheck_to_textview(self, textview):
""" adds a spellchecker (if available) to the given Gtk.textview """
pass
#~ try:
#~ import gtkspell
#~ # mvo: gtkspell.get_from_text_view() is broken, so we use this
#~ # method instead, the second argument is the language to
#~ # use (that is directly passed to pspell)
#~ spell = gtkspell.Spell(textview, None)
#~ except:
#~ return
#~ return spell
def login(self, show_register=True):
logging.debug("login()")
login_window_xid = self._get_parent_xid_for_login_window()
help_text = _("To review software or to report abuse you need to "
"sign in to a Ubuntu Single Sign-On account.")
self.sso = get_sso_backend(login_window_xid,
SOFTWARE_CENTER_NAME_KEYRING, help_text)
self.sso.connect("login-successful", self._maybe_login_successful)
self.sso.connect("login-canceled", self._login_canceled)
if show_register:
self.sso.login_or_register()
else:
self.sso.login()
def _login_canceled(self, sso):
self.status_spinner.hide()
self.login_status_label.set_markup(
'<b><big>%s</big></b>' % _("Login was canceled"))
def _maybe_login_successful(self, sso, oauth_result):
"""called after we have the token, then we go and figure out our
name
"""
logging.debug("_maybe_login_successful")
self.token = oauth_result
self.ssoapi = get_ubuntu_sso_backend()
self.ssoapi.connect("whoami", self._whoami_done)
self.ssoapi.connect("error", self._whoami_error)
# this will automatically verify the token and retrigger login
# if its expired
self.ssoapi.whoami()
def _whoami_done(self, ssologin, result):
logging.debug("_whoami_done")
self.display_name = result["displayname"]
self._create_gratings_api()
self.login_successful(self.display_name)
def _whoami_error(self, ssologin, e):
logging.error("whoami error '%s'" % e)
# show error
self.status_spinner.hide()
self.login_status_label.set_markup(
'<b><big>%s</big></b>' % _("Failed to log in"))
def login_successful(self, display_name):
""" callback when the login was successful """
pass
def on_button_cancel_clicked(self, button=None):
# bring it down gracefully
if hasattr(self, "api"):
self.api.shutdown()
while Gtk.events_pending():
Gtk.main_iteration()
self.quit(1)
def _create_gratings_api(self):
self.api = GRatingsAndReviews(self.token)
self.api.connect("transmit-start", self.on_transmit_start)
self.api.connect("transmit-success", self.on_transmit_success)
self.api.connect("transmit-failure", self.on_transmit_failure)
def on_transmit_start(self, api, trans):
self.button_post.set_sensitive(False)
self.button_cancel.set_sensitive(False)
self._change_status("progress", _(self.SUBMIT_MESSAGE))
def on_transmit_success(self, api, trans):
self.api.shutdown()
self.quit()
def on_transmit_failure(self, api, trans, error):
self._change_status("fail", error)
self.button_post.set_sensitive(True)
self.button_cancel.set_sensitive(True)
def _change_status(self, type, message):
"""method to separate the updating of status icon/spinner and
message in the submit review window, takes a type (progress,
fail, success, clear, warning) as a string and a message
string then updates status area accordingly
"""
self._clear_status_imagery()
self.label_transmit_status.set_text("")
if type == "progress":
self.status_hbox.pack_start(self.submit_spinner, False, False, 0)
self.status_hbox.reorder_child(self.submit_spinner, 0)
self.submit_spinner.show()
self.submit_spinner.start()
self.label_transmit_status.set_text(message)
elif type == "fail":
self.status_hbox.pack_start(self.submit_error_img, False, False, 0)
self.status_hbox.reorder_child(self.submit_error_img, 0)
self.submit_error_img.show()
self.label_transmit_status.set_text(_(self.FAILURE_MESSAGE))
self.error_textview.get_buffer().set_text(_(message))
self.detail_expander.show()
elif type == "success":
self.status_hbox.pack_start(self.submit_success_img, False, False,
0)
self.status_hbox.reorder_child(self.submit_success_img, 0)
self.submit_success_img.show()
self.label_transmit_status.set_text(message)
elif type == "warning":
self.status_hbox.pack_start(self.submit_warn_img, False, False, 0)
self.status_hbox.reorder_child(self.submit_warn_img, 0)
self.submit_warn_img.show()
self.label_transmit_status.set_text(message)
def _clear_status_imagery(self):
self.detail_expander.hide()
self.detail_expander.set_expanded(False)
#clears spinner or error image from dialog submission label
# before trying to display one or the other
if self.submit_spinner.get_parent():
self.status_hbox.remove(self.submit_spinner)
if self.submit_error_img.get_window():
self.status_hbox.remove(self.submit_error_img)
if self.submit_success_img.get_window():
self.status_hbox.remove(self.submit_success_img)
if self.submit_warn_img.get_window():
self.status_hbox.remove(self.submit_warn_img)
class SubmitReviewsApp(BaseApp):
""" review a given application or package """
STAR_SIZE = (32, 32)
APP_ICON_SIZE = 48
#character limits for text boxes and hurdles for indicator changes
# (overall field maximum, limit to display warning, limit to change
# colour)
SUMMARY_CHAR_LIMITS = (80, 60, 70)
REVIEW_CHAR_LIMITS = (5000, 4900, 4950)
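    # e.g. for the summary field (80, 60, 70): the character counter appears
    # once 60 characters have been typed, its colour starts fading towards
    # ERROR_COLOUR above 70, and the post button is disabled beyond 80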
#alert colours for character warning labels
NORMAL_COLOUR = "000000"
ERROR_COLOUR = "FF0000"
SUBMIT_MESSAGE = _("Submitting Review")
FAILURE_MESSAGE = _("Failed to submit review")
SUCCESS_MESSAGE = _("Review submitted")
def __init__(self, app, version, iconname, origin, parent_xid, datadir,
action="submit", review_id=0):
BaseApp.__init__(self, datadir, "submit_review.ui")
self.datadir = datadir
# legal fineprint, do not change without consulting a lawyer
msg = _("By submitting this review, you agree not to include "
"anything defamatory, infringing, or illegal. Canonical "
"may, at its discretion, publish your name and review in "
"Ubuntu Software Center and elsewhere, and allow the "
"software or content author to publish it too.")
self.label_legal_fineprint.set_markup(
'<span size="x-small">%s</span>' % msg)
# additional icons come from app-install-data
self.icons = Gtk.IconTheme.get_default()
self.icons.append_search_path("/usr/share/app-install/icons/")
self.submit_window.connect("destroy", self.on_button_cancel_clicked)
self._add_spellcheck_to_textview(self.textview_review)
self.star_rating = ReactiveStar()
alignment = Gtk.Alignment.new(0.0, 0.5, 1.0, 1.0)
alignment.set_padding(3, 3, 3, 3)
alignment.add(self.star_rating)
self.star_rating.set_size_as_pixel_value(36)
self.star_caption = Gtk.Label()
alignment.show_all()
self.rating_hbox.pack_start(alignment, True, True, 0)
self.rating_hbox.reorder_child(alignment, 0)
self.rating_hbox.pack_start(self.star_caption, False, False, 0)
self.rating_hbox.reorder_child(self.star_caption, 1)
self.review_buffer = self.textview_review.get_buffer()
self.detail_expander.hide()
self.retrieve_api = RatingsAndReviewsAPI()
# data
self.app = app
self.version = version
self.origin = origin
self.iconname = iconname
self.action = action
self.review_id = int(review_id)
# parent xid
#~ if parent_xid:
#~ win = Gdk.Window.foreign_new(int(parent_xid))
#~ wnck_get_xid_from_pid(os.getpid())
#~ win = ''
#~ self.review_buffer.set_text(str(win))
#~ if win:
#~ self.submit_window.realize()
#~ self.submit_window.get_window().set_transient_for(win)
self.submit_window.set_position(Gtk.WindowPosition.MOUSE)
self._confirm_cancel_yes_handler = 0
self._confirm_cancel_no_handler = 0
self._displaying_cancel_confirmation = False
self.submit_window.connect("key-press-event", self._on_key_press_event)
self.review_summary_entry.connect('changed',
self._on_mandatory_text_entry_changed)
self.star_rating.connect('changed', self._on_mandatory_fields_changed)
self.review_buffer.connect('changed', self._on_text_entry_changed)
# gwibber stuff
self.gwibber_combo = Gtk.ComboBoxText.new()
#cells = self.gwibber_combo.get_cells()
#cells[0].set_property("ellipsize", pango.ELLIPSIZE_END)
self.gwibber_hbox.pack_start(self.gwibber_combo, True, True, 0)
if "SOFTWARE_CENTER_GWIBBER_MOCK_USERS" in os.environ:
self.gwibber_helper = GwibberHelperMock()
else:
self.gwibber_helper = GwibberHelper()
# get a dict with a saved gwibber_send (boolean) and gwibber
# account_id for persistent state
self.gwibber_prefs = self._get_gwibber_prefs()
# gwibber stuff
self._setup_gwibber_gui()
#now setup rest of app based on whether submit or modify
if self.action == "submit":
self._init_submit()
elif self.action == "modify":
self._init_modify()
def _init_submit(self):
self.submit_window.set_title(_("Review %s") %
gettext.dgettext("app-install-data", self.app.name))
def _init_modify(self):
self._populate_review()
self.submit_window.set_title(_("Modify Your %(appname)s Review") % {
'appname': gettext.dgettext("app-install-data", self.app.name)})
self.button_post.set_label(_("Modify"))
self.SUBMIT_MESSAGE = _("Updating your review")
self.FAILURE_MESSAGE = _("Failed to edit review")
self.SUCCESS_MESSAGE = _("Review updated")
self._enable_or_disable_post_button()
def _populate_review(self):
try:
review_data = self.retrieve_api.get_review(
review_id=self.review_id)
app = Application(appname=review_data.app_name,
pkgname=review_data.package_name)
self.app = app
self.review_summary_entry.set_text(review_data.summary)
self.star_rating.set_rating(review_data.rating)
self.review_buffer.set_text(review_data.review_text)
# save original review field data, for comparison purposes when
# user makes changes to fields
self.orig_summary_text = review_data.summary
self.orig_star_rating = review_data.rating
self.orig_review_text = review_data.review_text
self.version = review_data.version
self.origin = review_data.origin
except piston_mini_client.APIError:
logging.warn(
'Unable to retrieve review id %s for editing. Exiting' %
self.review_id)
self.quit(2)
def _setup_details(self, widget, app, iconname, version, display_name):
# icon shazam
try:
icon = self.icons.load_icon(iconname, self.APP_ICON_SIZE, 0)
except:
icon = self.icons.load_icon(Icons.MISSING_APP, self.APP_ICON_SIZE,
0)
self.review_appicon.set_from_pixbuf(icon)
# title
app = utf8(gettext.dgettext("app-install-data", app.name))
version = utf8(version)
self.review_title.set_markup(
'<b><span size="x-large">%s</span></b>\n%s' % (app, version))
# review label
self.review_label.set_markup(_('Review by: %s') %
display_name.encode('utf8'))
# review summary label
self.review_summary_label.set_markup(_('Summary:'))
#rating label
self.rating_label.set_markup(_('Rating:'))
#error detail link label
self.label_expander.set_markup('<small><u>%s</u></small>' %
(_('Error Details')))
def _has_user_started_reviewing(self):
summary_chars = self.review_summary_entry.get_text_length()
review_chars = self.review_buffer.get_char_count()
return summary_chars > 0 or review_chars > 0
def _on_mandatory_fields_changed(self, *args):
self._enable_or_disable_post_button()
def _on_mandatory_text_entry_changed(self, widget):
self._check_summary_character_count()
self._on_mandatory_fields_changed(widget)
def _on_text_entry_changed(self, widget):
self._check_review_character_count()
self._on_mandatory_fields_changed(widget)
def _enable_or_disable_post_button(self):
summary_chars = self.review_summary_entry.get_text_length()
review_chars = self.review_buffer.get_char_count()
if (summary_chars and summary_chars <= self.SUMMARY_CHAR_LIMITS[0] and
review_chars and review_chars <= self.REVIEW_CHAR_LIMITS[0] and
int(self.star_rating.get_rating()) > 0):
self.button_post.set_sensitive(True)
self._change_status("clear", "")
else:
self.button_post.set_sensitive(False)
self._change_status("clear", "")
        # Set the post button insensitive if the review being modified is the
        # same as what is currently in the UI fields.  Check that the
        # 'original' review attributes exist first, to avoid exceptions when
        # this method is called before the review has been retrieved.
if self.action == 'modify' and hasattr(self, "orig_star_rating"):
if self._modify_review_is_the_same():
self.button_post.set_sensitive(False)
self._change_status("warning", _("Can't submit unmodified"))
else:
self._change_status("clear", "")
def _modify_review_is_the_same(self):
"""checks if review fields are the same as the review being modified
and returns True if so
"""
# perform an initial check on character counts to return False if any
# don't match, avoids doing unnecessary string comparisons
if (self.review_summary_entry.get_text_length() !=
len(self.orig_summary_text) or
self.review_buffer.get_char_count() != len(self.orig_review_text)):
return False
#compare rating
if self.star_rating.get_rating() != self.orig_star_rating:
return False
#compare summary text
if (self.review_summary_entry.get_text().decode('utf-8') !=
self.orig_summary_text):
return False
#compare review text
if (self.review_buffer.get_text(
self.review_buffer.get_start_iter(),
self.review_buffer.get_end_iter(),
include_hidden_chars=False).decode('utf-8') !=
self.orig_review_text):
return False
return True
def _check_summary_character_count(self):
summary_chars = self.review_summary_entry.get_text_length()
if summary_chars > self.SUMMARY_CHAR_LIMITS[1] - 1:
markup = self._get_fade_colour_markup(
self.NORMAL_COLOUR, self.ERROR_COLOUR,
self.SUMMARY_CHAR_LIMITS[2], self.SUMMARY_CHAR_LIMITS[0],
summary_chars)
self.summary_char_label.set_markup(markup)
else:
self.summary_char_label.set_text('')
def _check_review_character_count(self):
review_chars = self.review_buffer.get_char_count()
if review_chars > self.REVIEW_CHAR_LIMITS[1] - 1:
markup = self._get_fade_colour_markup(
self.NORMAL_COLOUR, self.ERROR_COLOUR,
self.REVIEW_CHAR_LIMITS[2], self.REVIEW_CHAR_LIMITS[0],
review_chars)
self.review_char_label.set_markup(markup)
else:
self.review_char_label.set_text('')
def _get_fade_colour_markup(self, full_col, empty_col, cmin, cmax, curr):
"""takes two colours as well as a minimum and maximum value then
fades one colour into the other based on the proportion of the
current value between the min and max
returns a pango color string
"""
markup = '<span fgcolor="#%s">%s</span>'
if curr > cmax:
return markup % (empty_col, str(cmax - curr))
elif curr <= cmin: # saves division by 0 later if cmin == cmax
return markup % (full_col, str(cmax - curr))
else:
#distance between min and max values to fade colours
scale = cmax - cmin
#percentage to fade colour by, based on current number of chars
percentage = (curr - cmin) / float(scale)
full_rgb = self._convert_html_to_rgb(full_col)
empty_rgb = self._convert_html_to_rgb(empty_col)
#calc changes to each of the r g b values to get the faded colour
red_change = full_rgb[0] - empty_rgb[0]
green_change = full_rgb[1] - empty_rgb[1]
blue_change = full_rgb[2] - empty_rgb[2]
new_red = int(full_rgb[0] - (percentage * red_change))
new_green = int(full_rgb[1] - (percentage * green_change))
new_blue = int(full_rgb[2] - (percentage * blue_change))
return_color = self._convert_rgb_to_html(new_red, new_green,
new_blue)
return markup % (return_color, str(cmax - curr))
def _convert_html_to_rgb(self, html):
r = html[0:2]
g = html[2:4]
b = html[4:6]
return (int(r, 16), int(g, 16), int(b, 16))
def _convert_rgb_to_html(self, r, g, b):
return "%s%s%s" % ("%02X" % r,
"%02X" % g,
"%02X" % b)
def on_button_post_clicked(self, button):
logging.debug("enter_review ok button")
review = Review(self.app)
text_buffer = self.textview_review.get_buffer()
review.text = text_buffer.get_text(text_buffer.get_start_iter(),
text_buffer.get_end_iter(),
False) # include_hidden_chars
review.summary = self.review_summary_entry.get_text()
review.date = datetime.datetime.now()
review.language = get_language()
review.rating = int(self.star_rating.get_rating())
review.package_version = self.version
review.origin = self.origin
if self.action == "submit":
self.api.submit_review(review)
elif self.action == "modify":
changes = {'review_text': review.text,
'summary': review.summary,
'rating': review.rating}
self.api.modify_review(self.review_id, changes)
def login_successful(self, display_name):
self.main_notebook.set_current_page(1)
self._setup_details(self.submit_window, self.app,
self.iconname, self.version, display_name)
self.textview_review.grab_focus()
def _setup_gwibber_gui(self):
self.gwibber_accounts = self.gwibber_helper.accounts()
list_length = len(self.gwibber_accounts)
if list_length == 0:
self._on_no_gwibber_accounts()
elif list_length == 1:
self._on_one_gwibber_account()
else:
self._on_multiple_gwibber_accounts()
def _get_gwibber_prefs(self):
if self.config.has_option("reviews", "gwibber_send"):
send = self.config.getboolean("reviews", "gwibber_send")
else:
send = False
if self.config.has_option("reviews", "account_id"):
account_id = self.config.get("reviews", "account_id")
else:
account_id = False
return {
"gwibber_send": send,
"account_id": account_id
}
def _on_no_gwibber_accounts(self):
self.gwibber_hbox.hide()
self.gwibber_checkbutton.set_active(False)
def _on_one_gwibber_account(self):
account = self.gwibber_accounts[0]
self.gwibber_hbox.show()
self.gwibber_combo.hide()
from softwarecenter.utils import utf8
acct_text = utf8(_("Also post this review to %s (@%s)")) % (
utf8(account['service'].capitalize()), utf8(account['username']))
self.gwibber_checkbutton.set_label(acct_text)
# simplifies on_transmit_successful later
self.gwibber_combo.append_text(acct_text)
self.gwibber_combo.set_active(0)
# auto select submit via gwibber checkbutton if saved prefs say True
self.gwibber_checkbutton.set_active(self.gwibber_prefs['gwibber_send'])
def _on_multiple_gwibber_accounts(self):
self.gwibber_hbox.show()
self.gwibber_combo.show()
# setup accounts combo
self.gwibber_checkbutton.set_label(_("Also post this review to: "))
for account in self.gwibber_accounts:
acct_text = "%s (@%s)" % (
account['service'].capitalize(), account['username'])
self.gwibber_combo.append_text(acct_text)
# add "all" to both combo and accounts (the later is only pseudo)
self.gwibber_combo.append_text(_("All my Gwibber services"))
self.gwibber_accounts.append({"id": "pseudo-sc-all"})
# reapply preferences
self.gwibber_checkbutton.set_active(self.gwibber_prefs['gwibber_send'])
gwibber_active_account = 0
for account in self.gwibber_accounts:
if account['id'] == self.gwibber_prefs['account_id']:
gwibber_active_account = self.gwibber_accounts.index(account)
self.gwibber_combo.set_active(gwibber_active_account)
def _post_to_one_gwibber_account(self, msg, account):
""" little helper to facilitate posting message to twitter account
passed in
"""
status_text = _("Posting to %s") % utf8(
account['service'].capitalize())
self._change_status("progress", status_text)
return self.gwibber_helper.send_message(msg, account['id'])
def on_transmit_success(self, api, trans):
"""on successful submission of a review, try to send to gwibber as
well
"""
self._run_gwibber_submits(api, trans)
def _on_key_press_event(self, widget, event):
if event.keyval == Gdk.KEY_Escape:
self._confirm_cancellation()
def _confirm_cancellation(self):
if (self._has_user_started_reviewing() and not
self._displaying_cancel_confirmation):
def do_cancel(widget):
self.submit_window.destroy()
self.quit()
def undo_cancel(widget):
self._displaying_cancel_confirmation = False
self.response_hbuttonbox.set_visible(True)
self.main_notebook.set_current_page(1)
self.response_hbuttonbox.set_visible(False)
self.confirm_cancel_yes.grab_focus()
self.main_notebook.set_current_page(2)
self._displaying_cancel_confirmation = True
if not self._confirm_cancel_yes_handler:
tag = self.confirm_cancel_yes.connect("clicked", do_cancel)
self._confirm_cancel_yes_handler = tag
if not self._confirm_cancel_no_handler:
tag = self.confirm_cancel_no.connect("clicked", undo_cancel)
self._confirm_cancel_no_handler = tag
else:
self.submit_window.destroy()
self.quit()
def _get_send_accounts(self, sel_index):
"""return the account referenced by the passed in index, or all
accounts if the index of the combo points to the pseudo-sc-all
string
"""
if self.gwibber_accounts[sel_index]["id"] == "pseudo-sc-all":
return self.gwibber_accounts
else:
return [self.gwibber_accounts[sel_index]]
def _submit_to_gwibber(self, msg, send_accounts):
"""for each send_account passed in, try to submit to gwibber
then return a list of accounts that failed to submit (empty list
if all succeeded)
"""
#list of gwibber accounts that failed to submit, used later to allow
# selective re-send if user desires
failed_accounts = []
for account in send_accounts:
if account["id"] != "pseudo-sc-all":
if not self._post_to_one_gwibber_account(msg, account):
failed_accounts.append(account)
return failed_accounts
def _run_gwibber_submits(self, api, trans):
"""check if gwibber send should occur and send via gwibber if so"""
gwibber_success = True
using_gwibber = self.gwibber_checkbutton.get_active()
if using_gwibber:
i = self.gwibber_combo.get_active()
msg = (self._gwibber_message())
send_accounts = self._get_send_accounts(i)
self._save_gwibber_state(True, self.gwibber_accounts[i]['id'])
#tries to send to gwibber, and gets back any failed accounts
failed_accounts = self._submit_to_gwibber(msg, send_accounts)
if len(failed_accounts) > 0:
gwibber_success = False
#FIXME: send an error string to this method instead of empty
# string
self._on_gwibber_fail(api, trans, failed_accounts, "")
else:
# prevent _save_gwibber_state from overwriting the account id
# in config if the checkbutton was not selected
self._save_gwibber_state(False, None)
# run parent handler on gwibber success, otherwise this will be dealt
# with in _on_gwibber_fail
if gwibber_success:
self._success_status()
BaseApp.on_transmit_success(self, api, trans)
def _gwibber_retry_some(self, api, trans, accounts):
""" perform selective retrying of gwibber posting, using only
accounts passed in
"""
gwibber_success = True
failed_accounts = []
msg = (self._gwibber_message())
for account in accounts:
if not self._post_to_one_gwibber_account(msg, account):
failed_accounts.append(account)
gwibber_success = False
if not gwibber_success:
#FIXME: send an error string to this method instead of empty string
self._on_gwibber_fail(api, trans, failed_accounts, "")
else:
self._success_status()
BaseApp.on_transmit_success(self, api, trans)
def _success_status(self):
"""Updates status area to show success for 2 seconds then allows
window to proceed
"""
self._change_status("success", _(self.SUCCESS_MESSAGE))
while Gtk.events_pending():
Gtk.main_iteration()
time.sleep(2)
def _on_gwibber_fail(self, api, trans, failed_accounts, error):
self._change_status("fail", _("Problems posting to Gwibber"))
#list to hold service strings in the format: "Service (@username)"
failed_services = []
for account in failed_accounts:
failed_services.append("%s (@%s)" % (
account['service'].capitalize(), account['username']))
glade_dialog = SimpleGtkbuilderDialog(self.datadir,
domain="software-center")
dialog = glade_dialog.dialog_gwibber_error
dialog.set_transient_for(self.submit_window)
# build the failure string
# TRANSLATORS: the part in %s can either be a single entry
# like "facebook" or a string like
# "factbook and twister"
error_str = gettext.ngettext(
"There was a problem posting this review to %s.",
"There was a problem posting this review to %s.",
len(failed_services))
error_str = make_string_from_list(error_str, failed_services)
dialog.set_markup(error_str)
dialog.format_secondary_text(error)
result = dialog.run()
dialog.destroy()
        if result == Gtk.ResponseType.ACCEPT:
self._gwibber_retry_some(api, trans, failed_accounts)
else:
BaseApp.on_transmit_success(self, api, trans)
def _save_gwibber_state(self, gwibber_send, account_id):
if not self.config.has_section("reviews"):
self.config.add_section("reviews")
self.config.set("reviews", "gwibber_send", str(gwibber_send))
if account_id:
self.config.set("reviews", "account_id", account_id)
self.config.write()
def _gwibber_message(self, max_len=140):
""" build a gwibber message of max_len"""
def _gwibber_message_string_from_data(appname, rating, summary, link):
""" helper so that we do not duplicate the "reviewed..." string """
return _("reviewed %(appname)s in Ubuntu: %(rating)s "
"%(summary)s %(link)s") % {
'appname': appname,
'rating': rating,
'summary': summary,
'link': link}
rating = self.star_rating.get_rating()
rating_string = ''
#fill star ratings for string
for i in range(1, 6):
if i <= rating:
rating_string = rating_string + u"\u2605"
else:
rating_string = rating_string + u"\u2606"
review_summary_text = self.review_summary_entry.get_text()
# FIXME: currently the link is not useful (at all) for most
        # people not running ubuntu
#app_link = "http://apt.ubuntu.com/p/%s" % self.app.pkgname
app_link = ""
gwib_msg = _gwibber_message_string_from_data(
self.app.name, rating_string, review_summary_text, app_link)
#check char count and ellipsize review summary if larger than 140 chars
if len(gwib_msg) > max_len:
chars_to_reduce = len(gwib_msg) - (max_len - 1)
new_char_count = len(review_summary_text) - chars_to_reduce
review_summary_text = (review_summary_text[:new_char_count] +
u"\u2026")
gwib_msg = _gwibber_message_string_from_data(
self.app.name, rating_string, review_summary_text, app_link)
return gwib_msg
class ReportReviewApp(BaseApp):
""" report a given application or package """
APP_ICON_SIZE = 48
SUBMIT_MESSAGE = _(u"Sending report\u2026")
FAILURE_MESSAGE = _("Failed to submit report")
def __init__(self, review_id, parent_xid, datadir):
BaseApp.__init__(self, datadir, "report_abuse.ui")
# status
self._add_spellcheck_to_textview(self.textview_report)
## make button sensitive when textview has content
self.textview_report.get_buffer().connect(
"changed", self._enable_or_disable_report_button)
# data
self.review_id = review_id
# title
self.submit_window.set_title(_("Flag as Inappropriate"))
# parent xid
#if parent_xid:
# #win = Gtk.gdk.window_foreign_new(int(parent_xid))
# if win:
# self.submit_window.realize()
# self.submit_window.window.set_transient_for(win)
# mousepos
self.submit_window.set_position(Gtk.WindowPosition.MOUSE)
# simple APIs ftw!
self.combobox_report_summary = Gtk.ComboBoxText.new()
self.report_body_vbox.pack_start(self.combobox_report_summary, False,
False, 0)
self.report_body_vbox.reorder_child(self.combobox_report_summary, 2)
self.combobox_report_summary.show()
for term in [_(u"Please make a selection\u2026"),
# TRANSLATORS: The following is one entry in a combobox that is
# located directly beneath a label asking 'Why is this review
# inappropriate?'.
# This text refers to a possible reason for why the corresponding
# review is being flagged as inappropriate.
_("Offensive language"),
# TRANSLATORS: The following is one entry in a combobox that is
# located directly beneath a label asking 'Why is this review
# inappropriate?'.
# This text refers to a possible reason for why the corresponding
# review is being flagged as inappropriate.
_("Infringes copyright"),
# TRANSLATORS: The following is one entry in a combobox that is
# located directly beneath a label asking 'Why is this review
# inappropriate?'.
# This text refers to a possible reason for why the corresponding
# review is being flagged as inappropriate.
_("Contains inaccuracies"),
# TRANSLATORS: The following is one entry in a combobox that is
# located directly beneath a label asking 'Why is this review
# inappropriate?'.
# This text refers to a possible reason for why the corresponding
# review is being flagged as inappropriate.
_("Other")]:
self.combobox_report_summary.append_text(term)
self.combobox_report_summary.set_active(0)
self.combobox_report_summary.connect(
"changed", self._enable_or_disable_report_button)
def _enable_or_disable_report_button(self, widget):
if (self.textview_report.get_buffer().get_char_count() > 0 and
self.combobox_report_summary.get_active() != 0):
self.button_post.set_sensitive(True)
else:
self.button_post.set_sensitive(False)
def _setup_details(self, widget, display_name):
# report label
self.report_label.set_markup(_('Please give details:'))
# review summary label
self.report_summary_label.set_markup(
_('Why is this review inappropriate?'))
#error detail link label
self.label_expander.set_markup('<small><u>%s</u></small>'
% (_('Error Details')))
def on_button_post_clicked(self, button):
logging.debug("report_abuse ok button")
report_summary = self.combobox_report_summary.get_active_text()
text_buffer = self.textview_report.get_buffer()
report_text = text_buffer.get_text(text_buffer.get_start_iter(),
text_buffer.get_end_iter(),
include_hidden_chars=False)
self.api.report_abuse(self.review_id, report_summary, report_text)
def login_successful(self, display_name):
logging.debug("login_successful")
self.main_notebook.set_current_page(1)
#self.label_reporter.set_text(display_name)
self._setup_details(self.submit_window, display_name)
class SubmitUsefulnessApp(BaseApp):
SUBMIT_MESSAGE = _(u"Sending usefulness\u2026")
def __init__(self, review_id, parent_xid, is_useful, datadir):
BaseApp.__init__(self, datadir, "submit_usefulness.ui")
# data
self.review_id = review_id
self.is_useful = bool(is_useful)
# no UI except for error conditions
self.parent_xid = parent_xid
# override behavior of baseapp here as we don't actually
# have a UI by default
def _get_parent_xid_for_login_window(self):
return self.parent_xid
def login_successful(self, display_name):
logging.debug("submit usefulness")
self.main_notebook.set_current_page(1)
self.api.submit_usefulness(self.review_id, self.is_useful)
def on_transmit_failure(self, api, trans, error):
logging.warn("exiting - error: %s" % error)
self.api.shutdown()
self.quit(2)
# override parents run to only trigger login (and subsequent
# events) but no UI, if this is commented out, there is some
# stub ui that can be useful for testing
def run(self):
self.login()
# override UI update methods from BaseApp to prevent them
# causing errors if called when UI is hidden
def _clear_status_imagery(self):
pass
def _change_status(self, type, message):
pass
class DeleteReviewApp(BaseApp):
SUBMIT_MESSAGE = _(u"Deleting review\u2026")
FAILURE_MESSAGE = _("Failed to delete review")
def __init__(self, review_id, parent_xid, datadir):
# uses same UI as submit usefulness because
# (a) it isn't shown and (b) it's similar in usage
BaseApp.__init__(self, datadir, "submit_usefulness.ui")
# data
self.review_id = review_id
# no UI except for error conditions
self.parent_xid = parent_xid
# override behavior of baseapp here as we don't actually
# have a UI by default
def _get_parent_xid_for_login_window(self):
return self.parent_xid
def login_successful(self, display_name):
logging.debug("delete review")
self.main_notebook.set_current_page(1)
self.api.delete_review(self.review_id)
def on_transmit_failure(self, api, trans, error):
logging.warn("exiting - error: %s" % error)
self.api.shutdown()
self.quit(2)
# override parents run to only trigger login (and subsequent
# events) but no UI, if this is commented out, there is some
# stub ui that can be useful for testing
def run(self):
self.login()
# override UI update methods from BaseApp to prevent them
# causing errors if called when UI is hidden
def _clear_status_imagery(self):
pass
def _change_status(self, type, message):
pass
|
gusDuarte/software-center-5.2
|
softwarecenter/ui/gtk3/review_gui_helper.py
|
Python
|
lgpl-3.0
| 55,851
|
__all__ = ['DirectEntryScroll']
from pandac.PandaModules import *
import DirectGuiGlobals as DGG
from DirectScrolledFrame import *
from DirectFrame import *
from DirectEntry import *
class DirectEntryScroll(DirectFrame):
def __init__(self, entry, parent = None, **kw):
optiondefs = (
('pgFunc', PGVirtualFrame, None),
('relief', None, None),
('clipSize', (-1, 1, -1, 1), self.setClipSize),
)
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, parent, **kw)
self.canvas = None
self.visXMin = 0.0
self.visXMax = 0.0
self.clipXMin = 0.0
self.clipXMax = 0.0
self.initialiseoptions(DirectEntryScroll)
# don't set a scale on the entry
# instead make it the correct size, use something like:
# text_scale = 0.035,
# frameSize = (-0.006, 3.2, -0.015, 0.036),
# if you need to scale the entry scale it's parent instead
self.entry = entry
self.canvas = NodePath(self.guiItem.getCanvasNode())
self.entry.reparentTo(self.canvas)
self.canvas.setPos(0,0,0)
self.entry.bind(DGG.CURSORMOVE,self.cursorMove)
self.canvas.node().setBounds(OmniBoundingVolume())
self.canvas.node().setFinal(1)
self.resetCanvas()
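    # Hedged usage sketch (option values are illustrative): wrap an existing
    # DirectEntry sized via text_scale/frameSize rather than a scale, e.g.
    #
    #   entry = DirectEntry(text_scale=0.035,
    #                       frameSize=(-0.006, 3.2, -0.015, 0.036),
    #                       width=20)
    #   scroll = DirectEntryScroll(entry, parent=aspect2d,
    #                              clipSize=(-0.01, 1.1, -0.02, 0.05))
    #
    # The frame clips the entry to clipSize and recenters the canvas around
    # the text cursor as it moves (see cursorMove/moveToCenterCursor below).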
def cursorMove(self, cursorX, cursorY):
cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
canvasX = self.canvas.getX()
visXMin = self.clipXMin - canvasX
visXMax = self.clipXMax - canvasX
visXCenter = (visXMin + visXMax) * 0.5
distanceToCenter = visXCenter - cursorX
clipExtent = self.clipXMax - self.clipXMin
entryExtent = self.entry['text_scale'][0] * self.entry['width']
entryWiggle = entryExtent - clipExtent
if abs(distanceToCenter) > (clipExtent * 0.5):
self.moveToCenterCursor()
def moveToCenterCursor(self):
cursorX = self.entry.guiItem.getCursorX() * self.entry['text_scale'][0]
canvasX = self.canvas.getX()
visXMin = self.clipXMin - canvasX
visXMax = self.clipXMax - canvasX
visXCenter = (visXMin + visXMax) * 0.5
distanceToCenter = visXCenter - cursorX
newX = canvasX + distanceToCenter
clipExtent = self.clipXMax - self.clipXMin
entryExtent = self.entry['text_scale'][0] * self.entry['width']
entryWiggle = entryExtent - clipExtent
if self.entry.guiItem.getCursorPosition() <= 0: #deals with the cursor jump bug
newX = 0.0
elif newX > 0.0:
newX = 0.0
elif newX < (-entryWiggle):
newX = -entryWiggle
#print("CursorX %s CanvasX %s VisCenter %s Distance %s NewX %s Wiggle %s" % (cursorX, canvasX, visXCenter, distanceToCenter, newX, entryWiggle))
self.canvas.setX(newX)
def destroy(self):
# Destroy children of the canvas
for child in self.canvas.getChildren():
childGui = self.guiDict.get(child.getName())
if childGui:
childGui.destroy()
else:
parts = child.getName().split('-')
simpleChildGui = self.guiDict.get(parts[-1])
if simpleChildGui:
simpleChildGui.destroy()
self.entry.destroy()
self.entry = None
DirectFrame.destroy(self)
def getCanvas(self):
return self.canvas
def setClipSize(self):
self.guiItem.setClipFrame(self['clipSize'])
self.clipXMin = self['clipSize'][0]
self.clipXMax = self['clipSize'][1]
self.visXMin = self.clipXMin
self.visXMax = self.clipXMax
if self.canvas:
self.resetCanvas()
def resetCanvas(self):
self.canvas.setPos(0,0,0)
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/pgui/DirectEntryScroll.py
|
Python
|
apache-2.0
| 4,075
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
from neutron.db import migration
"""fip qos
Revision ID: 594422d373ee
Revises: 7d32f979895f
Create Date: 2016-04-26 17:16:10.323756
"""
# revision identifiers, used by Alembic.
revision = '594422d373ee'
down_revision = '7d32f979895f'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]
def upgrade():
op.create_table(
'qos_fip_policy_bindings',
sa.Column('policy_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('qos_policies.id', ondelete='CASCADE'),
nullable=False),
sa.Column('fip_id',
sa.String(length=db_const.UUID_FIELD_SIZE),
sa.ForeignKey('floatingips.id', ondelete='CASCADE'),
nullable=False, unique=True))
|
noironetworks/neutron
|
neutron/db/migration/alembic_migrations/versions/queens/expand/594422d373ee_fip_qos.py
|
Python
|
apache-2.0
| 1,485
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import glob
import os
import pexpect
import re
import tempfile
from trove.guestagent.strategies.restore import base
from trove.openstack.common import log as logging
from trove.common import exception
from trove.common import utils
import trove.guestagent.datastore.mysql.service as dbaas
from trove.common.i18n import _ # noqa
LOG = logging.getLogger(__name__)
class MySQLRestoreMixin(object):
"""Common utils for restoring MySQL databases."""
RESET_ROOT_RETRY_TIMEOUT = 100
RESET_ROOT_SLEEP_INTERVAL = 10
# Reset the root password in a single transaction with 'FLUSH PRIVILEGES'
# to ensure we never leave database wide open without 'grant tables'.
RESET_ROOT_MYSQL_COMMANDS = ("START TRANSACTION;",
"UPDATE `mysql`.`user` SET"
" `password`=PASSWORD('')"
" WHERE `user`='root';",
"FLUSH PRIVILEGES;",
"COMMIT;")
# This is a suffix MySQL appends to the file name given in
# the '--log-error' startup parameter.
_ERROR_LOG_SUFFIX = '.err'
    _ERROR_MESSAGE_PATTERN = re.compile(r"^ERROR:\s+.+$")
def mysql_is_running(self):
try:
utils.execute_with_timeout("/usr/bin/mysqladmin", "ping")
LOG.debug("MySQL is up and running.")
return True
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return False
def mysql_is_not_running(self):
try:
utils.execute_with_timeout("/usr/bin/pgrep", "mysqld")
LOG.info("MySQL is still running.")
return False
except exception.ProcessExecutionError:
LOG.debug("MySQL is not running.")
return True
def poll_until_then_raise(self, event, exc):
try:
utils.poll_until(event,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
except exception.PollTimeOut:
raise exc
def _start_mysqld_safe_with_init_file(self, init_file, err_log_file):
child = pexpect.spawn("sudo mysqld_safe"
" --skip-grant-tables"
" --skip-networking"
" --init-file='%s'"
" --log-error='%s'" %
(init_file.name, err_log_file.name)
)
try:
i = child.expect(['Starting mysqld daemon'])
if i == 0:
LOG.info(_("Starting MySQL"))
except pexpect.TIMEOUT:
LOG.exception(_("Got a timeout launching mysqld_safe"))
finally:
# There is a race condition here where we kill mysqld before
            # the init file has been executed. We need to ensure mysqld is up.
#
# mysqld_safe will start even if init-file statement(s) fail.
# We therefore also check for errors in the log file.
self.poll_until_then_raise(
self.mysql_is_running,
base.RestoreError("Reset root password failed:"
" mysqld did not start!"))
first_err_message = self._find_first_error_message(err_log_file)
if first_err_message:
raise base.RestoreError("Reset root password failed: %s"
% first_err_message)
LOG.info(_("Root password reset successfully."))
LOG.debug("Cleaning up the temp mysqld process.")
utils.execute_with_timeout("mysqladmin", "-uroot", "shutdown")
LOG.debug("Polling for shutdown to complete.")
try:
utils.poll_until(self.mysql_is_not_running,
sleep_time=self.RESET_ROOT_SLEEP_INTERVAL,
time_out=self.RESET_ROOT_RETRY_TIMEOUT)
LOG.debug("Database successfully shutdown")
except exception.PollTimeOut:
LOG.debug("Timeout shutting down database "
"- performing killall on mysqld_safe.")
utils.execute_with_timeout("killall", "mysqld_safe",
root_helper="sudo",
run_as_root=True)
self.poll_until_then_raise(
self.mysql_is_not_running,
base.RestoreError("Reset root password failed: "
"mysqld did not stop!"))
def reset_root_password(self):
with tempfile.NamedTemporaryFile() as init_file:
utils.execute_with_timeout("sudo", "chmod", "a+r", init_file.name)
self._writelines_one_per_line(init_file,
self.RESET_ROOT_MYSQL_COMMANDS)
# Do not attempt to delete the file as the 'trove' user.
# The process writing into it may have assumed its ownership.
# Only owners can delete temporary
# files (restricted deletion).
err_log_file = tempfile.NamedTemporaryFile(
suffix=self._ERROR_LOG_SUFFIX,
delete=False)
try:
self._start_mysqld_safe_with_init_file(init_file, err_log_file)
finally:
err_log_file.close()
MySQLRestoreMixin._delete_file(err_log_file.name)
def _writelines_one_per_line(self, fp, lines):
fp.write(os.linesep.join(lines))
fp.flush()
def _find_first_error_message(self, fp):
if MySQLRestoreMixin._is_non_zero_file(fp):
return MySQLRestoreMixin._find_first_pattern_match(
fp,
self._ERROR_MESSAGE_PATTERN
)
return None
@classmethod
    def _delete_file(cls, file_path):
"""Force-remove a given file as root.
Do not raise an exception on failure.
"""
if os.path.isfile(file_path):
try:
utils.execute_with_timeout("rm", "-f", file_path,
run_as_root=True,
root_helper="sudo")
except Exception:
LOG.exception("Could not remove file: '%s'" % file_path)
@classmethod
    def _is_non_zero_file(cls, fp):
file_path = fp.name
return os.path.isfile(file_path) and (os.path.getsize(file_path) > 0)
@classmethod
    def _find_first_pattern_match(cls, fp, pattern):
for line in fp:
if pattern.match(line):
return line
return None
class MySQLDump(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for MySQLDump."""
__strategy_name__ = 'mysqldump'
base_restore_cmd = 'sudo mysql'
class InnoBackupEx(base.RestoreRunner, MySQLRestoreMixin):
"""Implementation of Restore Strategy for InnoBackupEx."""
__strategy_name__ = 'innobackupex'
base_restore_cmd = 'sudo xbstream -x -C %(restore_location)s'
base_prepare_cmd = ('sudo innobackupex --apply-log %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupEx, self).__init__(*args, **kwargs)
self.prepare_cmd = self.base_prepare_cmd % kwargs
self.prep_retcode = None
def pre_restore(self):
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.stop_db()
LOG.info(_("Cleaning out restore location: %s."),
self.restore_location)
utils.execute_with_timeout("chmod", "-R", "0777",
self.restore_location,
root_helper="sudo",
run_as_root=True)
utils.clean_out(self.restore_location)
def _run_prepare(self):
LOG.debug("Running innobackupex prepare: %s.", self.prepare_cmd)
self.prep_retcode = utils.execute(self.prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def post_restore(self):
self._run_prepare()
utils.execute_with_timeout("chown", "-R", "-f", "mysql",
self.restore_location,
root_helper="sudo",
run_as_root=True)
self._delete_old_binlogs()
self.reset_root_password()
app = dbaas.MySqlApp(dbaas.MySqlAppStatus.get())
app.start_mysql()
def _delete_old_binlogs(self):
files = glob.glob(os.path.join(self.restore_location, "ib_logfile*"))
for f in files:
os.unlink(f)
class InnoBackupExIncremental(InnoBackupEx):
__strategy_name__ = 'innobackupexincremental'
incremental_prep = ('sudo innobackupex'
' --apply-log'
' --redo-only'
' %(restore_location)s'
' --defaults-file=%(restore_location)s/backup-my.cnf'
' --ibbackup xtrabackup'
' %(incremental_args)s'
' 2>/tmp/innoprepare.log')
def __init__(self, *args, **kwargs):
super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
self.restore_location = kwargs.get('restore_location')
self.content_length = 0
def _incremental_restore_cmd(self, incremental_dir):
"""Return a command for a restore with a incremental location."""
args = {'restore_location': incremental_dir}
return (self.decrypt_cmd +
self.unzip_cmd +
(self.base_restore_cmd % args))
def _incremental_prepare_cmd(self, incremental_dir):
if incremental_dir is not None:
incremental_arg = '--incremental-dir=%s' % incremental_dir
else:
incremental_arg = ''
args = {
'restore_location': self.restore_location,
'incremental_args': incremental_arg,
}
return self.incremental_prep % args
def _incremental_prepare(self, incremental_dir):
prepare_cmd = self._incremental_prepare_cmd(incremental_dir)
LOG.debug("Running innobackupex prepare: %s.", prepare_cmd)
utils.execute(prepare_cmd, shell=True)
LOG.info(_("Innobackupex prepare finished successfully."))
def _incremental_restore(self, location, checksum):
"""Recursively apply backups from all parents.
If we are the parent then we restore to the restore_location and
we apply the logs to the restore_location only.
Otherwise if we are an incremental we restore to a subfolder to
prevent stomping on the full restore data. Then we run apply log
with the '--incremental-dir' flag
"""
metadata = self.storage.load_metadata(location, checksum)
incremental_dir = None
if 'parent_location' in metadata:
LOG.info(_("Restoring parent: %(parent_location)s"
" checksum: %(parent_checksum)s.") % metadata)
parent_location = metadata['parent_location']
parent_checksum = metadata['parent_checksum']
            # Restore parents recursively so backups are applied sequentially
self._incremental_restore(parent_location, parent_checksum)
# for *this* backup set the incremental_dir
# just use the checksum for the incremental path as it is
# sufficiently unique /var/lib/mysql/<checksum>
incremental_dir = os.path.join(self.restore_location, checksum)
utils.execute("mkdir", "-p", incremental_dir,
root_helper="sudo",
run_as_root=True)
command = self._incremental_restore_cmd(incremental_dir)
else:
# The parent (full backup) use the same command from InnobackupEx
# super class and do not set an incremental_dir.
command = self.restore_cmd
self.content_length += self._unpack(location, checksum, command)
self._incremental_prepare(incremental_dir)
# Delete unpacked incremental backup metadata
if incremental_dir:
utils.execute("rm", "-fr", incremental_dir, root_helper="sudo",
run_as_root=True)
def _run_restore(self):
"""Run incremental restore.
First grab all parents and prepare them with '--redo-only'. After
all backups are restored the super class InnoBackupEx post_restore
method is called to do the final prepare with '--apply-log'
"""
self._incremental_restore(self.location, self.checksum)
return self.content_length
|
CMSS-BCRDB/RDS
|
trove/guestagent/strategies/restore/mysql_impl.py
|
Python
|
apache-2.0
| 13,702
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.lib.PSQL import PSQL
from mpp.lib.gpdbverify import GpdbVerify
from mpp.lib.config import GPDBConfig
from mpp.models import MPPTestCase
class DbStateClass(MPPTestCase):
def __init__(self,methodName,config=None):
if config is not None:
self.config = config
else:
self.config = GPDBConfig()
self.gpverify = GpdbVerify(config=self.config)
super(DbStateClass,self).__init__(methodName)
def check_system(self):
'''
        @summary: Check whether the system is up and in sync. Exit out if not
'''
cmd ="select count(*) from gp_segment_configuration where content<> -1 ;"
count_all = PSQL.run_sql_command(cmd, flags ='-q -t', dbname='postgres')
cmd ="select count(*) from gp_segment_configuration where content<> -1 and mode = 's' and status = 'u';"
count_up_and_sync = PSQL.run_sql_command(cmd, flags ='-q -t', dbname='postgres')
if count_all.strip() != count_up_and_sync.strip() :
raise Exception('The cluster is not in up/sync ............')
else:
tinctest.logger.info("\n Starting New Test: System is up and in sync .........")
def check_catalog(self,dbname=None, alldb=True, online=False, testname=None, outputFile=None, host=None, port=None):
'''1. Run gpcheckcat'''
(errorCode, hasError, gpcheckcat_output, repairScriptDir) = self.gpverify.gpcheckcat(dbname=dbname, alldb=alldb, online=online, testname=testname, outputFile=outputFile, host=host, port=port)
if errorCode != 0:
raise Exception('GpCheckcat failed with errcode %s '% (errorCode))
def check_mirrorintegrity(self, master=False):
'''Runs checkmirrorintegrity(default), check_mastermirrorintegrity(when master=True) '''
(checkmirror, fix_outfile) = self.gpverify.gpcheckmirrorseg(master=master)
if not checkmirror:
self.fail('Checkmirrorseg failed. Fix file location : %s' %fix_outfile)
tinctest.logger.info('Successfully completed integrity check')
def run_validation(self):
'''
1. gpcheckcat
2. checkmirrorintegrity
3. check_mastermirrorintegrity
'''
self.check_catalog()
self.check_mirrorintegrity()
if self.config.has_master_mirror():
self.check_mirrorintegrity(master=True)
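# Editor's illustrative sketch (not part of the original library): a hypothetical
# TINC test case showing how DbStateClass is typically driven - confirm the cluster
# is up and in sync, run some work, then validate catalog and mirror integrity.
# The class name and SQL below are invented for illustration only.
class ExampleDbStateUsage(DbStateClass):
    def test_sanity_then_validate(self):
        self.check_system()
        PSQL.run_sql_command('select 1;', flags='-q -t', dbname='postgres')
        self.run_validation()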
|
rvs/gpdb
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/lib/dbstate.py
|
Python
|
apache-2.0
| 3,081
|
#!/usr/bin/env python
#
# Copyright (C) 2011 Nigel Bree
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#-----------------------------------------------------------------------------
# This application mainly provides a simple way for installations of the Steam
# Limiter application from http://steam-limiter.googlecode.com to determine
# whether an updated version of the application has been made available. That's
# just a simple matter of fetching some data from a URL that reports the
# current version number and a download URL for the latest installer.
#
# Given the way that web access to the source repository on Google Code works,
# that could in principle be used instead, but a service like this has a couple
# of advantages, and, as with the NSIS installer for the limiter client, it is
# a handy example of how to do such things.
#
# For instance, an additional thing I could add to this is to have an installer
# extension for the limiter client app which can retrieve the client's real IP
# and thus suggest to it the ideal server (or server list) to set as the filter
# default, instead of assuming TelstraClear - TC is the default ISP at present
# since that's my ISP, but the filter app is usable by other New Zealand ISPs
# and it would be nice to be able to make that seamless.
#
import jinja2
import os
import webapp2
import logging
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from google.appengine.api import users, xmpp, mail
# These will most likely eventually become some datastore items in future, but
# making them static will do just to start the update support off.
self_base = 'http://steam-limiter.appspot.com'
if 'Development' in os.environ ['SERVER_SOFTWARE']:
self_base = 'http://localhost:8080'
old_defaults = {
'latest': '0.7.1.0',
'download': 'http://steam-limiter.googlecode.com/files/steamlimit-0.7.1.0.exe'
}
new_defaults = {
'latest': '0.7.1.0',
'download': self_base + '/files/steamlimit-0.7.1.0.exe',
'proxyfilter': 'content*.steampowered.com=%(proxy)s;*.cs.steampowered.com=%(proxy)s',
'proxyallow': '//%(proxy)s=*;//content*.steampowered.com=%(proxy)s;//cs.steampowered.com=%(proxy)s'
}
import app_common
import old_isps
# Data on unmetering for ISPs, for the expanded rule types available in v2 of
# the configuration API, for steam-limiter 0.7.1.0 and later where I can now
# handle rewriting "CS" type servers to normal ones, and where I can thus
# afford to use a simpler rule format for ISPs running proxy servers to do
# unmetering (which is most of them).
#
# For this version, I'm also completely dropping all the old Port 27030 rules.
new_isps = {
- 1: { 'name': 'Unknown', 'server': '0.0.0.0',
'filter': '# No specific content server for your ISP' },
# Note that most NZ Universities appear to have peering with and/or student
# internet provided via Snap! - most I've lumped in as part of Snap! but
# Waikato is a special case having an old netblock with a full class B and
# it is being set as its own case, just using the same rules as Snap! for
# now. I'll call it Lightstream (which is a semi-commercial spinoff used
# for student internet) since that's probably most useful.
# Note that aside from most NZ ISPs not generally understanding the concept
# of giving things like servers DNS names, pretty much all of these are
# filtered so I can't detect whether they support port 80 or not, and none
# of the ISPs document this properly.
# TelstraClear have made wlgwpstmcon01.telstraclear.co.nz go away, but the
# new name is steam.cdn.vodafone.co.nz - Valve have actually just
# started to advertise this server as steam.cdn.vodafone.co.nz, so I'm also
# allowing it that way but it appears to not be blocking requests via the
# content*.steampowered.com name and so it's all good! It appears that
# despite no official announcement, they've actually done something right.
0: { 'name': 'TelstraClear New Zealand',
'proxy': 'steam.cdn.vodafone.co.nz' },
10: { 'name': 'Telstra BigPond Australia',
'proxy': 'steam.content.bigpondgames.com' },
11: { 'name': 'Internode Australia',
'proxy': 'steam.cdn.on.net' },
12: { 'name': 'iiNet Australia',
'proxy': 'steam.cdn.on.net' },
# iPrimus evidently support a small list of Steam servers hosted in some
# regional peering exchanges, and that's it (no proxy servers).
14: { 'name': 'iPrimus Australia',
'filter': 'content*.steampowered.com=steam.ix.asn.au,steam01.qld.ix.asn.au,steam01.vic.ix.asn.au;' +
'*.cs.steampowered.com=valve217.cs.steampowered.com',
'allow': '//*.ix.asn.au=*;//*.steampowered.com=*' },
# Similarly to iPrimus, these small regional ISPs don't document what they
# do, and some of this data may be out of date due to acquisitions, since the
# iiNet group has acquired a lot of regional ISPs.
15: { 'name': 'Westnet Internet Services (Perth, WA)',
'filter': 'content*.steampowered.com=valve217.cs.steampowered.com,files-oc-syd.games.on.net',
'allow': '//steam.cdn.on.net=*' },
16: { 'name': 'Adam Internet (Adelaide, SA)',
'filter': '*:27030=valve217.cs.steampowered.com,files-oc-syd.games.on.net;' +
'content*.steampowered.com=valve217.cs.steampowered.com,files-oc-syd.games.on.net',
'allow': '//steam.cdn.on.net=*' },
17: { 'name': 'EAccess Broadband, Australia',
'filter': '# No known unmetered Steam server' },
# Slots 18-29 are reserved for future Australian ISPs or tertiary institutions.
# Because customers with dual ISP accounts seem to be common in South
# Africa (along with a large fraction of the retail ISPs being pure
# resellers), detection in ZA needs extra work from the client side to
# be sure of what connectivity is present, so there are rule extensions
# to detect dual-ISP situations and prefer the WebAfrica unmetered server
# if there's connectivity to the WebAfrica customer side.
30: { 'name': 'Internet Solutions (Johannesburg, South Africa)', 'server': '196.38.180.3',
'filter': '*:27030=steam.isgaming.co.za',
'allow': '//steam.isgaming.co.za=*',
'test': {
'report': True,
'steam.wa.co.za icmp *.wa.co.za': {
0: {
'ispname': 'WebAfrica/IS dual ISP',
'filterrule': '*:27030=steam.wa.co.za,steam2.wa.co.za;content*.steampowered.com=steam.wa.co.za,steam2.wa.co.za',
'allow': '//*.wa.co.za=*;//content*.steampowered.com=*'
}
}
}
},
31: { 'name': 'webafrica (Cape Town, South Africa)', 'server': '41.185.24.21',
'filter': '*:27030=steam.wa.co.za,steam2.wa.co.za;content*.steampowered.com=steam.wa.co.za,steam2.wa.co.za',
'allow': '//*.wa.co.za=*;//content*.steampowered.com=*'
},
32: { 'name': 'Telkom SAIX, South Africa', 'server': '0.0.0.0',
'filter': '# No known unmetered Steam server',
'test': {
'report': True,
'steam.wa.co.za icmp *.wa.co.za': {
0: {
'ispname': 'WebAfrica/SAIX dual ISP',
'filterrule': '*:27030=steam.wa.co.za,steam2.wa.co.za;content*.steampowered.com=steam.wa.co.za,steam2.wa.co.za',
'allow': '//*.wa.co.za=*;//content*.steampowered.com=*'
}
}
}
},
33: { 'name': 'MWeb, South Africa', 'server': '196.28.69.201',
'filter': '*:27030=196.28.69.201,196.28.169.201',
'test': {
'report': True,
'steam.wa.co.za icmp *.wa.co.za': {
0: {
'ispname': 'WebAfrica/MWeb dual ISP',
'filterrule': '*:27030=steam.wa.co.za,steam2.wa.co.za;content*.steampowered.com=steam.wa.co.za,steam2.wa.co.za',
'allow': '//*.wa.co.za=*;//content*.steampowered.com=*'
}
}
}
},
34: { 'name': 'Cybersmart, South Africa', 'server': '0.0.0.0',
'filter': '# No known Steam server for Cybersmart',
'test': {
'report': True,
'steam.wa.co.za icmp *.wa.co.za': {
0: {
'ispname': 'WebAfrica/Cybersmart dual ISP',
'filterrule': '*:27030=steam.wa.co.za,steam2.wa.co.za;content*.steampowered.com=steam.wa.co.za,steam2.wa.co.za',
'allow': '//*.wa.co.za=*;//content*.steampowered.com=*'
}
}
}
},
# Slots 35-39 are reserved for future South African ISPs
# Slots 40-49 are reserved for future use (used to be for Iceland but that
# country is deprecated as it has no unmetered servers).
# Installs regularly turn up from Google netblocks; possibly this is part
# of sandboxed malware scanning of Google Code downloads, but equally for
# all I know it could be humans, possibly in the Sydney office where they
# develop Google Maps.
50: { 'name': 'Google, Inc', 'server': '0.0.0.0',
'filter': '# What Steam server do Google use...?' },
# I really have no idea what's going on with installs from Comcast netblocks
# so I'd hope one day someone using one bothers to explain it to me. I've
# also seen a few installs from AT&T as well, equally baffling.
60: { 'name': 'Comcast Communications', 'server': '0.0.0.0',
'filter': '# No rules for Comcast, please suggest some!' },
61: { 'name': 'AT&T Internet Services', 'server': '0.0.0.0',
'filter': '# No rules for AT&T, please suggest some!' }
}
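# Editor's illustrative sketch (not part of the original service): shows how the
# '%(proxy)s' placeholders in new_defaults expand for a proxy-style ISP entry
# such as slot 11 above. The helper name is hypothetical; the real expansion is
# presumably done by the service code (app_common) - this only demonstrates the
# data model.
def _example_expand_proxy_rules (isp):
    proxy = isp.get ('proxy')
    if proxy is None:
        # Non-proxy ISPs carry literal 'filter'/'allow' strings instead.
        return isp.get ('filter'), isp.get ('allow')
    return (new_defaults ['proxyfilter'] % { 'proxy': proxy },
            new_defaults ['proxyallow'] % { 'proxy': proxy })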
# Simple utility cliches.
def bundle (handler, isps = new_isps, defaults = new_defaults,
source = None):
return app_common.bundle (handler, isps, defaults, source)
def send (handler, data = None, key = None):
isps = new_isps
defaults = new_defaults
# Decide what rules to serve. If I want to get fancy, I can
# parse out the steam-limiter version from the User-Agent
# header. For now, I just use it to sniff the difference
# between normal browser visits and update tests, and have
# the newer versions of steam-limiter explicitly request
# a "v" rather than having me try and parse/sniff the string
# version they pass in the user agent string.
ver = handler.request.get ('v', default_value = None)
if ver is None or ver == '0':
agent = handler.request.headers ['User-Agent']
if ver == '0' or agent.startswith ('steam-limiter/'):
isps = old_isps.isps
defaults = old_defaults
# Allow manually forcing an IP to override the source host,
# for testing purposes.
alt_addr = handler.request.get ('ip', default_value = None)
if not data:
data = bundle (handler, isps, defaults, alt_addr)
if key:
data = data.get (key)
app_common.send (handler, data)
def expand (handler, name, context):
path = os.path.join (os.path.dirname (__file__), name)
handler.response.out.write (template.render (path, context))
# The landing page for human readers to see
class MainHandler (webapp2.RequestHandler):
def get (self):
context = {
'user': users.get_current_user ()
}
expand (self, 'index.html', context)
# The query page for the latest revision, which can return information about
# the latest version number in various forms
class LatestHandler (webapp2.RequestHandler):
def get (self):
send (self, key = 'latest')
# A query page for the path to the latest download; I used to have an option
# to redirect to the download, but that's now available via /get
#
# This is one of the places versions matter, since older versions won't
# auto-upgrade if we can't point at Google Code. It's likely that I'll have
# two download systems, one for older pre-0.7.1.0 versions and one for the
# newer ones that allows download from this service.
class DownloadHandler (webapp2.RequestHandler):
def get (self):
send (self, key = 'download')
# A query page for exercising the IP->ISP mapping; the bit below for loopback
# is for local testing since that doesn't yield a valid IP for the matching
# algorithm to use.
class IspHandler (webapp2.RequestHandler):
def get (self):
send (self, key = 'ispname')
# Return the newer style of filter list.
class FilterRuleHandler (webapp2.RequestHandler):
def get (self):
send (self, key = 'filterrule')
# Return a customized server list, or the default global one
class AllowHostHandler (webapp2.RequestHandler):
def get (self):
send (self, key = 'allow')
# Return a bundle of all the configuration pieces as a JSON-style
# map.
class BundleHandler (webapp2.RequestHandler):
def get (self):
send (self)
# Feedback model for the feedback submission form to persist
class Feedback (db.Model):
content = db.TextProperty ()
source = db.StringProperty ()
timestamp = db.DateTimeProperty (auto_now = True)
# Handle a feedback form, to allow people to spam me with whatever they like...
# given that currently I'm suffering from a lack of feedback, this is meant
# to help overcome that. We shall see if it works.
class FeedbackHandler (webapp2.RequestHandler):
def get (self):
expand (self, 'feedback.html', { })
def post (self):
text = self.request.get ('content')
if text != '':
item = Feedback (content = text, source = self.request.remote_addr)
item.put ()
notifyOwner (text, 'feedback')
expand (self, 'thanks.html', { })
# Similar to the general text feedback, we can have users upload their custom
# rules as suggestions for future versions or revisions of the rule base now
# that the rulebase exists completely in the webservice.
class UploadedRule (db.Model):
ispName = db.StringProperty ()
filterRule = db.StringProperty (multiline = True)
notes = db.StringProperty (multiline = True)
source = db.StringProperty ()
country = db.StringProperty ()
timestamp = db.DateTimeProperty (auto_now = True)
# Handle a new-rule suggestion form, intended to support a future automatic
# upload of a user's custom rules.
class UploadRuleHandler (webapp2.RequestHandler):
def get (self):
expand (self, 'uploadrule.html', { })
def post (self):
isp = self.request.get ('ispname')
rule = self.request.get ('filterrule')
note = self.request.get ('content')
country = self.request.headers.get ('X-AppEngine-Country')
country = country or 'Unknown'
if rule != '':
item = UploadedRule (ispName = isp, filterRule = rule, notes = note,
source = self.request.remote_addr,
country = country)
item.put ()
notifyOwner (isp + ' ==> ' + rule + '\n' + note, 'rule')
expand (self, 'thanks.html', { })
# Handle a posted report from a special local test - this is primarily used
# in beta builds to see how some of the client-end rule extensions are being
# processed.
class TestReportHandler (webapp2.RequestHandler):
def get (self):
expand (self, 'uploadrule.html', { })
def post (self):
test = self.request.get ('test')
result = self.request.get ('result')
country = self.request.headers.get ('X-AppEngine-Country')
country = country or 'Unknown'
notifyOwner (test + ' ==> ' + result + '\n', 'test')
expand (self, 'thanks.html', { })
# Custom 404 that suggests filing an issue rather than the default blank.
class NotFoundHandler (webapp2.RequestHandler):
def get (self):
self.error (404)
expand (self, 'default_error.html', { })
# Plumb up the GAE boilerplate with a mapping of URLs to handlers.
app = webapp2.WSGIApplication ([('/', MainHandler),
('/latest', LatestHandler),
('/download', DownloadHandler),
('/ispname', IspHandler),
('/filterrule', FilterRuleHandler),
('/allow', AllowHostHandler),
('/all', BundleHandler),
('/feedback', FeedbackHandler),
('/uploadrule', UploadRuleHandler),
('/testreport', TestReportHandler),
('/.*', NotFoundHandler)],
debug = True)
def main ():
app.run ()
if __name__ == '__main__':
main ()
|
uvbs/steam-limiter
|
updateapp/main.py
|
Python
|
bsd-2-clause
| 18,251
|
""" CryptoPy - a pure python cryptographic libraries
crypto.passwords package
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
"""
|
realms-team/solmanager
|
libs/smartmeshsdk-REL-1.3.0.1/external_libs/cryptopy/crypto/passwords/__init__.py
|
Python
|
bsd-3-clause
| 179
|
from itty import *
@error(500)
def my_great_500(request, exception):
html_output = """
<html>
<head>
<title>Application Error! OH NOES!</title>
</head>
<body>
<h1>OH NOES!</h1>
<p>Yep, you broke it.</p>
<p>Exception: %s</p>
</body>
</html>
""" % exception
response = Response(html_output, status=500)
return response.send(request._start_response)
@get('/hello')
def hello(request):
return 'Hello errors!'
@get('/test_404')
def test_404(request):
raise NotFound('Not here, sorry.')
return 'This should never happen.'
@get('/test_500')
def test_500(request):
raise AppError('Oops.')
return 'This should never happen either.'
@get('/test_other')
def test_other(request):
raise RuntimeError('Oops.')
return 'This should never happen either either.'
@get('/test_403')
def test_403(request):
raise Forbidden('No soup for you!')
return 'This should never happen either either either.'
@get('/test_redirect')
def test_redirect(request):
raise Redirect('/hello')
run_itty()
|
husio/itty
|
examples/error_handling.py
|
Python
|
bsd-3-clause
| 1,152
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display first what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.iteritems():
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = pl.figure(fignum, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
pl.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
pl.show()
|
florian-f/sklearn
|
examples/cluster/plot_cluster_iris.py
|
Python
|
bsd-3-clause
| 2,573
|
"""Unit tests for collections.defaultdict."""
import os
import copy
import pickle
import tempfile
import unittest
from collections import defaultdict
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError as err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<class 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d1, file=f)
print(d2, file=f)
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError as err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def test_recursive_repr(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertRegex(repr(d),
r"defaultdict\(<bound method .*sub\._factory "
r"of defaultdict\(\.\.\., \{\}\)>, \{\}\)")
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d, file=f)
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_pickling(self):
d = defaultdict(int)
d[1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
o = pickle.loads(s)
self.assertEqual(d, o)
if __name__ == "__main__":
unittest.main()
|
yotchang4s/cafebabepy
|
src/main/python/test/test_defaultdict.py
|
Python
|
bsd-3-clause
| 6,033
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
_HEADERS = """ELF Header:
Magic: 7f 45 4c 46 01 01 01 00 00 00 00 00 00 00 00 00
Class: ELF32
Data: 2's complement, little endian
Version: 1 (current)
OS/ABI: UNIX - System V
ABI Version: 0
Type: DYN (Shared object file)
Machine: ARM
Version: 0x1
Entry point address: 0x0
Start of program headers: 52 (bytes into file)
Start of section headers: 628588000 (bytes into file)
Flags: 0x5000200, Version5 EABI, soft-float ABI
Size of this header: 52 (bytes)
Size of program headers: 32 (bytes)
Number of program headers: 9
Size of section headers: 40 (bytes)
Number of section headers: 40
Section header string table index: 39
"""
_SECTIONS = """There are 40 section headers, starting at offset 0x25777de0:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .interp PROGBITS 00000154 000154 000013 00 A 0 0 1
[ 2] .note.gnu.build-id NOTE 00000168 000168 000024 00 A 0 0 4
[ 3] .dynsym DYNSYM 0000018c 00018c 001960 10 A 4 1 4
[ 4] .dynstr STRTAB 00001b0c 001b0c 000fb9 00 A 0 0 1
[ 5] .hash HASH 00002ad4 002ad4 000a7c 04 A 3 0 4
[ 6] .gnu.version VERSYM 00003558 003558 00032c 02 A 3 0 2
[ 7] .gnu.version_d VERDEF 00003888 003888 00001c 00 A 4 1 4
[ 8] .gnu.version_r VERNEED 000038a4 0038a4 000060 00 A 4 3 4
[ 9] .rel.dyn REL 00003904 003904 288498 08 A 3 0 4
[10] .rel.plt REL 0029fbec 29fbec 000b00 08 A 3 0 4
[11] .plt PROGBITS 002a06ec 2a06ec 001094 00 AX 0 0 4
[12] .text PROGBITS 0028d900 28d900 2250ba8 00 AX 0 0 64
[13] .rodata PROGBITS 0266e5f0 000084 5a72e4 00 A 0 0 256
[14] .ARM.exidx ARM_EXIDX 02bd3d10 2bd3d10 1771c8 08 AL 12 0 4
[15] .ARM.extab PROGBITS 02bd5858 2bd5858 02cd50 00 A 0 0 4
[16] .data.rel.ro.local PROGBITS 02c176f0 2c166f0 0c0e08 00 WA 0 0 16
[17] .data.rel.ro PROGBITS 02cd8500 2cd8500 104108 00 WA 0 0 16
[18] .init_array INIT_ARRAY 02ddc608 2ddc608 000008 00 WA 0 0 4
[19] .fini_array FINI_ARRAY 02ddc6f4 2ddc6f4 000008 00 WA 0 0 4
[20] .dynamic DYNAMIC 02ddc6fc 2ddc6fc 000130 08 WA 4 0 4
[21] .got PROGBITS 02ddc834 2ddc834 00a7cc 00 WA 0 0 4
[22] .data PROGBITS 02de7000 2de7000 018d88 00 WA 0 0 32
[23] .bss NOBITS 02dffda0 2dffda0 13d7e8 00 WA 0 0 32
[35] .note.gnu.gold-version NOTE 00000000 22700c98 00001c 00 0 0 4
[36] .ARM.attributes ARM_ATTRIBUTES 00000000 22700cb4 00003c 00 0 0 1
[37] .symtab SYMTAB 00000000 22700cf0 105ef20 10 38 901679 4
[38] .strtab STRTAB 00000000 234c4950 213a4fe 00 0 0 1
[39] .shstrtab STRTAB 00000000 257b46da 0001b4 00 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
"""
_NOTES = """
Displaying notes found at file offset 0x00000168 with length 0x00000024:
Owner Data size\tDescription
GNU 0x00000014\tNT_GNU_BUILD_ID (unique build ID bitstring)
Build ID: WhatAnAmazingBuildId
Displaying notes found at file offset 0x226c41e8 with length 0x0000001c:
Owner Data size\tDescription
GNU 0x00000009\tNT_GNU_GOLD_VERSION (gold version)
"""
_OBJECT_OUTPUTS = {
'obj/third_party/icu/icuuc/ucnv_ext.o': """\
There are 71 section headers, starting at offset 0x3114:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .strtab STRTAB 00000000 0029ac 000765 00 0 0 1
[ 2] .text PROGBITS 00000000 000034 000000 00 AX 0 0 4
[ 3] .text.ucnv_extIni PROGBITS 00000000 000034 0000c6 00 AX 0 0 2
[ 4] .rel.text.ucnv_ex REL 00000000 0023f4 000010 08 70 3 4
[ 5] .ARM.exidx.text.u ARM_EXIDX 00000000 0000fc 000008 00 AL 3 0 4
[60] .rodata.str1.1 PROGBITS 00000000 000015 000015 01 AMS 0 0 1
[56] .debug_str PROGBITS 00000000 000c50 0003c5 01 MS 0 0 1
[57] .debug_abbrev PROGBITS 00000000 001015 0000a1 00 0 0 1
[58] .debug_info PROGBITS 00000000 0010b6 000151 00 0 0 1
[59] .rel.debug_info REL 00000000 002544 0001e8 08 70 58 4
[60] .debug_ranges PROGBITS 00000000 001207 0000b0 00 0 0 1
[61] .rel.debug_ranges REL 00000000 00272c 000130 08 70 60 4
[62] .debug_macinfo PROGBITS 00000000 0012b7 000001 00 0 0 1
[63] .comment PROGBITS 00000000 0012b8 000024 01 MS 0 0 1
[64] .note.GNU-stack PROGBITS 00000000 0012dc 000000 00 0 0 1
[65] .ARM.attributes ARM_ATTRIBUTES 00000000 0012dc 00003c 00 0 0 1
[66] .debug_frame PROGBITS 00000000 001318 0001e4 00 0 0 4
[67] .rel.debug_frame REL 00000000 00285c 0000e0 08 70 66 4
[68] .debug_line PROGBITS 00000000 0014fc 000965 00 0 0 1
[69] .rel.debug_line REL 00000000 00293c 000070 08 70 68 4
[70] .symtab SYMTAB 00000000 001e64 000590 10 1 74 4
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
'obj/third_party/WebKit.a': """\
File: obj/third_party/WebKit.a(PaintChunker.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
File: obj/third_party/WebKit.a(ContiguousContainer.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
'obj/base/base/page_allocator.o': """\
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .rodata.str1.1 PROGBITS 00000000 000015 000005 01 AMS 0 0 1
""",
'obj/third_party/ffmpeg/libffmpeg_internal.a': """\
File: obj/third_party/ffmpeg/libffmpeg_internal.a(fft_float.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
[ 1] .rodata.str1.1 PROGBITS 00000000 000015 000005 01 AMS 0 0 1
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
File: obj/third_party/ffmpeg/libffmpeg_internal.a(fft_fixed.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
'../../third_party/gvr-android-sdk/libgvr_shim_static_arm.a': """\
File: ../../third_party/gvr-android-sdk/libgvr_shim_static_arm.a(\
libcontroller_api_impl.a_controller_api_impl.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
File: ../../third_party/gvr-android-sdk/libgvr_shim_static_arm.a(\
libport_android_jni.a_jni_utils.o)
There are 68 section headers, starting at offset 0x5650:
Section Headers:
[Nr] Name Type Addr Off Size ES Flg Lk Inf Al
[ 0] NULL 00000000 000000 000000 00 0 0 0
Key to Flags:
W (write), A (alloc), X (execute), M (merge), S (strings)
I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)
O (extra OS processing required) o (OS specific), p (processor specific)
""",
}
def _PrintHeader(path):
sys.stdout.write('\n')
sys.stdout.write('File: ' + path + '\n')
def _PrintOutput(path):
payload = _OBJECT_OUTPUTS.get(os.path.normpath(path))
assert payload, 'No mock_readelf.py entry for: ' + path
sys.stdout.write(payload)
sys.stdout.write('\n')
def main():
paths = [p for p in sys.argv[1:] if not p.startswith('-')]
if paths[0].endswith('.o') or paths[0].endswith('.a'):
if len(paths) > 1:
for path in paths:
_PrintHeader(path)
_PrintOutput(path)
else:
_PrintOutput(paths[0])
elif sys.argv[1] == '-h':
sys.stdout.write(_HEADERS)
elif sys.argv[1] == '-S':
sys.stdout.write(_SECTIONS)
elif sys.argv[1] == '-n':
sys.stdout.write(_NOTES)
else:
assert False, 'Invalid args: %s' % sys.argv
if __name__ == '__main__':
main()
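# Editor's illustrative note (not part of the original mock): typical invocations
# mirror the readelf flags handled in main(), e.g.
#   mock_readelf.py -h <elf>                        -> prints _HEADERS
#   mock_readelf.py -S <elf>                        -> prints _SECTIONS
#   mock_readelf.py -n <elf>                        -> prints _NOTES
#   mock_readelf.py obj/base/base/page_allocator.o  -> prints the canned section dump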
|
scheib/chromium
|
tools/binary_size/libsupersize/testdata/mock_toolchain/mock_readelf.py
|
Python
|
bsd-3-clause
| 11,261
|
#!/usr/bin/env python
from circuits import Component, Event
class bark(Event):
"""bark Event"""
class Pound(Component):
def __init__(self):
super(Pound, self).__init__()
self.bob = Bob().register(self)
self.fred = Fred().register(self)
class Dog(Component):
def started(self, *args):
self.fire(bark())
def bark(self):
print("Woof! I'm %s!" % name) # noqa
class Bob(Dog):
"""Bob"""
channel = "bob"
class Fred(Dog):
"""Fred"""
channel = "fred"
Pound().run()
|
treemo/circuits
|
docs/source/tutorials/woof/009.py
|
Python
|
mit
| 550
|
import unittest
import os
import math
from rdbtools import RdbCallback, RdbParser
class RedisParserTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_empty_rdb(self):
r = load_rdb('empty_database.rdb')
self.assert_('start_rdb' in r.methods_called)
self.assert_('end_rdb' in r.methods_called)
self.assertEquals(len(r.databases), 0, msg = "didn't expect any databases")
def test_multiple_databases(self):
r = load_rdb('multiple_databases.rdb')
self.assert_(len(r.databases), 2)
self.assert_(1 not in r.databases)
self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
self.assertEquals(r.databases[2]["key_in_second_database"], "second")
def test_keys_with_expiry(self):
r = load_rdb('keys_with_expiry.rdb')
expiry = r.expiry[0]['expires_ms_precision']
self.assertEquals(expiry.year, 2022)
self.assertEquals(expiry.month, 12)
self.assertEquals(expiry.day, 25)
self.assertEquals(expiry.hour, 10)
self.assertEquals(expiry.minute, 11)
self.assertEquals(expiry.second, 12)
self.assertEquals(expiry.microsecond, 573000)
def test_integer_keys(self):
r = load_rdb('integer_keys.rdb')
self.assertEquals(r.databases[0][125], "Positive 8 bit integer")
self.assertEquals(r.databases[0][0xABAB], "Positive 16 bit integer")
self.assertEquals(r.databases[0][0x0AEDD325], "Positive 32 bit integer")
def test_negative_integer_keys(self):
r = load_rdb('integer_keys.rdb')
self.assertEquals(r.databases[0][-123], "Negative 8 bit integer")
self.assertEquals(r.databases[0][-0x7325], "Negative 16 bit integer")
self.assertEquals(r.databases[0][-0x0AEDD325], "Negative 32 bit integer")
def test_string_key_with_compression(self):
r = load_rdb('easily_compressible_string_key.rdb')
key = "".join('a' for x in range(0, 200))
value = "Key that redis should compress easily"
self.assertEquals(r.databases[0][key], value)
def test_zipmap_that_compresses_easily(self):
r = load_rdb('zipmap_that_compresses_easily.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_zipmap_that_doesnt_compress(self):
r = load_rdb('zipmap_that_doesnt_compress.rdb')
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["MKD1G6"], 2)
self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["YNNXK"], "F7TI")
def test_zipmap_with_big_values(self):
''' See issue https://github.com/sripathikrishnan/redis-rdb-tools/issues/2
Values with length around 253/254/255 bytes are treated specially in the parser
This test exercises those boundary conditions
In order to test a bug with large ziplists, it is necessary to start
Redis with "hash-max-ziplist-value 21000", create this rdb file,
and run the test. That forces the 20kbyte value to be stored as a
ziplist with a length encoding of 5 bytes.
'''
r = load_rdb('zipmap_with_big_values.rdb')
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["253bytes"]), 253)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["254bytes"]), 254)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["255bytes"]), 255)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["300bytes"]), 300)
self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["20kbytes"]), 20000)
def test_hash_as_ziplist(self):
'''In redis dump version = 4, hashmaps are stored as ziplists'''
r = load_rdb('hash_as_ziplist.rdb')
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")
def test_dictionary(self):
r = load_rdb('dictionary.rdb')
self.assertEquals(r.lengths[0]["force_dictionary"], 1000)
self.assertEquals(r.databases[0]["force_dictionary"]["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"],
"T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI")
self.assertEquals(r.databases[0]["force_dictionary"]["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"],
"6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ")
def test_ziplist_that_compresses_easily(self):
r = load_rdb('ziplist_that_compresses_easily.rdb')
self.assertEquals(r.lengths[0]["ziplist_compresses_easily"], 6)
for idx, length in enumerate([6, 12, 18, 24, 30, 36]) :
self.assertEquals(("".join("a" for x in xrange(length))), r.databases[0]["ziplist_compresses_easily"][idx])
def test_ziplist_that_doesnt_compress(self):
r = load_rdb('ziplist_that_doesnt_compress.rdb')
self.assertEquals(r.lengths[0]["ziplist_doesnt_compress"], 2)
self.assert_("aj2410" in r.databases[0]["ziplist_doesnt_compress"])
self.assert_("cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344"
in r.databases[0]["ziplist_doesnt_compress"])
def test_ziplist_with_integers(self):
r = load_rdb('ziplist_with_integers.rdb')
expected_numbers = []
for x in range(0,13):
expected_numbers.append(x)
expected_numbers += [-2, 13, 25, -61, 63, 16380, -16000, 65535, -65523, 4194304, 0x7fffffffffffffff]
self.assertEquals(r.lengths[0]["ziplist_with_integers"], len(expected_numbers))
for num in expected_numbers :
self.assert_(num in r.databases[0]["ziplist_with_integers"], "Cannot find %d" % num)
def test_linkedlist(self):
r = load_rdb('linkedlist.rdb')
self.assertEquals(r.lengths[0]["force_linkedlist"], 1000)
self.assert_("JYY4GIFI0ETHKP4VAJF5333082J4R1UPNPLE329YT0EYPGHSJQ" in r.databases[0]["force_linkedlist"])
self.assert_("TKBXHJOX9Q99ICF4V78XTCA2Y1UYW6ERL35JCIL1O0KSGXS58S" in r.databases[0]["force_linkedlist"])
def test_intset_16(self):
r = load_rdb('intset_16.rdb')
self.assertEquals(r.lengths[0]["intset_16"], 3)
for num in (0x7ffe, 0x7ffd, 0x7ffc) :
self.assert_(num in r.databases[0]["intset_16"])
def test_intset_32(self):
r = load_rdb('intset_32.rdb')
self.assertEquals(r.lengths[0]["intset_32"], 3)
for num in (0x7ffefffe, 0x7ffefffd, 0x7ffefffc) :
self.assert_(num in r.databases[0]["intset_32"])
def test_intset_64(self):
r = load_rdb('intset_64.rdb')
self.assertEquals(r.lengths[0]["intset_64"], 3)
for num in (0x7ffefffefffefffe, 0x7ffefffefffefffd, 0x7ffefffefffefffc) :
self.assert_(num in r.databases[0]["intset_64"])
def test_regular_set(self):
r = load_rdb('regular_set.rdb')
self.assertEquals(r.lengths[0]["regular_set"], 6)
for member in ("alpha", "beta", "gamma", "delta", "phi", "kappa") :
self.assert_(member in r.databases[0]["regular_set"], msg=('%s missing' % member))
def test_sorted_set_as_ziplist(self):
r = load_rdb('sorted_set_as_ziplist.rdb')
self.assertEquals(r.lengths[0]["sorted_set_as_ziplist"], 3)
zset = r.databases[0]["sorted_set_as_ziplist"]
self.assert_(floateq(zset['8b6ba6718a786daefa69438148361901'], 1))
self.assert_(floateq(zset['cb7a24bb7528f934b841b34c3a73e0c7'], 2.37))
self.assert_(floateq(zset['523af537946b79c4f8369ed39ba78605'], 3.423))
def test_filtering_by_keys(self):
r = load_rdb('parser_filters.rdb', filters={"keys":"k[0-9]"})
self.assertEquals(r.databases[0]['k1'], "ssssssss")
self.assertEquals(r.databases[0]['k3'], "wwwwwwww")
self.assertEquals(len(r.databases[0]), 2)
def test_filtering_by_type(self):
r = load_rdb('parser_filters.rdb', filters={"types":["sortedset"]})
self.assert_('z1' in r.databases[0])
self.assert_('z2' in r.databases[0])
self.assert_('z3' in r.databases[0])
self.assert_('z4' in r.databases[0])
self.assertEquals(len(r.databases[0]), 4)
def test_filtering_by_database(self):
r = load_rdb('multiple_databases.rdb', filters={"dbs":[2]})
self.assert_('key_in_zeroth_database' not in r.databases[0])
self.assert_('key_in_second_database' in r.databases[2])
self.assertEquals(len(r.databases[0]), 0)
self.assertEquals(len(r.databases[2]), 1)
def test_rdb_version_5_with_checksum(self):
r = load_rdb('rdb_version_5_with_checksum.rdb')
self.assertEquals(r.databases[0]['abcd'], 'efgh')
self.assertEquals(r.databases[0]['foo'], 'bar')
self.assertEquals(r.databases[0]['bar'], 'baz')
self.assertEquals(r.databases[0]['abcdef'], 'abcdef')
self.assertEquals(r.databases[0]['longerstring'], 'thisisalongerstring.idontknowwhatitmeans')
def floateq(f1, f2) :
return math.fabs(f1 - f2) < 0.00001
def load_rdb(file_name, filters=None) :
r = MockRedis()
parser = RdbParser(r, filters)
parser.parse(os.path.join(os.path.dirname(__file__), 'dumps', file_name))
return r
class MockRedis(RdbCallback):
def __init__(self) :
self.databases = {}
self.lengths = {}
self.expiry = {}
self.methods_called = []
self.dbnum = 0
def currentdb(self) :
return self.databases[self.dbnum]
def store_expiry(self, key, expiry) :
self.expiry[self.dbnum][key] = expiry
def store_length(self, key, length) :
if not self.dbnum in self.lengths :
self.lengths[self.dbnum] = {}
self.lengths[self.dbnum][key] = length
def get_length(self, key) :
if not key in self.lengths[self.dbnum] :
raise Exception('Key %s does not have a length' % key)
return self.lengths[self.dbnum][key]
def start_rdb(self):
self.methods_called.append('start_rdb')
def start_database(self, dbnum):
self.dbnum = dbnum
self.databases[dbnum] = {}
self.expiry[dbnum] = {}
self.lengths[dbnum] = {}
def set(self, key, value, expiry, info):
self.currentdb()[key] = value
if expiry :
self.store_expiry(key, expiry)
def start_hash(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_hash called with key %s that already exists' % key)
else :
self.currentdb()[key] = {}
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def hset(self, key, field, value):
if not key in self.currentdb() :
raise Exception('start_hash not called for key = %s', key)
self.currentdb()[key][field] = value
def end_hash(self, key):
if not key in self.currentdb() :
raise Exception('start_hash not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on hash %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_set(self, key, cardinality, expiry, info):
if key in self.currentdb() :
raise Exception('start_set called with key %s that already exists' % key)
else :
self.currentdb()[key] = []
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, cardinality)
def sadd(self, key, member):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
self.currentdb()[key].append(member)
def end_set(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on set %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_list(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_list called with key %s that already exists' % key)
else :
self.currentdb()[key] = []
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def rpush(self, key, value) :
if not key in self.currentdb() :
raise Exception('start_list not called for key = %s', key)
self.currentdb()[key].append(value)
def end_list(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on list %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def start_sorted_set(self, key, length, expiry, info):
if key in self.currentdb() :
raise Exception('start_sorted_set called with key %s that already exists' % key)
else :
self.currentdb()[key] = {}
if expiry :
self.store_expiry(key, expiry)
self.store_length(key, length)
def zadd(self, key, score, member):
if not key in self.currentdb() :
raise Exception('start_sorted_set not called for key = %s', key)
self.currentdb()[key][member] = score
def end_sorted_set(self, key):
if not key in self.currentdb() :
raise Exception('start_set not called for key = %s', key)
if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
raise Exception('Lengths mismatch on sortedset %s, expected length = %d, actual = %d'
% (key, self.lengths[self.dbnum][key], len(self.currentdb()[key])))
def end_database(self, dbnum):
if self.dbnum != dbnum :
raise Exception('start_database called with %d, but end_database called %d instead' % (self.dbnum, dbnum))
def end_rdb(self):
self.methods_called.append('end_rdb')
|
idning/redis-rdb-tools
|
tests/parser_tests.py
|
Python
|
mit
| 14,949
|
import datetime
from django import template
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed, add_domain
from django.utils import feedgenerator, tzinfo
from django.utils.encoding import iri_to_uri
try:
from django.utils.encoding import force_text
except ImportError:
# Django < 1.5
from django.utils.encoding import force_unicode as force_text
USE_SINGLE_SIGNON = getattr(settings, "DISQUS_USE_SINGLE_SIGNON", False)
class WxrFeedType(feedgenerator.Rss201rev2Feed):
def rss_attributes(self):
return {
'version': self._version,
'xmlns:content': 'http://purl.org/rss/1.0/modules/content/',
'xmlns:dsq': 'http://www.disqus.com/',
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'xmlns:wp': 'http://wordpress.org/export/1.0/',
}
def format_date(self, date):
return date.strftime('%Y-%m-%d %H:%M:%S')
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': comments,
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def add_root_elements(self, handler):
pass
def add_item_elements(self, handler, item):
if item['comments'] is None:
return
handler.addQuickElement('title', item['title'])
handler.addQuickElement('link', item['link'])
handler.addQuickElement('content:encoded', item['description'])
handler.addQuickElement('dsq:thread_identifier', item['unique_id'])
handler.addQuickElement('wp:post_date_gmt',
self.format_date(item['pubdate']).decode('utf-8'))
handler.addQuickElement('wp:comment_status', item['comment_status'])
self.write_comments(handler, item['comments'])
def add_comment_elements(self, handler, comment):
if USE_SINGLE_SIGNON:
handler.startElement('dsq:remote', {})
handler.addQuickElement('dsq:id', comment['user_id'])
handler.addQuickElement('dsq:avatar', comment['avatar'])
handler.endElement('dsq:remote')
handler.addQuickElement('wp:comment_id', comment['id'])
handler.addQuickElement('wp:comment_author', comment['user_name'])
handler.addQuickElement('wp:comment_author_email', comment['user_email'])
handler.addQuickElement('wp:comment_author_url', comment['user_url'])
handler.addQuickElement('wp:comment_author_IP', comment['ip_address'])
handler.addQuickElement('wp:comment_date_gmt',
self.format_date(comment['submit_date']).decode('utf-8'))
handler.addQuickElement('wp:comment_content', comment['comment'])
handler.addQuickElement('wp:comment_approved', comment['is_approved'])
if comment['parent'] is not None:
handler.addQuickElement('wp:comment_parent', comment['parent'])
def write_comments(self, handler, comments):
for comment in comments:
handler.startElement('wp:comment', {})
self.add_comment_elements(handler, comment)
handler.endElement('wp:comment')
class BaseWxrFeed(Feed):
feed_type = WxrFeedType
def get_feed(self, obj, request):
current_site = Site.objects.get_current()
link = self._Feed__get_dynamic_attr('link', obj)
link = add_domain(current_site.domain, link)
feed = self.feed_type(
title = self._Feed__get_dynamic_attr('title', obj),
link = link,
description = self._Feed__get_dynamic_attr('description', obj),
)
title_tmp = None
if self.title_template is not None:
try:
title_tmp = template.loader.get_template(self.title_template)
except template.TemplateDoesNotExist:
pass
description_tmp = None
if self.description_template is not None:
try:
description_tmp = template.loader.get_template(self.description_template)
except template.TemplateDoesNotExist:
pass
for item in self._Feed__get_dynamic_attr('items', obj):
if title_tmp is not None:
title = title_tmp.render(
template.RequestContext(request, {
'obj': item, 'site': current_site
}))
else:
title = self._Feed__get_dynamic_attr('item_title', item)
if description_tmp is not None:
description = description_tmp.render(
template.RequestContext(request, {
'obj': item, 'site': current_site
}))
else:
description = self._Feed__get_dynamic_attr('item_description', item)
link = add_domain(
current_site.domain,
self._Feed__get_dynamic_attr('item_link', item),
)
pubdate = self._Feed__get_dynamic_attr('item_pubdate', item)
if pubdate and not hasattr(pubdate, 'tzinfo'):
ltz = tzinfo.LocalTimezone(pubdate)
pubdate = pubdate.replace(tzinfo=ltz)
feed.add_item(
title = title,
link = link,
description = description,
unique_id = self._Feed__get_dynamic_attr('item_guid', item, link),
pubdate = pubdate,
comment_status = self._Feed__get_dynamic_attr('item_comment_status', item, 'open'),
comments = self._get_comments(item)
)
return feed
def _get_comments(self, item):
cmts = self._Feed__get_dynamic_attr('item_comments', item)
output = []
for comment in cmts:
output.append({
'user_id': self._Feed__get_dynamic_attr('comment_user_id', comment),
'avatar': self._Feed__get_dynamic_attr('comment_avatar', comment),
'id': str(self._Feed__get_dynamic_attr('comment_id', comment)),
'user_name': self._Feed__get_dynamic_attr('comment_user_name', comment),
'user_email': self._Feed__get_dynamic_attr('comment_user_email', comment),
'user_url': self._Feed__get_dynamic_attr('comment_user_url', comment),
'ip_address': self._Feed__get_dynamic_attr('comment_ip_address', comment),
'submit_date': self._Feed__get_dynamic_attr('comment_submit_date', comment),
'comment': self._Feed__get_dynamic_attr('comment_comment', comment),
'is_approved': str(self._Feed__get_dynamic_attr('comment_is_approved', comment)),
'parent': str(self._Feed__get_dynamic_attr('comment_parent', comment)),
})
return output
class ContribCommentsWxrFeed(BaseWxrFeed):
link = "/"
def item_comments(self, item):
from django.contrib.comments.models import Comment
ctype = ContentType.objects.get_for_model(item)
return Comment.objects.filter(content_type=ctype, object_pk=item.pk)
def item_guid(self, item):
ctype = ContentType.objects.get_for_model(item)
return "%s_%s" % (ctype.name, item.pk)
def comment_id(self, comment):
return comment.pk
def comment_user_id(self, comment):
return force_text(comment.user_id)
def comment_user_name(self, comment):
return force_text(comment.user_name)
def comment_user_email(self, comment):
return force_text(comment.user_email)
def comment_user_url(self, comment):
return force_text(comment.user_url)
def comment_ip_address(self, comment):
return force_text(comment.ip_address)
def comment_submit_date(self, comment):
return comment.submit_date
def comment_comment(self, comment):
return comment.comment
def comment_is_approved(self, comment):
return int(comment.is_public)
comment_parent = 0
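# Editor's illustrative sketch (not part of django-disqus): a hypothetical feed that
# exports comments for a blog "Entry" model through the WXR classes above. The app,
# model and field names are invented; only the hook names come from BaseWxrFeed.
class ExampleEntryWxrFeed(ContribCommentsWxrFeed):
    title = "Example comment export"
    description = "Comments exported for import into Disqus"
    def items(self):
        from blog.models import Entry          # hypothetical application
        return Entry.objects.all()
    def item_title(self, item):
        return item.title
    def item_description(self, item):
        return item.body
    def item_link(self, item):
        return item.get_absolute_url()
    def item_pubdate(self, item):
        return item.published_on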
|
steventimberman/masterDebater
|
venv/lib/python2.7/site-packages/disqus/wxr_feed.py
|
Python
|
mit
| 9,430
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m7'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler executable path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M7.fp.sp'
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M7'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv5_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M7'
AFLAGS += ' --fpu VFPv5_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
|
blueskycoco/rt-thread
|
bsp/stm32/stm32f767-fire-challenger/rtconfig.py
|
Python
|
gpl-2.0
| 4,079
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
A replacement ActionGroup that correctly loads named icons from an icon theme.
"""
from gi.repository import Gtk
class ActionGroup(Gtk.ActionGroup):
def add_actions(self, action_list, **kwargs):
Gtk.ActionGroup.add_actions(self, action_list, **kwargs)
self.fix_icon_name(action_list)
def add_toggle_actions(self, action_list, **kwargs):
Gtk.ActionGroup.add_toggle_actions(self, action_list, **kwargs)
self.fix_icon_name(action_list)
def add_radio_actions(self, action_list, **kwargs):
Gtk.ActionGroup.add_radio_actions(self, action_list, **kwargs)
self.fix_icon_name(action_list)
def fix_icon_name(self, action_list):
for action_tuple in action_list:
if action_tuple[1]:
action = self.get_action(action_tuple[0])
action.set_icon_name(action_tuple[1])
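# Usage sketch (an assumption, not part of the original module): entries follow the
# Gtk.ActionGroup tuple convention (name, stock-id, label, accelerator, tooltip,
# callback), with the stock-id slot reused here as a themed icon name, e.g.:
#   group = ActionGroup('MainWindow')
#   group.add_actions([('FileOpen', 'document-open', 'Open', '<Control>o',
#                       None, on_open_cb)])   # on_open_cb is a hypothetical callback
# fix_icon_name() then applies 'document-open' via action.set_icon_name().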
|
beernarrd/gramps
|
gramps/gui/actiongroup.py
|
Python
|
gpl-2.0
| 1,674
|
#!/usr/bin/env python
"""
Usage:
AppcastReplaceItem <path-to-appcast> <old-version> <new-version> <path-to-dmg>
Example: AppcastReplaceItem appcast-release.xml 1.1.4 1.2 Release/build/Adium_1.2.dmg
"""
# Configurable variables.
app_name = 'Adium'
changelog_fmt = 'http://www.adium.im/changelogs/%(version)s.html'
enclosure_fmt = ' <enclosure sparkle:md5Sum="%(md5)s" sparkle:version="%(version)s" url="%(url)s" length="%(file_size)s" type="application/octet-stream"/>\n'
# End of configurable variables.
import xml.etree.cElementTree as ElementTree
import sys
import os
import time
import subprocess
from stat import *
args = dict(zip(('appcast_path', 'old_version', 'version', 'dmg_pathname'), sys.argv[1:]))
try:
appcast_path = args['appcast_path']
old_version = args['old_version']
version = args['version']
dmg_pathname = args['dmg_pathname']
except KeyError:
sys.exit(__doc__.strip())
else:
args['app_name'] = app_name
# Get the length and modification time of the dmg file.
sb = os.stat(dmg_pathname)
file_size = args['file_size'] = sb[ST_SIZE]
dmg_mod_time = time.localtime(sb[ST_MTIME])
# Suffix for the day of the month.
th = (['st', 'nd', 'rd'] + ['th'] * 7) * 4
# GMT offset in hours.
gmt_offset = '%+i' % (-int(time.timezone / 3600),)
# Format, which we must fill in with the above items first.
time_fmt = '%A, %B %dth, %Y %H:%M:%S GMT+0'.replace('th', th[dmg_mod_time.tm_mday - 1]).replace('+0', gmt_offset)
dmg_mod_date = args['dmg_mod_date'] = time.strftime(time_fmt, dmg_mod_time)
openssl_md5 = subprocess.Popen(['openssl', 'md5', dmg_pathname], stdout=subprocess.PIPE)
# Skip the prefix
openssl_md5.stdout.read(len('MD5(') + len(dmg_pathname) + len(')= '))
md5 = args['md5'] = openssl_md5.stdout.read().strip()
exit_status = openssl_md5.wait()
if exit_status != 0: sys.exit('openssl md5 exited with status ' + str(exit_status))
# An MD5 hash is 16 bytes, which is 32 digits hexadecimal.
assert len(md5) == 32, 'MD5 sum is %u bytes' % (len(md5),)
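# For reference (assumption about openssl's output format): `openssl md5 <file>`
# prints a line like "MD5(<file>)= d41d8cd98f00b204e9800998ecf8427e", which is why
# the read above skips len('MD5(') + len(dmg_pathname) + len(')= ') bytes first.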
dmg_filename = os.path.split(dmg_pathname)[1]
url = args['url'] = 'http://adiumx.cachefly.net/' + dmg_filename
# Because XML parsing with the standard library is a PITA, we're going to do it the hackish way.
xmlfile = file(appcast_path)
lines = []
is_in_item = False
is_correct_item = False
found_correct_item = False
for line in xmlfile:
if not is_in_item:
if '<item>' in line:
is_in_item = True
else:
if '</item>' in line:
is_in_item = False
is_correct_item = False
elif '<title>' in line:
if '>%(app_name)s %(old_version)s<' % args in line:
line = line.replace(old_version, version)
is_correct_item = found_correct_item = True
elif is_correct_item:
if'<pubDate>' in line:
line = ' <pubDate>%(dmg_mod_date)s</pubDate>\n' % args
elif '<sparkle:releaseNotesLink>' in line:
line = ' <sparkle:releaseNotesLink>%s</sparkle:releaseNotesLink>\n' % (changelog_fmt % args,)
elif '<enclosure' in line:
line = enclosure_fmt % args
lines.append(line)
if not found_correct_item:
sys.exit('No item found for version %(old_version)s' % args)
xmlfile = file(appcast_path, 'w')
xmlfile.writelines(lines)
|
mosleyjr/Adium
|
Utilities/AppcastReplaceItem.py
|
Python
|
gpl-2.0
| 3,173
|
""" rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything else than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the names of the modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care of by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourself by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
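# Illustrative sketch of the translation (not executed by this module): with the
# numpy dictionaries enabled, the expression string 'sin(x) + pi' is parsed into a
# (head, (func, args), tail) tree, 'sin' is mapped to 'np.sin' and the non-function
# string 'pi' to 'np.pi', so the constructed lambda is roughly
#     lambda x0 : ( np.sin(x0) + np.pi )
# evaluated in a namespace that contains numpy imported as 'np'.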
from sympy.external import import_module
import warnings
#TODO debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
    This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
# left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
                                  ' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
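# Minimal usage sketch for vectorized_lambdify above (an assumption, mirroring how
# the plotting module drives it): build the callable once, then feed it array-like
# samples, e.g.
#     f = vectorized_lambdify([x], sin(x) + 1)
#     ys = f(numpy.linspace(0, 3, 50))   # masked array of the real parts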
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If the function
is not implemented in python cmath, python cmath calls evalf on those
functions.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args):
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
# The exceptions raised by sympy, cmath are not consistent and
# hence it is not possible to specify all the exceptions that
# are to be caught. Presently there are no cases for which the code
# reaches this block other than ZeroDivisionError and complex
# comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e) or 'unorderable '
'types' in str(e) or "not "
"supported between instances of"
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
                              ' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for i in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
# the & and | operators don't work on tuples, see discussion #12108
exprstr = exprstr.replace(" & "," and ").replace(" | "," or ")
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
namespace.update({'Eq': lambda x, y: x == y})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
self.eval_str = eval_str
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
def __call__(self, *args, **kwargs):
return self.lambda_func(*args, **kwargs)
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
# 'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
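    # For example (illustrative): with use_np=True, dict_fun ends up containing
    # entries such as {'Abs': 'abs', 'sin': 'np.sin', 'ceiling': 'np.ceil',
    # 'Max': 'np.amax', ...}, while dict_str maps 'pi' -> 'np.pi' and 'oo' -> 'np.inf'.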
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
If the function name is not in the dictionaries of dict_tuple_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
# Wrapping should only happen on the outermost expression, which
# is the only thing we know will be a number.
float_wrap_evalf = self.float_wrap_evalf
complex_wrap_evalf = self.complex_wrap_evalf
self.float_wrap_evalf = False
self.complex_wrap_evalf = False
ret = template % (func_name, self.tree2str_translate(argtree))
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
return ret
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
        the namespace is not polluted and stays quite small. It probably causes many
        more variable lookups and so it takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
|
wxgeo/geophar
|
wxgeometrie/sympy/plotting/experimental_lambdify.py
|
Python
|
gpl-2.0
| 26,133
|
#!/usr/bin/env python
from flask.ext.script import Manager
from flask_frozen import Freezer
import discovery
import logging
out = logging.StreamHandler()
out.setFormatter(logging.Formatter())
out.setLevel(logging.DEBUG)
logging.getLogger('freepto-web').setLevel(logging.INFO)
logging.getLogger('freepto-web').addHandler(out)
from app import app
manager = Manager(app)
freezer = Freezer(app)
@freezer.register_generator
def index():
yield {}
@freezer.register_generator
def page_index():
for lang in discovery.lang_dirs:
yield {'lang': lang}
@freezer.register_generator
def page():
for lang in discovery.lang_dirs:
for title in discovery.find_pages(lang):
yield {'lang': lang, 'title': title}
@manager.command
def freeze():
freezer.freeze()
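# Usage sketch (an assumption based on Flask-Script/Frozen-Flask conventions):
#   python manage.py runserver   # serve the site during development
#   python manage.py freeze      # write the static build with Frozen-Flask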
if __name__ == "__main__":
manager.run()
|
vinc3nt/freepto-web
|
manage.py
|
Python
|
gpl-2.0
| 842
|
#!/usr/bin/env python
from sqlalchemy import create_engine, and_, or_
from sqlalchemy.orm.session import sessionmaker
from rts2.db import Targets,Grb
Session = sessionmaker()
engine = create_engine('postgresql://petr:petr@localhost/stars',echo='debug')
Session.configure(bind=engine)
sess = Session()
targ = sess.query(Targets)
#q = sess.query(ApacheCatalog)
print(targ.filter(Targets.tar_id == 1000).all())
print(targ.filter(and_(Targets.tar_ra < 20, Targets.tar_dec < 0, Targets.tar_dec > -20)).all())
grb = sess.query(Grb)
print(grb.filter(Grb.tar_id == 50001).all())
|
xyficu/rts2
|
python/testalchemy.py
|
Python
|
gpl-2.0
| 575
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from . import Rule
#-------------------------------------------------------------------------
#
# HasSourceOf
#
#-------------------------------------------------------------------------
class HasSourceOfBase(Rule):
"""Rule that checks for objects that have a particular source."""
labels = [ _('Source ID:') ]
name = 'Object with the <source>'
category = _('Citation/source filters')
    description = 'Matches objects that have a particular source'
def prepare(self, db, user):
if self.list[0] == '':
self.source_handle = None
self.nosource = True
return
self.nosource = False
try:
self.source_handle = db.get_source_from_gramps_id(
self.list[0]).get_handle()
except:
self.source_handle = None
def apply(self, db, object):
if not self.source_handle:
if self.nosource:
# check whether the citation list is empty as a proxy for
# there being no sources
return len(object.get_all_citation_lists()) == 0
else:
return False
else:
for citation_handle in object.get_all_citation_lists():
citation = db.get_citation_from_handle(citation_handle)
if citation.get_reference_handle() == self.source_handle:
return True
return False
|
sam-m888/gramps
|
gramps/gen/filters/rules/_hassourceofbase.py
|
Python
|
gpl-2.0
| 2,721
|
import gc
import hashlib
import os
import os.path
import tempfile
import zipfile
import numpy as np
import pytest
import requests
from hyperspy import signals
from hyperspy.io import load
MY_PATH = os.path.dirname(__file__)
ZIPF = os.path.join(MY_PATH, "edax_files.zip")
TMP_DIR = tempfile.TemporaryDirectory()
TEST_FILES_OK = os.path.isfile(ZIPF)
REASON = ""
SHA256SUM = "e217c71efbd208da4b52e9cf483443f9da2175f2924a96447ed393086fe32008"
# The test files are not included in HyperSpy v1.4 because their file size is 36.5MB
# taking the HyperSpy source distribution file size above PyPI's 60MB limit.
# As a temporary solution, we attempt to download the test files from GitHub
# and skip the tests if the download fails.
if not TEST_FILES_OK:
try:
r = requests.get(
"https://github.com/hyperspy/hyperspy/blob/e7a323a3bb9b237c24bd9267d2cc4fcb31bb99f3/hyperspy/tests/io/edax_files.zip?raw=true")
SHA256SUM_GOT = hashlib.sha256(r.content).hexdigest()
if SHA256SUM_GOT == SHA256SUM:
with open(ZIPF, 'wb') as f:
f.write(r.content)
TEST_FILES_OK = True
else:
REASON = "wrong sha256sum of downloaded file. Expected: %s, got: %s" % SHA256SUM, SHA256SUM_GOT
except BaseException as e:
REASON = "download of EDAX test files failed: %s" % e
def setup_module():
if TEST_FILES_OK:
with zipfile.ZipFile(ZIPF, 'r') as zipped:
zipped.extractall(TMP_DIR.name)
pytestmark = pytest.mark.skipif(not TEST_FILES_OK,
reason=REASON)
def teardown_module():
TMP_DIR.cleanup()
class TestSpcSpectrum_v061_xrf:
@classmethod
def setup_class(cls):
cls.spc = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spc"))
cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
"spc0_61-ipr333_xrf.spc"),
load_all_spc=True)
@classmethod
def teardown_class(cls):
del cls.spc, cls.spc_loadAll
gc.collect()
def test_data(self):
# test datatype
assert np.uint32 == TestSpcSpectrum_v061_xrf.spc.data.dtype
# test data shape
assert (4000,) == TestSpcSpectrum_v061_xrf.spc.data.shape
# test 40 datapoints
assert (
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 319, 504, 639, 924,
1081, 1326, 1470, 1727, 1983, 2123, 2278, 2509, 2586, 2639,
2681, 2833, 2696, 2704, 2812, 2745, 2709, 2647, 2608, 2620,
2571, 2669] == TestSpcSpectrum_v061_xrf.spc.data[:40].tolist())
def test_parameters(self):
elements = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Acquisition_instrument']['SEM'] # this will eventually need to
# be changed when XRF-specific
# features are added
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpcSpectrum_v061_xrf.spc.metadata.as_dictionary()[
'Signal']
# Testing SEM parameters
np.testing.assert_allclose(30, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
np.testing.assert_allclose(35, eds_dict['elevation_angle'])
np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-6)
# Testing elements
assert ({'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si', 'Y'} ==
set(elements))
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpcSpectrum_v061_xrf.spc, signals.EDSSEMSpectrum)
def test_axes(self):
spc_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 4000,
'units': 'keV'}}
assert (spc_ax_manager ==
TestSpcSpectrum_v061_xrf.spc.axes_manager.as_dictionary())
def test_load_all_spc(self):
spc_header = TestSpcSpectrum_v061_xrf.spc_loadAll.original_metadata[
'spc_header']
np.testing.assert_allclose(4, spc_header['analysisType'])
np.testing.assert_allclose(4, spc_header['analyzerType'])
np.testing.assert_allclose(2013, spc_header['collectDateYear'])
np.testing.assert_allclose(9, spc_header['collectDateMon'])
np.testing.assert_allclose(26, spc_header['collectDateDay'])
np.testing.assert_equal(b'Garnet1.', spc_header['fileName'].view('|S8')[0])
np.testing.assert_allclose(45, spc_header['xRayTubeZ'])
class TestSpcSpectrum_v070_eds:
@classmethod
def setup_class(cls):
cls.spc = load(os.path.join(TMP_DIR.name, "single_spect.spc"))
cls.spc_loadAll = load(os.path.join(TMP_DIR.name,
"single_spect.spc"),
load_all_spc=True)
@classmethod
def teardown_class(cls):
del cls.spc, cls.spc_loadAll
gc.collect()
def test_data(self):
# test datatype
assert np.uint32 == TestSpcSpectrum_v070_eds.spc.data.dtype
# test data shape
assert (4096,) == TestSpcSpectrum_v070_eds.spc.data.shape
# test 1st 20 datapoints
assert (
[0, 0, 0, 0, 0, 0, 1, 2, 3, 3, 10, 4, 10, 10, 45, 87, 146, 236,
312, 342] == TestSpcSpectrum_v070_eds.spc.data[:20].tolist())
def test_parameters(self):
elements = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpcSpectrum_v070_eds.spc.metadata.as_dictionary()[
'Signal']
# Testing SEM parameters
np.testing.assert_allclose(22, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
np.testing.assert_allclose(34, eds_dict['elevation_angle'])
np.testing.assert_allclose(129.31299, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(50.000004, eds_dict['live_time'], atol=1E-6)
# Testing elements
assert ({'Al', 'C', 'Ce', 'Cu', 'F', 'Ho', 'Mg', 'O'} ==
set(elements))
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpcSpectrum_v070_eds.spc, signals.EDSSEMSpectrum)
def test_axes(self):
spc_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 4096,
'units': 'keV'}}
assert (spc_ax_manager ==
TestSpcSpectrum_v070_eds.spc.axes_manager.as_dictionary())
def test_load_all_spc(self):
spc_header = TestSpcSpectrum_v070_eds.spc_loadAll.original_metadata[
'spc_header']
np.testing.assert_allclose(4, spc_header['analysisType'])
np.testing.assert_allclose(5, spc_header['analyzerType'])
np.testing.assert_allclose(2016, spc_header['collectDateYear'])
np.testing.assert_allclose(4, spc_header['collectDateMon'])
np.testing.assert_allclose(19, spc_header['collectDateDay'])
np.testing.assert_equal(b'C:\\ProgramData\\EDAX\\jtaillon\\Cole\\Mapping\\Lsm\\'
b'GFdCr\\950\\Area 1\\spectrum20160419153851427_0.spc',
spc_header['longFileName'].view('|S256')[0])
np.testing.assert_allclose(0, spc_header['xRayTubeZ'])
class TestSpdMap_070_eds:
@classmethod
def setup_class(cls):
cls.spd = load(os.path.join(TMP_DIR.name, "spd_map.spd"),
convert_units=True)
@classmethod
def teardown_class(cls):
del cls.spd
gc.collect()
def test_data(self):
# test d_type
assert np.uint16 == TestSpdMap_070_eds.spd.data.dtype
# test d_shape
assert (200, 256, 2500) == TestSpdMap_070_eds.spd.data.shape
assert ([[[0, 0, 0, 0, 0], # test random data
[0, 0, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0]]] ==
TestSpdMap_070_eds.spd.data[15:20, 15:20, 15:20].tolist())
def test_parameters(self):
elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()['Signal']
# Testing SEM parameters
np.testing.assert_allclose(22, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(0, eds_dict['azimuth_angle'])
np.testing.assert_allclose(34, eds_dict['elevation_angle'])
np.testing.assert_allclose(126.60252, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2621.4399, eds_dict['live_time'], atol=1E-4)
# Testing elements
assert {'Ce', 'Co', 'Cr', 'Fe', 'Gd', 'La', 'Mg', 'O',
'Sr'} == set(elements)
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpdMap_070_eds.spd, signals.EDSSEMSpectrum)
def test_axes(self):
spd_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'y',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 14.227345585823057,
'size': 200,
'units': 'nm'},
'axis-1': {'_type': 'UniformDataAxis',
'name': 'x',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 14.235896058380602,
'size': 256,
'units': 'nm'},
'axis-2': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.0050000000000000001,
'size': 2500,
'units': 'keV'}}
assert (spd_ax_manager ==
TestSpdMap_070_eds.spd.axes_manager.as_dictionary())
def test_ipr_reading(self):
ipr_header = TestSpdMap_070_eds.spd.original_metadata['ipr_header']
np.testing.assert_allclose(0.014235896, ipr_header['mppX'])
np.testing.assert_allclose(0.014227346, ipr_header['mppY'])
def test_spc_reading(self):
# Test to make sure that spc metadata matches spd metadata
spc_header = TestSpdMap_070_eds.spd.original_metadata['spc_header']
elements = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Sample']['elements']
sem_dict = TestSpdMap_070_eds.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
np.testing.assert_allclose(spc_header.azimuth,
eds_dict['azimuth_angle'])
np.testing.assert_allclose(spc_header.detReso,
eds_dict['energy_resolution_MnKa'])
np.testing.assert_allclose(spc_header.elevation,
eds_dict['elevation_angle'])
np.testing.assert_allclose(spc_header.liveTime,
eds_dict['live_time'])
np.testing.assert_allclose(spc_header.evPerChan,
TestSpdMap_070_eds.spd.axes_manager[2].scale * 1000)
np.testing.assert_allclose(spc_header.kV,
sem_dict['beam_energy'])
np.testing.assert_allclose(spc_header.numElem,
len(elements))
class TestSpdMap_061_xrf:
@classmethod
def setup_class(cls):
cls.spd = load(os.path.join(TMP_DIR.name, "spc0_61-ipr333_xrf.spd"),
convert_units=True)
@classmethod
def teardown_class(cls):
del cls.spd
gc.collect()
def test_data(self):
# test d_type
assert np.uint16 == TestSpdMap_061_xrf.spd.data.dtype
# test d_shape
assert (200, 256, 2000) == TestSpdMap_061_xrf.spd.data.shape
assert ([[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 1, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1]],
[[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]] ==
TestSpdMap_061_xrf.spd.data[15:20, 15:20, 15:20].tolist())
def test_parameters(self):
elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
'elements']
sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
signal_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Signal']
# Testing SEM parameters
np.testing.assert_allclose(30, sem_dict['beam_energy'])
np.testing.assert_allclose(0, sem_dict['Stage']['tilt_alpha'])
# Testing EDS parameters
np.testing.assert_allclose(45, eds_dict['azimuth_angle'])
np.testing.assert_allclose(35, eds_dict['elevation_angle'])
np.testing.assert_allclose(137.92946, eds_dict['energy_resolution_MnKa'],
atol=1E-5)
np.testing.assert_allclose(2561.0, eds_dict['live_time'], atol=1E-4)
# Testing elements
assert {'Al', 'Ca', 'Cl', 'Cr', 'Fe', 'K', 'Mg', 'Mn', 'Si',
'Y'} == set(elements)
# Testing HyperSpy parameters
assert 'EDS_SEM' == signal_dict['signal_type']
assert isinstance(TestSpdMap_061_xrf.spd, signals.EDSSEMSpectrum)
def test_axes(self):
spd_ax_manager = {'axis-0': {'_type': 'UniformDataAxis',
'name': 'y',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 0.5651920166015625,
'size': 200,
'units': 'mm'},
'axis-1': {'_type': 'UniformDataAxis',
'name': 'x',
'navigate': True,
'is_binned': False,
'offset': 0.0,
'scale': 0.5651920166015625,
'size': 256,
'units': 'mm'},
'axis-2': {'_type': 'UniformDataAxis',
'name': 'Energy',
'navigate': False,
'is_binned': True,
'offset': 0.0,
'scale': 0.01,
'size': 2000,
'units': 'keV'}}
assert (spd_ax_manager ==
TestSpdMap_061_xrf.spd.axes_manager.as_dictionary())
def test_ipr_reading(self):
ipr_header = TestSpdMap_061_xrf.spd.original_metadata['ipr_header']
np.testing.assert_allclose(565.1920166015625, ipr_header['mppX'])
np.testing.assert_allclose(565.1920166015625, ipr_header['mppY'])
def test_spc_reading(self):
# Test to make sure that spc metadata matches spd_061_xrf metadata
spc_header = TestSpdMap_061_xrf.spd.original_metadata['spc_header']
elements = TestSpdMap_061_xrf.spd.metadata.as_dictionary()['Sample'][
'elements']
sem_dict = TestSpdMap_061_xrf.spd.metadata.as_dictionary()[
'Acquisition_instrument']['SEM']
eds_dict = sem_dict['Detector']['EDS']
np.testing.assert_allclose(spc_header.azimuth,
eds_dict['azimuth_angle'])
np.testing.assert_allclose(spc_header.detReso,
eds_dict['energy_resolution_MnKa'])
np.testing.assert_allclose(spc_header.elevation,
eds_dict['elevation_angle'])
np.testing.assert_allclose(spc_header.liveTime,
eds_dict['live_time'])
np.testing.assert_allclose(spc_header.evPerChan,
TestSpdMap_061_xrf.spd.axes_manager[2].scale * 1000)
np.testing.assert_allclose(spc_header.kV,
sem_dict['beam_energy'])
np.testing.assert_allclose(spc_header.numElem,
len(elements))
|
thomasaarholt/hyperspy
|
hyperspy/tests/io/test_edax.py
|
Python
|
gpl-3.0
| 19,779
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re,traceback,sys
from waflib import Utils,ansiterm
if not os.environ.get('NOSYNC',False):
if sys.stdout.isatty()and id(sys.stdout)==id(sys.__stdout__):
sys.stdout=ansiterm.AnsiTerm(sys.stdout)
if sys.stderr.isatty()and id(sys.stderr)==id(sys.__stderr__):
sys.stderr=ansiterm.AnsiTerm(sys.stderr)
import logging
LOG_FORMAT=os.environ.get('WAF_LOG_FORMAT','%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s')
HOUR_FORMAT=os.environ.get('WAF_HOUR_FORMAT','%H:%M:%S')
zones=[]
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','GREY':'\x1b[37m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
indicator='\r\x1b[K%s%s%s'
try:
unicode
except NameError:
unicode=None
def enable_colors(use):
if use==1:
if not(sys.stderr.isatty()or sys.stdout.isatty()):
use=0
if Utils.is_win32 and os.name!='java':
term=os.environ.get('TERM','')
else:
term=os.environ.get('TERM','dumb')
if term in('dumb','emacs'):
use=0
if use>=1:
os.environ['TERM']='vt100'
colors_lst['USE']=use
try:
get_term_cols=ansiterm.get_term_cols
except AttributeError:
def get_term_cols():
return 80
get_term_cols.__doc__="""
Returns the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if colors_lst['USE']:
return colors_lst.get(cl,'')
return''
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=''):
logging.Filter.__init__(self,name)
def filter(self,rec):
global verbose
rec.zone=rec.module
if rec.levelno>=logging.INFO:
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class log_handler(logging.StreamHandler):
def emit(self,record):
try:
try:
self.stream=record.stream
except AttributeError:
if record.levelno>=logging.WARNING:
record.stream=self.stream=sys.stderr
else:
record.stream=self.stream=sys.stdout
self.emit_override(record)
self.flush()
except(KeyboardInterrupt,SystemExit):
raise
except:
self.handleError(record)
def emit_override(self,record,**kw):
self.terminator=getattr(record,'terminator','\n')
stream=self.stream
if unicode:
msg=self.formatter.format(record)
fs='%s'+self.terminator
try:
if(isinstance(msg,unicode)and getattr(stream,'encoding',None)):
fs=fs.decode(stream.encoding)
try:
stream.write(fs%msg)
except UnicodeEncodeError:
stream.write((fs%msg).encode(stream.encoding))
else:
stream.write(fs%msg)
except UnicodeError:
stream.write((fs%msg).encode('utf-8'))
else:
logging.StreamHandler.emit(self,record)
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
use=colors_lst['USE']
if(use==1 and rec.stream.isatty())or use==2:
c1=getattr(rec,'c1',None)
if c1 is None:
c1=''
if rec.levelno>=logging.ERROR:
c1=colors.RED
elif rec.levelno>=logging.WARNING:
c1=colors.YELLOW
elif rec.levelno>=logging.INFO:
c1=colors.GREEN
c2=getattr(rec,'c2',colors.NORMAL)
msg='%s%s%s'%(c1,msg,c2)
else:
msg=re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))','',msg)
if rec.levelno>=logging.INFO:
if rec.args:
return msg%rec.args
return msg
rec.msg=msg
rec.c1=colors.PINK
rec.c2=colors.NORMAL
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
global verbose
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log,verbose
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File %r, line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error('\n'.join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=log_handler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=8192):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def free_logger(logger):
try:
for x in logger.handlers:
x.close()
logger.removeHandler(x)
except Exception:
pass
def pprint(col,msg,label='',sep='\n'):
global info
info('%s%s%s %s',colors(col),msg,colors.NORMAL,label,extra={'terminator':sep})
|
Gnurou/glmark2
|
waflib/Logs.py
|
Python
|
gpl-3.0
| 5,584
|
# Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gettext import gettext as _
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Gdk
from jarabe.webservice.accountsmanager import get_webaccount_services
from jarabe.controlpanel.sectionview import SectionView
from sugar3.graphics.icon import CanvasIcon, Icon
from sugar3.graphics import style
def get_service_name(service):
if hasattr(service, '_account'):
if hasattr(service._account, 'get_description'):
return service._account.get_description()
return ''
class WebServicesConfig(SectionView):
def __init__(self, model, alerts):
SectionView.__init__(self)
self._model = model
self.restart_alerts = alerts
services = get_webaccount_services()
grid = Gtk.Grid()
if len(services) == 0:
grid.set_row_spacing(style.DEFAULT_SPACING)
icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
icon_name='module-webaccount',
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
grid.attach(icon, 0, 0, 1, 1)
icon.show()
label = Gtk.Label()
label.set_justify(Gtk.Justification.CENTER)
label.set_markup(
'<span foreground="%s" size="large">%s</span>'
% (style.COLOR_BUTTON_GREY.get_html(),
GLib.markup_escape_text(
_('No web services are installed.\n'
'Please visit %s for more details.' %
'http://wiki.sugarlabs.org/go/WebServices'))))
label.show()
grid.attach(label, 0, 1, 1, 1)
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
alignment.add(grid)
grid.show()
self.add(alignment)
alignment.show()
return
grid.set_row_spacing(style.DEFAULT_SPACING * 4)
grid.set_column_spacing(style.DEFAULT_SPACING * 4)
grid.set_border_width(style.DEFAULT_SPACING * 2)
grid.set_column_homogeneous(True)
width = Gdk.Screen.width() - 2 * style.GRID_CELL_SIZE
nx = int(width / (style.GRID_CELL_SIZE + style.DEFAULT_SPACING * 4))
self._service_config_box = Gtk.VBox()
x = 0
y = 0
for service in services:
service_grid = Gtk.Grid()
icon = CanvasIcon(icon_name=service.get_icon_name())
icon.show()
service_grid.attach(icon, x, y, 1, 1)
icon.connect('activate', service.config_service_cb, None,
self._service_config_box)
label = Gtk.Label()
label.set_justify(Gtk.Justification.CENTER)
name = get_service_name(service)
label.set_markup(name)
service_grid.attach(label, x, y + 1, 1, 1)
label.show()
grid.attach(service_grid, x, y, 1, 1)
service_grid.show()
x += 1
if x == nx:
x = 0
y += 1
alignment = Gtk.Alignment.new(0.5, 0, 0, 0)
alignment.add(grid)
grid.show()
vbox = Gtk.VBox()
vbox.pack_start(alignment, False, False, 0)
alignment.show()
scrolled = Gtk.ScrolledWindow()
vbox.pack_start(scrolled, True, True, 0)
self.add(vbox)
scrolled.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scrolled.show()
workspace = Gtk.VBox()
scrolled.add_with_viewport(workspace)
workspace.show()
workspace.add(self._service_config_box)
workspace.show_all()
vbox.show()
def undo(self):
pass
|
icarito/sugar
|
extensions/cpsection/webaccount/view.py
|
Python
|
gpl-3.0
| 4,477
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: nxos_linkagg
extends_documentation_fragment: nxos
version_added: "2.5"
short_description: Manage link aggregation groups on Cisco NXOS devices.
description:
- This module provides declarative management of link aggregation groups
on Cisco NXOS devices.
author:
- Trishna Guha (@trishnaguha)
notes:
- Tested against NXOSv 7.0(3)I5(1).
- C(state=absent) removes the portchannel config and interface if it
already exists. If members to be removed are not explicitly
passed, all existing members (if any), are removed.
- Members must be a list.
- LACP needs to be enabled first if active/passive modes are used.
options:
group:
description:
- Channel-group number for the port-channel
Link aggregation group.
required: true
mode:
description:
- Mode for the link aggregation group.
choices: [ active, 'on', passive ]
default: 'on'
min_links:
description:
- Minimum number of ports required up
before bringing up the link aggregation group.
members:
description:
- List of interfaces that will be managed in the link aggregation group.
force:
description:
- When true it forces link aggregation group members to match what
is declared in the members param. This can be used to remove members.
type: bool
default: 'no'
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present','absent']
purge:
description:
- Purge links not defined in the I(aggregate) parameter.
type: bool
default: 'no'
"""
EXAMPLES = """
- name: create link aggregation group
nxos_linkagg:
group: 99
state: present
- name: delete link aggregation group
nxos_linkagg:
group: 99
state: absent
- name: set link aggregation group to members
nxos_linkagg:
group: 10
min_links: 3
mode: active
members:
- Ethernet1/2
- Ethernet1/4
- name: remove link aggregation group from Ethernet1/2
nxos_linkagg:
group: 10
min_links: 3
mode: active
members:
- Ethernet1/4
- name: Create aggregate of linkagg definitions
nxos_linkagg:
aggregate:
- { group: 3 }
- { group: 100, min_links: 3 }
- name: Remove aggregate of linkagg definitions
nxos_linkagg:
aggregate:
- { group: 3 }
- { group: 100, min_links: 3 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- interface port-channel 30
- lacp min-links 5
- interface Ethernet2/1
- channel-group 30 mode active
- no interface port-channel 30
"""
import re
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.network.nxos.nxos import normalize_interface
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
def search_obj_in_list(group, lst):
for o in lst:
if o['group'] == group:
return o
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
purge = module.params['purge']
force = module.params['force']
for w in want:
group = w['group']
mode = w['mode']
min_links = w['min_links']
members = w.get('members') or []
state = w['state']
del w['state']
obj_in_have = search_obj_in_list(group, have)
if state == 'absent':
if obj_in_have:
members_to_remove = list(set(obj_in_have['members']) - set(members))
if members_to_remove:
for m in members_to_remove:
commands.append('interface {0}'.format(m))
commands.append('no channel-group {0}'.format(obj_in_have['group']))
commands.append('exit')
commands.append('no interface port-channel {0}'.format(group))
elif state == 'present':
if not obj_in_have:
commands.append('interface port-channel {0}'.format(group))
if min_links != 'None':
commands.append('lacp min-links {0}'.format(min_links))
commands.append('exit')
if members:
for m in members:
commands.append('interface {0}'.format(m))
if force:
commands.append('channel-group {0} force mode {1}'.format(group, mode))
else:
commands.append('channel-group {0} mode {1}'.format(group, mode))
else:
if members:
if not obj_in_have['members']:
for m in members:
commands.append('interface port-channel {0}'.format(group))
commands.append('exit')
commands.append('interface {0}'.format(m))
if force:
commands.append('channel-group {0} force mode {1}'.format(group, mode))
else:
commands.append('channel-group {0} mode {1}'.format(group, mode))
elif set(members) != set(obj_in_have['members']):
missing_members = list(set(members) - set(obj_in_have['members']))
for m in missing_members:
commands.append('interface port-channel {0}'.format(group))
commands.append('exit')
commands.append('interface {0}'.format(m))
if force:
commands.append('channel-group {0} force mode {1}'.format(group, mode))
else:
commands.append('channel-group {0} mode {1}'.format(group, mode))
superfluous_members = list(set(obj_in_have['members']) - set(members))
for m in superfluous_members:
commands.append('interface port-channel {0}'.format(group))
commands.append('exit')
commands.append('interface {0}'.format(m))
commands.append('no channel-group {0}'.format(group))
if purge:
for h in have:
obj_in_want = search_obj_in_list(h['group'], want)
if not obj_in_want:
commands.append('no interface port-channel {0}'.format(h['group']))
return commands
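# Hedged illustration (not in the original module): for a minimal task such as
# "group: 99, state: present" run against a device with no existing port-channel 99,
# the function above yields ['interface port-channel 99', 'exit'], since min_links
# and members are unset and obj_in_have is None.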
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
d['group'] = str(d['group'])
d['min_links'] = str(d['min_links'])
if d['members']:
d['members'] = [normalize_interface(i) for i in d['members']]
obj.append(d)
else:
members = None
if module.params['members']:
members = [normalize_interface(i) for i in module.params['members']]
obj.append({
'group': str(module.params['group']),
'mode': module.params['mode'],
'min_links': str(module.params['min_links']),
'members': members,
'state': module.params['state']
})
return obj
def parse_min_links(module, group):
min_links = None
flags = ['| section interface.port-channel{0}'.format(group)]
config = get_config(module, flags=flags)
match = re.search(r'lacp min-links (\S+)', config, re.M)
if match:
min_links = match.group(1)
return min_links
def parse_mode(module, m):
mode = None
flags = ['| section interface.{0}'.format(m)]
config = get_config(module, flags=flags)
match = re.search(r'mode (\S+)', config, re.M)
if match:
mode = match.group(1)
return mode
def get_members(channel):
members = []
if 'TABLE_member' in channel.keys():
interfaces = channel['TABLE_member']['ROW_member']
else:
return list()
if isinstance(interfaces, dict):
members.append(normalize_interface(interfaces.get('port')))
elif isinstance(interfaces, list):
for i in interfaces:
members.append(normalize_interface(i.get('port')))
return members
def parse_members(output, group):
    members = list()
    channels = output['TABLE_channel']['ROW_channel']
if isinstance(channels, list):
for channel in channels:
if channel['group'] == group:
members = get_members(channel)
elif isinstance(channels, dict):
if channels['group'] == group:
members = get_members(channels)
else:
return list()
return members
def parse_channel_options(module, output, channel):
obj = {}
group = channel['group']
obj['group'] = group
obj['min-links'] = parse_min_links(module, group)
members = parse_members(output, group)
obj['members'] = members
for m in members:
obj['mode'] = parse_mode(module, m)
return obj
def map_config_to_obj(module):
objs = list()
output = run_commands(module, ['show port-channel summary | json'])[0]
if not output:
return list()
try:
channels = output['TABLE_channel']['ROW_channel']
except (TypeError, KeyError):
return objs
if channels:
if isinstance(channels, list):
for channel in channels:
obj = parse_channel_options(module, output, channel)
objs.append(obj)
elif isinstance(channels, dict):
obj = parse_channel_options(module, output, channels)
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
group=dict(type='int'),
mode=dict(required=False, choices=['on', 'active', 'passive'], default='on', type='str'),
min_links=dict(required=False, default=None, type='int'),
members=dict(required=False, default=None, type='list'),
force=dict(required=False, default=False, type='bool'),
state=dict(required=False, choices=['absent', 'present'], default='present')
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['group'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
purge=dict(default=False, type='bool')
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
required_one_of = [['group', 'aggregate']]
mutually_exclusive = [['group', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
fxfitz/ansible
|
lib/ansible/modules/network/nxos/nxos_linkagg.py
|
Python
|
gpl-3.0
| 12,502
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.exceptions import ValidationError
from odoo import models, fields, api, _
from odoo.osv import expression
SII_VAT = '60805000-0'
class AccountMove(models.Model):
_inherit = "account.move"
partner_id_vat = fields.Char(related='partner_id.vat', string='VAT No')
l10n_latam_internal_type = fields.Selection(
related='l10n_latam_document_type_id.internal_type', string='L10n Latam Internal Type')
def _get_l10n_latam_documents_domain(self):
self.ensure_one()
if self.journal_id.company_id.account_fiscal_country_id != self.env.ref('base.cl') or not \
self.journal_id.l10n_latam_use_documents:
return super()._get_l10n_latam_documents_domain()
if self.journal_id.type == 'sale':
domain = [('country_id.code', '=', "CL"), ('internal_type', '!=', 'invoice_in')]
if self.company_id.partner_id.l10n_cl_sii_taxpayer_type == '1':
                domain += [('code', '!=', '71')]  # VAT-affected companies don't have "Boleta de honorarios Electrónica"
return domain
domain = [
('country_id.code', '=', 'CL'),
('internal_type', 'in', ['invoice', 'debit_note', 'credit_note', 'invoice_in'])]
if self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat != '60805000-0':
domain += [('code', 'not in', ['39', '70', '71', '914', '911'])]
elif self.partner_id.l10n_cl_sii_taxpayer_type == '1' and self.partner_id_vat == '60805000-0':
domain += [('code', 'not in', ['39', '70', '71'])]
if self.move_type == 'in_invoice':
domain += [('internal_type', '!=', 'credit_note')]
elif self.partner_id.l10n_cl_sii_taxpayer_type == '2':
domain += [('code', 'in', ['70', '71', '56', '61'])]
elif self.partner_id.l10n_cl_sii_taxpayer_type == '3':
domain += [('code', 'in', ['35', '38', '39', '41', '56', '61'])]
elif not self.partner_id.l10n_cl_sii_taxpayer_type or self.partner_id.country_id != self.env.ref(
'base.cl') or self.partner_id.l10n_cl_sii_taxpayer_type == '4':
domain += [('code', 'in', [])]
return domain
def _check_document_types_post(self):
for rec in self.filtered(
lambda r: r.company_id.account_fiscal_country_id.code == "CL" and
r.journal_id.type in ['sale', 'purchase']):
tax_payer_type = rec.partner_id.l10n_cl_sii_taxpayer_type
vat = rec.partner_id.vat
country_id = rec.partner_id.country_id
latam_document_type_code = rec.l10n_latam_document_type_id.code
if (not tax_payer_type or not vat) and (country_id.code == "CL" and latam_document_type_code
and latam_document_type_code not in ['35', '38', '39', '41']):
raise ValidationError(_('Tax payer type and vat number are mandatory for this type of '
'document. Please set the current tax payer type of this customer'))
if rec.journal_id.type == 'sale' and rec.journal_id.l10n_latam_use_documents:
if country_id.code != "CL":
if not ((tax_payer_type == '4' and latam_document_type_code in ['110', '111', '112']) or (
tax_payer_type == '3' and latam_document_type_code in ['39', '41', '61', '56'])):
raise ValidationError(_(
'Document types for foreign customers must be export type (codes 110, 111 or 112) or you \
should define the customer as an end consumer and use receipts (codes 39 or 41)'))
if rec.journal_id.type == 'purchase' and rec.journal_id.l10n_latam_use_documents:
if vat != SII_VAT and latam_document_type_code == '914':
raise ValidationError(_('The DIN document is intended to be used only with RUT 60805000-0'
' (Tesorería General de La República)'))
if not tax_payer_type or not vat:
if country_id.code == "CL" and latam_document_type_code not in [
'35', '38', '39', '41']:
raise ValidationError(_('Tax payer type and vat number are mandatory for this type of '
'document. Please set the current tax payer type of this supplier'))
if tax_payer_type == '2' and latam_document_type_code not in ['70', '71', '56', '61']:
raise ValidationError(_('The tax payer type of this supplier is incorrect for the selected type'
' of document.'))
if tax_payer_type in ['1', '3']:
if latam_document_type_code in ['70', '71']:
raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver '
'fees documents'))
if latam_document_type_code in ['110', '111', '112']:
raise ValidationError(_('The tax payer type of this supplier is not entitled to deliver '
'imports documents'))
if tax_payer_type == '4' or country_id.code != "CL":
raise ValidationError(_('You need a journal without the use of documents for foreign '
'suppliers'))
@api.onchange('journal_id')
def _l10n_cl_onchange_journal(self):
if self.company_id.country_id.code == 'CL':
self.l10n_latam_document_type_id = False
def _post(self, soft=True):
self._check_document_types_post()
return super()._post(soft)
def _l10n_cl_get_formatted_sequence(self, number=0):
return '%s %06d' % (self.l10n_latam_document_type_id.doc_code_prefix, number)
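    # Hedged illustration (not in the original module): with a hypothetical
    # doc_code_prefix of 'FAC' and number=1, the '%s %06d' format above yields
    # 'FAC 000001'.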
def _get_starting_sequence(self):
""" If use documents then will create a new starting sequence using the document type code prefix and the
journal document number with a 6 padding number """
if self.journal_id.l10n_latam_use_documents and self.env.company.account_fiscal_country_id.code == "CL":
if self.l10n_latam_document_type_id:
return self._l10n_cl_get_formatted_sequence()
return super()._get_starting_sequence()
def _get_last_sequence_domain(self, relaxed=False):
where_string, param = super(AccountMove, self)._get_last_sequence_domain(relaxed)
if self.company_id.account_fiscal_country_id.code == "CL" and self.l10n_latam_use_documents:
where_string = where_string.replace('journal_id = %(journal_id)s AND', '')
where_string += ' AND l10n_latam_document_type_id = %(l10n_latam_document_type_id)s AND ' \
'company_id = %(company_id)s AND move_type IN %(move_type)s'
param['company_id'] = self.company_id.id or False
param['l10n_latam_document_type_id'] = self.l10n_latam_document_type_id.id or 0
param['move_type'] = (('in_invoice', 'in_refund') if
self.l10n_latam_document_type_id._is_doc_type_vendor() else ('out_invoice', 'out_refund'))
return where_string, param
def _get_name_invoice_report(self):
self.ensure_one()
if self.l10n_latam_use_documents and self.company_id.account_fiscal_country_id.code == 'CL':
return 'l10n_cl.report_invoice_document'
return super()._get_name_invoice_report()
def _l10n_cl_get_invoice_totals_for_report(self):
self.ensure_one()
tax_ids_filter = tax_line_id_filter = None
include_sii = self._l10n_cl_include_sii()
if include_sii:
tax_ids_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14))
tax_line_id_filter = (lambda aml, tax: bool(tax.l10n_cl_sii_code != 14))
tax_lines_data = self._prepare_tax_lines_data_for_totals_from_invoice(
tax_ids_filter=tax_ids_filter, tax_line_id_filter=tax_line_id_filter)
if include_sii:
amount_untaxed = self.currency_id.round(
self.amount_total - sum([x['tax_amount'] for x in tax_lines_data if 'tax_amount' in x]))
else:
amount_untaxed = self.amount_untaxed
return self._get_tax_totals(self.partner_id, tax_lines_data, self.amount_total, amount_untaxed, self.currency_id)
def _l10n_cl_include_sii(self):
self.ensure_one()
return self.l10n_latam_document_type_id.code in ['39', '41', '110', '111', '112', '34']
def _is_manual_document_number(self):
if self.journal_id.company_id.country_id.code == 'CL':
return self.journal_id.type == 'purchase' and not self.l10n_latam_document_type_id._is_doc_type_vendor()
return super()._is_manual_document_number()
|
jeremiahyan/odoo
|
addons/l10n_cl/models/account_move.py
|
Python
|
gpl-3.0
| 9,118
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
STANDARD_USERS = ("Guest", "Administrator")
class Member(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def validate(self):
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
validate_email_add(email.strip(), True)
|
indictranstech/erpnext
|
erpnext/non_profit/doctype/member/member.py
|
Python
|
agpl-3.0
| 738
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt, get_datetime
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.manufacturing.doctype.production_order.production_order import make_stock_entry
from erpnext.stock.doctype.stock_entry import test_stock_entry
from erpnext.projects.doctype.time_log.time_log import OverProductionLoggedError
class TestProductionOrder(unittest.TestCase):
def check_planned_qty(self):
set_perpetual_inventory(0)
planned0 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty") or 0
pro_doc = frappe.copy_doc(test_records[0])
pro_doc.insert()
pro_doc.submit()
# add raw materials to stores
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="Stores - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="Stores - _TC", qty=100, incoming_rate=100)
# from stores to wip
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Material Transfer for Manufacture", 4))
for d in s.get("items"):
d.s_warehouse = "Stores - _TC"
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-02"
s.insert()
s.submit()
# from wip to fg
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 4))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-03"
s.insert()
s.submit()
self.assertEqual(frappe.db.get_value("Production Order", pro_doc.name,
"produced_qty"), 4)
planned1 = frappe.db.get_value("Bin", {"item_code": "_Test FG Item", "warehouse": "_Test Warehouse 1 - _TC"}, "planned_qty")
self.assertEqual(planned1 - planned0, 6)
return pro_doc
def test_over_production(self):
from erpnext.manufacturing.doctype.production_order.production_order import StockOverProductionError
pro_doc = self.check_planned_qty()
test_stock_entry.make_stock_entry(item_code="_Test Item",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
test_stock_entry.make_stock_entry(item_code="_Test Item Home Desktop 100",
target="_Test Warehouse - _TC", qty=100, incoming_rate=100)
s = frappe.get_doc(make_stock_entry(pro_doc.name, "Manufacture", 7))
s.fiscal_year = "_Test Fiscal Year 2013"
s.posting_date = "2013-01-04"
s.insert()
self.assertRaises(StockOverProductionError, s.submit)
def test_make_time_log(self):
from erpnext.manufacturing.doctype.production_order.production_order import make_time_log
from frappe.utils import cstr
from frappe.utils import time_diff_in_hours
prod_order = frappe.get_doc({
"doctype": "Production Order",
"production_item": "_Test FG Item 2",
"bom_no": "BOM/_Test FG Item 2/001",
"qty": 1,
"wip_warehouse": "_Test Warehouse - _TC",
"fg_warehouse": "_Test Warehouse 1 - _TC",
"company": "_Test Company",
"planned_start_date": "2014-11-25 00:00:00"
})
prod_order.set_production_order_operations()
prod_order.insert()
prod_order.submit()
d = prod_order.operations[0]
d.completed_qty = flt(d.completed_qty)
time_log = make_time_log(prod_order.name, cstr(d.idx) + ". " + d.operation, \
d.planned_start_time, d.planned_end_time, prod_order.qty - d.completed_qty,
operation_id=d.name)
self.assertEqual(prod_order.name, time_log.production_order)
self.assertEqual((prod_order.qty - d.completed_qty), time_log.completed_qty)
self.assertEqual(time_diff_in_hours(d.planned_end_time, d.planned_start_time),time_log.hours)
time_log.save()
time_log.submit()
manufacturing_settings = frappe.get_doc({
"doctype": "Manufacturing Settings",
"allow_production_on_holidays": 0
})
manufacturing_settings.save()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Completed")
self.assertEqual(prod_order.operations[0].completed_qty, prod_order.qty)
self.assertEqual(get_datetime(prod_order.operations[0].actual_start_time), get_datetime(time_log.from_time))
self.assertEqual(get_datetime(prod_order.operations[0].actual_end_time), get_datetime(time_log.to_time))
self.assertEqual(prod_order.operations[0].actual_operation_time, 60)
self.assertEqual(prod_order.operations[0].actual_operating_cost, 100)
time_log.cancel()
prod_order.load_from_db()
self.assertEqual(prod_order.operations[0].status, "Pending")
self.assertEqual(flt(prod_order.operations[0].completed_qty), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operation_time), 0)
self.assertEqual(flt(prod_order.operations[0].actual_operating_cost), 0)
time_log2 = frappe.copy_doc(time_log)
time_log2.update({
"completed_qty": 10,
"from_time": "2014-11-26 00:00:00",
"to_time": "2014-11-26 00:00:00",
"docstatus": 0
})
self.assertRaises(OverProductionLoggedError, time_log2.save)
test_records = frappe.get_test_records('Production Order')
|
gangadhar-kadam/verve_test_erp
|
erpnext/manufacturing/doctype/production_order/test_production_order.py
|
Python
|
agpl-3.0
| 5,084
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RIlluminahumanmethylation450kannoIlmn12Hg19(RPackage):
"""Annotation for Illumina's 450k methylation arrays
Manifests and annotation for Illumina's 450k array data."""
# This package is available via bioconductor but there is no available git
# repository.
homepage = "https://bioconductor.org/packages/release/data/annotation/html/IlluminaHumanMethylation450kanno.ilmn12.hg19.html"
url = "https://bioconductor.org/packages/release/data/annotation/src/contrib/IlluminaHumanMethylation450kanno.ilmn12.hg19_0.6.0.tar.gz"
version('0.6.0', sha256='249b8fd62add3c95b5047b597cff0868d26a98862a47cebd656edcd175a73b15')
depends_on('r@3.3.0:', type=('build', 'run'))
depends_on('r-minfi@1.19.15:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-illuminahumanmethylation450kanno-ilmn12-hg19/package.py
|
Python
|
lgpl-2.1
| 982
|
# pylint: disable=redefined-outer-name
import os
import pytest
from pylint import checkers
from pylint.lint import PyLinter
# pylint: disable=no-name-in-module
from pylint.testutils import MinimalTestReporter
@pytest.fixture
def linter(checker, register, enable, disable, reporter):
_linter = PyLinter()
_linter.set_reporter(reporter())
checkers.initialize(_linter)
if register:
register(_linter)
if checker:
_linter.register_checker(checker(_linter))
if disable:
for msg in disable:
_linter.disable(msg)
if enable:
for msg in enable:
_linter.enable(msg)
os.environ.pop('PYLINTRC', None)
return _linter
@pytest.fixture(scope='module')
def checker():
return None
@pytest.fixture(scope='module')
def register():
return None
@pytest.fixture(scope='module')
def enable():
return None
@pytest.fixture(scope='module')
def disable():
return None
@pytest.fixture(scope='module')
def reporter():
return MinimalTestReporter
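# Hedged usage sketch (not part of the original conftest): a test module can override
# the module-scoped fixtures above so the `linter` fixture registers a single checker,
# for example:
#
#   from pylint.checkers.format import FormatChecker
#
#   @pytest.fixture(scope='module')
#   def checker():
#       return FormatChecker
#
# FormatChecker is only an illustrative choice; any pylint checker class works.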
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/test/conftest.py
|
Python
|
apache-2.0
| 1,039
|
from contextlib import nested
from contextlib import contextmanager
import mock
import testing as T
import types
from core import db
from core.settings import Settings
from core.mail import MailQueue
from core.util import get_servlet_urlspec
from core.xmppclient import XMPPQueue
import servlets.newpush
from servlets.newpush import NewPushServlet
from servlets.newpush import send_notifications
class NewPushServletTest(T.TestCase, T.ServletTestMixin):
def get_handlers(self):
return [get_servlet_urlspec(NewPushServlet)]
def test_newpush(self):
pushes = []
def on_db_return(success, db_results):
assert success
pushes.extend(db_results.fetchall())
with nested(
mock.patch.dict(db.Settings, T.MockedSettings),
mock.patch.object(NewPushServlet, "get_current_user", return_value = "jblack"),
mock.patch.object(NewPushServlet, "redirect"),
mock.patch.object(MailQueue, "enqueue_user_email"),
):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
title = "BestPushInTheWorld"
branch = "jblack"
push_type = "regular"
uri = "/newpush?push-title=%s&branch=%s&push-type=%s" % (
title, branch, push_type
)
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_before = len(pushes)
response = self.fetch(uri)
assert response.error == None
pushes = []
db.execute_cb(db.push_pushes.select(), on_db_return)
num_pushes_after = len(pushes)
T.assert_equal(num_pushes_before + 1, num_pushes_after)
# There should be one call to nodebot after a push is created
T.assert_equal(servlets.newpush.subprocess.call.call_count, 1)
# Verify that we have a valid call to
# subprocess.call. Getting the arguments involves ugly
# mock magic
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
mock.ANY, # nickname
mock.ANY, # channel
mock.ANY, # msg
])
def call_on_db_complete(self, urgent=False):
mocked_self = mock.Mock()
mocked_self.check_db_results = mock.Mock(return_value=None)
mocked_self.redirect = mock.Mock(return_value=None)
mocked_self.pushtype = 'normal'
mocked_self.on_db_complete = types.MethodType(NewPushServlet.on_db_complete.im_func, mocked_self)
push = mock.Mock()
push.lastrowid = 0
no_watcher_req = {
'user': 'testuser',
'watchers': None,
}
watched_req = {
'user': 'testuser',
'watchers': 'testuser1,testuser2',
}
if urgent:
no_watcher_req['tags'] = 'urgent'
watched_req['tags'] = 'urgent'
mocked_self.pushtype = 'urgent'
reqs = [no_watcher_req, watched_req]
mocked_self.on_db_complete('success', [push, reqs])
@mock.patch('servlets.newpush.send_notifications')
def test_normal_people_on_db_complete(self, notify):
self.call_on_db_complete()
notify.called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
@mock.patch('servlets.newpush.send_notifications')
def test_urgent_people_on_db_complete(self, notify):
self.call_on_db_complete(urgent=True)
notify.called_once_with(set(['testuser', 'testuser1', 'testuser2']), mock.ANY, mock.ANY)
class NotificationsTestCase(T.TestCase):
@contextmanager
def mocked_notifications(self):
with mock.patch("%s.servlets.newpush.subprocess.call" % __name__) as mocked_call:
with mock.patch.object(MailQueue, "enqueue_user_email") as mocked_mail:
with mock.patch.object(XMPPQueue, "enqueue_user_xmpp") as mocked_xmpp:
yield mocked_call, mocked_mail, mocked_xmpp
def test_send_notifications(self):
"""New push sends notifications via IRC, XMPP and emails."""
self.people = ["fake_user1", "fake_user2"]
self.pushurl = "/fake_push_url?id=123"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
url = "https://%s%s" % (Settings['main_app']['servername'], self.pushurl)
msg = "%s: %s push starting! %s" % (', '.join(self.people), self.pushtype, url)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
msg,
mock.ANY, # subject
)
mocked_xmpp.assert_called_once_with(
self.people,
"Push starting! %s" % url
)
def test_send_notifications_empty_user_list(self):
"""If there is no pending push request we'll only send IRC and
email notifications, but not XMPP messages."""
self.people = []
self.pushurl = "fake_push_url"
self.pushtype = "fake_puth_type"
with self.mocked_notifications() as (mocked_call, mocked_mail, mocked_xmpp):
send_notifications(self.people, self.pushtype, self.pushurl)
mocked_call.assert_called_once_with([
'/nail/sys/bin/nodebot',
'-i',
Settings['irc']['nickname'],
Settings['irc']['channel'],
mock.ANY, # msg
])
mocked_mail.assert_called_once_with(
Settings['mail']['notifyall'],
mock.ANY, # msg
mock.ANY, # subject
)
T.assert_is(mocked_xmpp.called, False)
if __name__ == '__main__':
T.run()
|
pombredanne/pushmanager
|
tests/test_servlet_newpush.py
|
Python
|
apache-2.0
| 6,300
|
from temboo.Library.Yelp.SearchByAddress import SearchByAddress, SearchByAddressInputSet, SearchByAddressResultSet, SearchByAddressChoreographyExecution
from temboo.Library.Yelp.SearchByBoundingBox import SearchByBoundingBox, SearchByBoundingBoxInputSet, SearchByBoundingBoxResultSet, SearchByBoundingBoxChoreographyExecution
from temboo.Library.Yelp.SearchByCategory import SearchByCategory, SearchByCategoryInputSet, SearchByCategoryResultSet, SearchByCategoryChoreographyExecution
from temboo.Library.Yelp.SearchByCity import SearchByCity, SearchByCityInputSet, SearchByCityResultSet, SearchByCityChoreographyExecution
from temboo.Library.Yelp.SearchByCoordinates import SearchByCoordinates, SearchByCoordinatesInputSet, SearchByCoordinatesResultSet, SearchByCoordinatesChoreographyExecution
from temboo.Library.Yelp.SearchByNeighborhood import SearchByNeighborhood, SearchByNeighborhoodInputSet, SearchByNeighborhoodResultSet, SearchByNeighborhoodChoreographyExecution
from temboo.Library.Yelp.SearchForBusiness import SearchForBusiness, SearchForBusinessInputSet, SearchForBusinessResultSet, SearchForBusinessChoreographyExecution
from temboo.Library.Yelp.SearchForBusinessesWithDeals import SearchForBusinessesWithDeals, SearchForBusinessesWithDealsInputSet, SearchForBusinessesWithDealsResultSet, SearchForBusinessesWithDealsChoreographyExecution
|
jordanemedlock/psychtruths
|
temboo/core/Library/Yelp/__init__.py
|
Python
|
apache-2.0
| 1,354
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
import urllib
from oslo.config import cfg
from nova import exception
from nova import utils
CONF = cfg.CONF
class SecurityGroupBase(object):
def parse_cidr(self, cidr):
if cidr:
try:
cidr = urllib.unquote(cidr).decode()
except Exception as e:
self.raise_invalid_cidr(cidr, e)
if not utils.is_valid_cidr(cidr):
self.raise_invalid_cidr(cidr)
return cidr
else:
return '0.0.0.0/0'
@staticmethod
def new_group_ingress_rule(grantee_group_id, protocol, from_port,
to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, group_id=grantee_group_id)
@staticmethod
def new_cidr_ingress_rule(grantee_cidr, protocol, from_port, to_port):
return SecurityGroupBase._new_ingress_rule(
protocol, from_port, to_port, cidr=grantee_cidr)
@staticmethod
def _new_ingress_rule(ip_protocol, from_port, to_port,
group_id=None, cidr=None):
values = {}
if group_id:
values['group_id'] = group_id
# Open everything if an explicit port range or type/code are not
# specified, but only if a source group was specified.
ip_proto_upper = ip_protocol.upper() if ip_protocol else ''
if (ip_proto_upper == 'ICMP' and
from_port is None and to_port is None):
from_port = -1
to_port = -1
elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
and to_port is None):
from_port = 1
to_port = 65535
elif cidr:
values['cidr'] = cidr
if ip_protocol and from_port is not None and to_port is not None:
ip_protocol = str(ip_protocol)
try:
# Verify integer conversions
from_port = int(from_port)
to_port = int(to_port)
except ValueError:
if ip_protocol.upper() == 'ICMP':
raise exception.InvalidInput(reason="Type and"
" Code must be integers for ICMP protocol type")
else:
raise exception.InvalidInput(reason="To and From ports "
"must be integers")
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise exception.InvalidIpProtocol(protocol=ip_protocol)
# Verify that from_port must always be less than
# or equal to to_port
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port > to_port)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Former value cannot"
" be greater than the later")
# Verify valid TCP, UDP port ranges
if (ip_protocol.upper() in ['TCP', 'UDP'] and
(from_port < 1 or to_port > 65535)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="Valid TCP ports should"
" be between 1-65535")
# Verify ICMP type and code
if (ip_protocol.upper() == "ICMP" and
(from_port < -1 or from_port > 255 or
to_port < -1 or to_port > 255)):
raise exception.InvalidPortRange(from_port=from_port,
to_port=to_port, msg="For ICMP, the"
" type:code must be valid")
values['protocol'] = ip_protocol
values['from_port'] = from_port
values['to_port'] = to_port
else:
# If cidr based filtering, protocol and ports are mandatory
if cidr:
return None
return values
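    # Hedged illustration (not in the original module): when only a source group is
    # given, e.g. _new_ingress_rule('tcp', None, None, group_id='g1'), the branch above
    # opens the full range and returns
    #   {'group_id': 'g1', 'protocol': 'tcp', 'from_port': 1, 'to_port': 65535}
    # whereas a CIDR-based rule that omits the ports falls through to the final branch
    # and returns None.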
def create_security_group_rule(self, context, security_group, new_rule):
if self.rule_exists(security_group, new_rule):
msg = (_('This rule already exists in group %s') %
new_rule['parent_group_id'])
self.raise_group_already_exists(msg)
return self.add_rules(context, new_rule['parent_group_id'],
security_group['name'],
[new_rule])[0]
def rule_exists(self, security_group, new_rule):
"""Indicates whether the specified rule is already
defined in the given security group.
"""
for rule in security_group['rules']:
is_duplicate = True
keys = ('group_id', 'cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != new_rule.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
def validate_property(self, value, property, allowed):
pass
def ensure_default(self, context):
pass
def trigger_handler(self, event, *args):
pass
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
pass
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
pass
def populate_security_groups(self, instance, security_groups):
"""Called when populating the database for an instances
security groups."""
raise NotImplementedError()
def create_security_group(self, context, name, description):
raise NotImplementedError()
def get(self, context, name=None, id=None, map_exception=False):
raise NotImplementedError()
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
raise NotImplementedError()
def destroy(self, context, security_group):
raise NotImplementedError()
def add_rules(self, context, id, name, vals):
raise NotImplementedError()
def remove_rules(self, context, security_group, rule_ids):
raise NotImplementedError()
def get_rule(self, context, id):
raise NotImplementedError()
def get_instance_security_groups(self, context, instance_id,
instance_uuid=None, detailed=False):
raise NotImplementedError()
def add_to_instance(self, context, instance, security_group_name):
raise NotImplementedError()
def remove_from_instance(self, context, instance, security_group_name):
raise NotImplementedError()
@staticmethod
def raise_invalid_property(msg):
raise NotImplementedError()
@staticmethod
def raise_group_already_exists(msg):
raise NotImplementedError()
@staticmethod
def raise_invalid_group(msg):
raise NotImplementedError()
@staticmethod
def raise_invalid_cidr(cidr, decoding_exception=None):
raise NotImplementedError()
@staticmethod
def raise_over_quota(msg):
raise NotImplementedError()
@staticmethod
def raise_not_found(msg):
raise NotImplementedError()
|
zestrada/nova-cs498cc
|
nova/network/security_group/security_group_base.py
|
Python
|
apache-2.0
| 8,407
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
def crop(data, offsets, crop_shape):
def indexOf(shape, index):
result = []
for dim in reversed(shape):
result.append(index % dim)
            index = index // dim  # integer division keeps indices exact under Python 3 as well
return result[::-1]
result = []
for i, value in enumerate(data.flatten()):
index = indexOf(data.shape, i)
selected = True
if len(index) == len(offsets):
for j, offset in enumerate(offsets):
selected = selected and index[j] >= offset and index[
j] < crop_shape[j] + offset
if selected:
result.append(value)
return np.array(result).reshape(crop_shape)
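# Hedged illustration (not part of the original test): for
#   data = np.arange(16).reshape(4, 4), offsets = [1, 1], crop_shape = (2, 2)
# the reference crop() above selects rows 1-2 and columns 1-2 and returns
#   [[ 5,  6],
#    [ 9, 10]]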
class TestCropOp(OpTest):
def setUp(self):
self.op_type = "crop"
self.crop_by_input = False
self.offset_by_input = False
self.attrs = {}
self.initTestCase()
if self.crop_by_input:
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
'Y': np.random.random(self.crop_shape).astype("float32")
}
else:
self.attrs['shape'] = self.crop_shape
self.inputs = {
'X': np.random.random(self.x_shape).astype("float32"),
}
if self.offset_by_input:
self.inputs['Offsets'] = np.array(self.offsets).astype('int32')
else:
self.attrs['offsets'] = self.offsets
self.outputs = {
'Out': crop(self.inputs['X'], self.offsets, self.crop_shape)
}
def initTestCase(self):
self.x_shape = (8, 8)
self.crop_shape = (2, 2)
self.offsets = [1, 2]
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', max_relative_error=0.006)
class TestCase1(TestCropOp):
def initTestCase(self):
self.x_shape = (16, 8, 32)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
class TestCase2(TestCropOp):
def initTestCase(self):
self.x_shape = (4, 8)
self.crop_shape = [4, 8]
self.offsets = [0, 0]
class TestCase3(TestCropOp):
def initTestCase(self):
self.x_shape = (4, 8, 16)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 5, 3]
self.crop_by_input = True
class TestCase4(TestCropOp):
def initTestCase(self):
self.x_shape = (4, 4)
self.crop_shape = [4, 4]
self.offsets = [0, 0]
self.crop_by_input = True
class TestCase5(TestCropOp):
def initTestCase(self):
self.x_shape = (3, 4, 5)
self.crop_shape = [2, 2, 3]
self.offsets = [1, 0, 2]
self.offset_by_input = True
class TestCase6(TestCropOp):
def initTestCase(self):
self.x_shape = (10, 9, 14)
self.crop_shape = [3, 3, 5]
self.offsets = [3, 5, 4]
self.crop_by_input = True
self.offset_by_input = True
if __name__ == '__main__':
unittest.main()
|
QiJune/Paddle
|
python/paddle/fluid/tests/unittests/test_crop_op.py
|
Python
|
apache-2.0
| 3,722
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CTC (Connectionist Temporal Classification) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ctc_ops
from tensorflow.python.ops.nn_grad import _BroadcastMul
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access, invalid-name
@tf_export("nn.ctc_loss")
def ctc_loss(labels, inputs, sequence_length,
preprocess_collapse_repeated=False,
ctc_merge_repeated=True,
ignore_longer_outputs_than_inputs=False, time_major=True):
"""Computes the CTC (Connectionist Temporal Classification) Loss.
This op implements the CTC loss as presented in the article:
[A. Graves, S. Fernandez, F. Gomez, J. Schmidhuber.
Connectionist Temporal Classification: Labeling Unsegmented Sequence Data
with Recurrent Neural Networks. ICML 2006, Pittsburgh, USA,
pp. 369-376.](http://www.cs.toronto.edu/~graves/icml_2006.pdf)
Input requirements:
```
sequence_length(b) <= time for all b
max(labels.indices(labels.indices[:, 1] == b, 2))
<= sequence_length(b) for all b.
```
Notes:
This class performs the softmax operation for you, so inputs should
be e.g. linear projections of outputs by an LSTM.
The `inputs` Tensor's innermost dimension size, `num_classes`, represents
`num_labels + 1` classes, where num_labels is the number of true labels, and
the largest value `(num_classes - 1)` is reserved for the blank label.
For example, for a vocabulary containing 3 labels `[a, b, c]`,
`num_classes = 4` and the labels indexing is `{a: 0, b: 1, c: 2, blank: 3}`.
Regarding the arguments `preprocess_collapse_repeated` and
`ctc_merge_repeated`:
If `preprocess_collapse_repeated` is True, then a preprocessing step runs
before loss calculation, wherein repeated labels passed to the loss
are merged into single labels. This is useful if the training labels come
from, e.g., forced alignments and therefore have unnecessary repetitions.
If `ctc_merge_repeated` is set False, then deep within the CTC calculation,
repeated non-blank labels will not be merged and are interpreted
as individual labels. This is a simplified (non-standard) version of CTC.
Here is a table of the (roughly) expected first order behavior:
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=True`
Classical CTC behavior: Outputs true repeated classes with blanks in
between, and can also output repeated classes with no blanks in
between that need to be collapsed by the decoder.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=False`
Never learns to output repeated classes, as they are collapsed
in the input labels before training.
* `preprocess_collapse_repeated=False`, `ctc_merge_repeated=False`
Outputs repeated classes with blanks in between, but generally does not
require the decoder to collapse/merge repeated classes.
* `preprocess_collapse_repeated=True`, `ctc_merge_repeated=True`
Untested. Very likely will not learn to output repeated classes.
  The `ignore_longer_outputs_than_inputs` option allows specifying the behavior
of the CTCLoss when dealing with sequences that have longer outputs than
inputs. If true, the CTCLoss will simply return zero gradient for those
items, otherwise an InvalidArgument error is returned, stopping training.
Args:
labels: An `int32` `SparseTensor`.
`labels.indices[i, :] == [b, t]` means `labels.values[i]` stores
the id for (batch b, time t).
`labels.values[i]` must take on values in `[0, num_labels)`.
See `core/ops/ctc_ops.cc` for more details.
inputs: 3-D `float` `Tensor`.
If time_major == False, this will be a `Tensor` shaped:
`[batch_size, max_time, num_classes]`.
If time_major == True (default), this will be a `Tensor` shaped:
`[max_time, batch_size, num_classes]`.
The logits.
sequence_length: 1-D `int32` vector, size `[batch_size]`.
The sequence lengths.
preprocess_collapse_repeated: Boolean. Default: False.
If True, repeated labels are collapsed prior to the CTC calculation.
ctc_merge_repeated: Boolean. Default: True.
ignore_longer_outputs_than_inputs: Boolean. Default: False.
If True, sequences with longer outputs than inputs will be ignored.
time_major: The shape format of the `inputs` Tensors.
If True, these `Tensors` must be shaped `[max_time, batch_size,
num_classes]`.
If False, these `Tensors` must be shaped `[batch_size, max_time,
num_classes]`.
Using `time_major = True` (default) is a bit more efficient because it
avoids transposes at the beginning of the ctc_loss calculation. However,
      most TensorFlow data is batch-major, so this function also accepts
inputs in batch-major form.
Returns:
A 1-D `float` `Tensor`, size `[batch]`, containing the negative log
probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
"""
# The second, third, etc output tensors contain the gradients. We use it in
# _CTCLossGrad() below.
if not isinstance(labels, sparse_tensor.SparseTensor):
raise TypeError("Expected labels (first argument) to be a SparseTensor")
# For internal calculations, we transpose to [time, batch, num_classes]
if not time_major:
inputs = array_ops.transpose(inputs, [1, 0, 2]) # (B,T,N) => (T,B,N)
loss, _ = gen_ctc_ops.ctc_loss(
inputs,
labels.indices,
labels.values,
sequence_length,
preprocess_collapse_repeated=preprocess_collapse_repeated,
ctc_merge_repeated=ctc_merge_repeated,
ignore_longer_outputs_than_inputs=ignore_longer_outputs_than_inputs)
return loss
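# Hedged usage sketch (not part of the original module); shapes follow the docstring
# above, and the tensor values here are hypothetical:
#   logits = tf.random_normal([50, 2, 28])                  # [max_time, batch_size, num_classes]
#   seq_len = tf.constant([50, 40], dtype=tf.int32)         # [batch_size]
#   labels = tf.SparseTensor(indices, values, dense_shape)  # int32 ids in [0, num_labels)
#   loss = tf.nn.ctc_loss(labels, logits, seq_len)          # 1-D [batch_size] negative log probs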
# pylint: disable=unused-argument
@ops.RegisterGradient("CTCLoss")
def _CTCLossGrad(op, grad_loss, _):
"""The derivative provided by CTC Loss.
Args:
op: the CTCLoss op.
grad_loss: The backprop for cost.
Returns:
The CTC Loss gradient.
"""
# Outputs are: loss, grad
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1], message="Currently there is no way to take the second "
" derivative of ctc_loss due to the fused implementation's interaction "
" with tf.gradients()")
# Return gradient for inputs and None for
# labels_indices, labels_values and sequence_length
return [_BroadcastMul(grad_loss, grad_without_gradient), None, None, None]
@tf_export("nn.ctc_greedy_decoder")
def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True):
"""Performs greedy decoding on the logits given in input (best path).
Note: Regardless of the value of merge_repeated, if the maximum index of a
given time and batch corresponds to the blank index `(num_classes - 1)`, no
new element is emitted.
If `merge_repeated` is `True`, merge repeated classes in output.
This means that if consecutive logits' maximum indices are the same,
only the first of these is emitted. The sequence `A B B * B * B` (where '*'
is the blank label) becomes
* `A B B B` if `merge_repeated=True`.
* `A B B B B` if `merge_repeated=False`.
Args:
inputs: 3-D `float` `Tensor` sized
`[max_time, batch_size, num_classes]`. The logits.
sequence_length: 1-D `int32` vector containing sequence lengths,
having size `[batch_size]`.
merge_repeated: Boolean. Default: True.
Returns:
A tuple `(decoded, neg_sum_logits)` where
decoded: A single-element list. `decoded[0]`
      is a `SparseTensor` containing the decoded outputs s.t.:
`decoded.indices`: Indices matrix `(total_decoded_outputs, 2)`.
The rows store: `[batch, time]`.
`decoded.values`: Values vector, size `(total_decoded_outputs)`.
The vector stores the decoded classes.
`decoded.dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length]`
neg_sum_logits: A `float` matrix `(batch_size x 1)` containing, for the
sequence found, the negative of the sum of the greatest logit at each
timeframe.
"""
outputs = gen_ctc_ops.ctc_greedy_decoder(
inputs, sequence_length, merge_repeated=merge_repeated)
(decoded_ix, decoded_val, decoded_shape, log_probabilities) = outputs
return ([sparse_tensor.SparseTensor(decoded_ix, decoded_val, decoded_shape)],
log_probabilities)
@tf_export("nn.ctc_beam_search_decoder")
def ctc_beam_search_decoder(inputs, sequence_length, beam_width=100,
top_paths=1, merge_repeated=True):
"""Performs beam search decoding on the logits given in input.
**Note** The `ctc_greedy_decoder` is a special case of the
`ctc_beam_search_decoder` with `top_paths=1` and `beam_width=1` (but
that decoder is faster for this special case).
If `merge_repeated` is `True`, merge repeated classes in the output beams.
This means that if consecutive entries in a beam are the same,
only the first of these is emitted. That is, when the sequence is
`A B B * B * B` (where '*' is the blank label), the return value is:
* `A B` if `merge_repeated = True`.
* `A B B B` if `merge_repeated = False`.
Args:
inputs: 3-D `float` `Tensor`, size
`[max_time x batch_size x num_classes]`. The logits.
sequence_length: 1-D `int32` vector containing sequence lengths,
having size `[batch_size]`.
beam_width: An int scalar >= 0 (beam search beam width).
top_paths: An int scalar >= 0, <= beam_width (controls output size).
merge_repeated: Boolean. Default: True.
Returns:
A tuple `(decoded, log_probabilities)` where
decoded: A list of length top_paths, where `decoded[j]`
is a `SparseTensor` containing the decoded outputs:
`decoded[j].indices`: Indices matrix `(total_decoded_outputs[j] x 2)`
The rows store: [batch, time].
`decoded[j].values`: Values vector, size `(total_decoded_outputs[j])`.
The vector stores the decoded classes for beam j.
`decoded[j].dense_shape`: Shape vector, size `(2)`.
The shape values are: `[batch_size, max_decoded_length[j]]`.
log_probability: A `float` matrix `(batch_size x top_paths)` containing
sequence log-probabilities.
"""
decoded_ixs, decoded_vals, decoded_shapes, log_probabilities = (
gen_ctc_ops.ctc_beam_search_decoder(
inputs, sequence_length, beam_width=beam_width, top_paths=top_paths,
merge_repeated=merge_repeated))
return (
[sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape)
in zip(decoded_ixs, decoded_vals, decoded_shapes)],
log_probabilities)
ops.NotDifferentiable("CTCGreedyDecoder")
ops.NotDifferentiable("CTCBeamSearchDecoder")
|
kobejean/tensorflow
|
tensorflow/python/ops/ctc_ops.py
|
Python
|
apache-2.0
| 11,926
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-16 17:16
from __future__ import unicode_literals
import base.utils
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('challenges', '0022_challengephase_dataset_split'),
]
operations = [
migrations.AlterField(
model_name='challenge',
name='evaluation_script',
field=models.FileField(default=False, upload_to=base.utils.RandomFileName('evaluation_scripts')),
),
migrations.AlterField(
model_name='challengephase',
name='test_annotation',
field=models.FileField(upload_to=base.utils.RandomFileName('test_annotations')),
),
]
|
taranjeet/EvalAI
|
apps/challenges/migrations/0023_upload_unique_random_filename.py
|
Python
|
bsd-3-clause
| 767
|
"""
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
# Load stc to in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50)
stc = mne.morph_data('sample', 'fsaverage', stc, grade=5, smooth=20,
subjects_dir=subjects_dir)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
###############################################################################
# Compute statistic
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a list of multi-dimensional array of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
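# Hedged note (not in the original example): the ppf call above is the inverse CDF of
# an F distribution, so it converts the per-test p-value threshold into the matching
# F-statistic cutoff with (n_subjects1 - 1, n_subjects2 - 1) degrees of freedom.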
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
threshold=f_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both', colormap='mne',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.set_data_time_index(0)
brain.show_view('lateral')
brain.save_image('clusters.png')
|
Odingod/mne-python
|
examples/stats/plot_cluster_stats_spatio_temporal_2samp.py
|
Python
|
bsd-3-clause
| 4,284
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatWiFi(BaseWeChatAPI):
API_BASE_URL = 'https://api.weixin.qq.com/bizwifi/'
def list_shops(self, page_index=1, page_size=20):
"""
获取门店列表
详情请参考
http://mp.weixin.qq.com/wiki/15/bcfb5d4578ea818b89913472cf2bbf8f.html
:param page_index: 可选,分页下标,默认从1开始
:param page_size: 可选,每页的个数,默认20个,最大20个
:return: 返回的 JSON 数据包
"""
res = self._post(
'shop/list',
data={
'pageindex': page_index,
'pagesize': page_size,
}
)
return res['data']
def add_device(self, shop_id, ssid, password, bssid):
"""
添加设备
详情请参考
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param shop_id: 门店 ID
:param ssid: 无线网络设备的ssid。非认证公众号添加的ssid必需是“WX”开头(“WX”为大写字母),
认证公众号和第三方平台无此限制;所有ssid均不能包含中文字符
:param password: 无线网络设备的密码,大于8个字符,不能包含中文字符
:param bssid: 无线网络设备无线mac地址,格式冒号分隔,字符长度17个,并且字母小写
:return: 返回的 JSON 数据包
"""
return self._post(
'device/add',
data={
'shop_id': shop_id,
'ssid': ssid,
'password': password,
'bssid': bssid,
}
)
def list_devices(self, shop_id=None, page_index=1, page_size=20):
"""
查询设备
详情请参考
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param shop_id: 可选,门店 ID
:param page_index: 可选,分页下标,默认从1开始
:param page_size: 可选,每页的个数,默认20个,最大20个
:return: 返回的 JSON 数据包
"""
data = optionaldict(
shop_id=shop_id,
pageindex=page_index,
pagesize=page_size
)
res = self._post('device/list', data=data)
return res['data']
def delete_device(self, bssid):
"""
删除设备
详情请参考
http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html
:param bssid: 无线网络设备无线mac地址,格式冒号分隔,字符长度17个,并且字母小写
:return: 返回的 JSON 数据包
"""
return self._post('device/delete', data={'bssid': bssid})
def get_qrcode_url(self, shop_id, img_id):
"""
获取物料二维码图片网址
详情请参考
http://mp.weixin.qq.com/wiki/7/fcd0378ef00617fc276be2b3baa80973.html
:param shop_id: 门店 ID
:param img_id: 物料样式编号:0-二维码,可用于自由设计宣传材料;
1-桌贴(二维码),100mm×100mm(宽×高),可直接张贴
:return: 二维码图片网址
"""
res = self._post(
'qrcode/get',
data={
'shop_id': shop_id,
'img_id': img_id,
}
)
return res['data']['qrcode_url']
def set_homepage(self, shop_id, template_id, url=None):
"""
设置商家主页
详情请参考
http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html
:param shop_id: 门店 ID
:param template_id: 模板ID,0-默认模板,1-自定义url
:param url: 自定义链接,当template_id为1时必填
:return: 返回的 JSON 数据包
"""
data = {
'shop_id': shop_id,
'template_id': template_id,
}
if url:
data['struct'] = {'url': url}
return self._post('homepage/set', data=data)
def get_homepage(self, shop_id):
"""
Query the merchant homepage.
For details see
http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html
:param shop_id: shop ID
:return: the returned JSON data
"""
res = self._post('homepage/get', data={'shop_id': shop_id})
return res['data']
def list_statistics(self, begin_date, end_date, shop_id=-1):
"""
Wi-Fi data statistics.
For details see
http://mp.weixin.qq.com/wiki/8/dfa2b756b66fca5d9b1211bc18812698.html
:param begin_date: start date; the maximum time span is 30 days
:param end_date: end date; the maximum time span is 30 days
:param shop_id: optional, shop ID to filter by; -1 for overall statistics
:return: the returned JSON data
"""
if isinstance(begin_date, (datetime, date)):
begin_date = begin_date.strftime('%Y-%m-%d')
if isinstance(end_date, (datetime, date)):
end_date = end_date.strftime('%Y-%m-%d')
res = self._post(
'statistics/list',
data={
'begin_date': begin_date,
'end_date': end_date,
'shop_id': shop_id
}
)
return res['data']
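# A hypothetical usage sketch (not part of the original module), assuming a
# configured wechatpy client that exposes this API as ``client.wifi``:
#
#   from datetime import date
#   shops = client.wifi.list_shops()
#   client.wifi.set_homepage(shop_id=1234, template_id=1,
#                            url='http://example.com/welcome')
#   stats = client.wifi.list_statistics(date(2015, 5, 1), date(2015, 5, 20))
#
# ``list_statistics`` accepts date/datetime objects and formats them as
# 'YYYY-MM-DD' strings before posting.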
|
chenjiancan/wechatpy
|
wechatpy/client/api/wifi.py
|
Python
|
mit
| 5,576
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url, include
import spirit.comment.bookmark.urls
import spirit.comment.flag.urls
import spirit.comment.history.urls
import spirit.comment.like.urls
from . import views
urlpatterns = [
url(r'^(?P<topic_id>\d+)/publish/$', views.publish, name='publish'),
url(r'^(?P<topic_id>\d+)/publish/(?P<pk>\d+)/quote/$', views.publish, name='publish'),
url(r'^(?P<pk>\d+)/update/$', views.update, name='update'),
url(r'^(?P<pk>\d+)/find/$', views.find, name='find'),
url(r'^(?P<topic_id>\d+)/move/$', views.move, name='move'),
url(r'^(?P<pk>\d+)/delete/$', views.delete, name='delete'),
url(r'^(?P<pk>\d+)/undelete/$', views.delete, kwargs={'remove': False, }, name='undelete'),
url(r'^upload/$', views.image_upload_ajax, name='image-upload-ajax'),
url(r'^bookmark/', include(spirit.comment.bookmark.urls, namespace='bookmark')),
url(r'^flag/', include(spirit.comment.flag.urls, namespace='flag')),
url(r'^history/', include(spirit.comment.history.urls, namespace='history')),
url(r'^like/', include(spirit.comment.like.urls, namespace='like')),
]
|
gogobook/Spirit
|
spirit/comment/urls.py
|
Python
|
mit
| 1,185
|
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["DataUpdateTrigger"]
from pyasm.command import Trigger
from tactic.command import PythonCmd
# Search is needed by the 'expression' op below; without this import the
# expression branch would raise a NameError.
from pyasm.search import Search
class DataValidationTrigger(Trigger):
def execute(my):
sobject = my.get_current_sobject()
class DataUpdateTrigger(Trigger):
def get_args_keys(my):
return {
}
def execute(my):
input = my.get_input()
if input.get("mode") == 'delete':
return
print "input: ", input
sobject = input.get("sobject")
trigger_sobj = my.get_trigger_sobj()
data = my.get_trigger_data()
print "data: ", data
op = data.get("op")
assert op
print "op: ", op
if op == 'join':
src_cols = data.get("src_cols")
dst_col = data.get("dst_col")
src_cols = src_cols.split("|")
delimiter = "_"
values = []
for col in src_cols:
value = sobject.get_value(col)
values.append(value)
value = delimiter.join(values)
print "value: ", value
sobject.set_value(dst_col, value)
sobject.commit()
elif op == 'part':
src_col = data.get("src_col")
dst_col = data.get("dst_col")
index = data.get("index")
if index:
index = int(index)
value = sobject.get_value(src_col)
delimiter = "_"
parts = value.split(delimiter)
value = parts[index]
sobject.set_value(dst_col, value)
sobject.commit()
elif op == 'expression':
# use the full expression language
dst_col = data.get("dst_col")
# {@GET(.sequence_code)}_{@GET(.name)}
# or
# {@UPPER(.name)}
expression = data.get("expression")
value = Search.eval(expression, sobject)
sobject.set_value(dst_col, value)
sobject.commit()
else:
return
if __name__ == '__main__':
trigger = DataUpdateTrigger()
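# Hypothetical sketches (not from the original file) of the trigger "data"
# dictionaries this class expects, one per supported op:
#
#   {"op": "join", "src_cols": "sequence_code|name", "dst_col": "code"}
#   {"op": "part", "src_col": "code", "dst_col": "sequence_code", "index": "0"}
#   {"op": "expression", "dst_col": "code",
#    "expression": "{@GET(.sequence_code)}_{@GET(.name)}"}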
|
sadanandb/pmt
|
src/tactic/triggers/data_trigger.py
|
Python
|
epl-1.0
| 2,434
|
"""
This page is in the table of contents.
The xml.py script is an import translator plugin to get a carving from an Art of Illusion xml file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
This example gets a triangle mesh for the xml file boolean.xml. This example is run in a terminal in the folder which contains boolean.xml and xml.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import xml
>>> xml.getCarving().getCarveRotatedBoundaryLayers()
[-1.159765625, None, [[(-18.925000000000001-2.4550000000000001j), (-18.754999999999981-2.4550000000000001j)
..
many more lines of the carving
..
An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then picking the XML choice. This will bring up the XML file chooser window; choose a place to save the file, then click "OK". Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported; this plugin will ignore the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of Illusion scene and they will all be carved, then fabricated together.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the Python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.solids import group
from fabmetheus_utilities.geometry.solids import trianglemesh
from fabmetheus_utilities.vector3 import Vector3
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
def processXMLElement(xmlElement):
"Process the xml element."
group.processShape( Cube, xmlElement)
class Cube( trianglemesh.TriangleMesh ):
"A cube object."
def addXMLSection(self, depth, output):
"Add the xml section for this object."
pass
def createShape(self):
"Create the shape."
square = [
complex( - self.inradius.x, - self.inradius.y ),
complex( self.inradius.x, - self.inradius.y ),
complex( self.inradius.x, self.inradius.y ),
complex( - self.inradius.x, self.inradius.y ) ]
bottomTopSquare = trianglemesh.getAddIndexedLoops( square, self.vertexes, [ - self.inradius.z, self.inradius.z ] )
trianglemesh.addPillarByLoops( self.faces, bottomTopSquare )
def setToObjectAttributeDictionary(self):
"Set the shape of this carvable object info."
self.inradius = evaluate.getVector3ByPrefixes( ['demisize', 'inradius'], Vector3(1.0, 1.0, 1.0), self.xmlElement )
self.inradius = evaluate.getVector3ByMultiplierPrefix( 2.0, 'size', self.inradius, self.xmlElement )
self.xmlElement.attributeDictionary['inradius.x'] = self.inradius.x
self.xmlElement.attributeDictionary['inradius.y'] = self.inradius.y
self.xmlElement.attributeDictionary['inradius.z'] = self.inradius.z
self.createShape()
|
natetrue/ReplicatorG
|
skein_engines/skeinforge-35/fabmetheus_utilities/geometry/solids/cube.py
|
Python
|
gpl-2.0
| 3,514
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Registry definition for fixture datasets."""
from flask_registry import RegistryProxy
from invenio_ext.registry import ModuleAutoDiscoveryRegistry
from invenio_utils.datastructures import LazyDict
fixtures_proxy = RegistryProxy(
'fixtures', ModuleAutoDiscoveryRegistry, 'fixtures')
def fixtures_loader():
"""Load fixtures datasets."""
out = {}
for fixture in fixtures_proxy:
for data in getattr(fixture, '__all__', dir(fixture)):
if data[-4:] != 'Data' or data in out:
continue
out[data] = getattr(fixture, data)
return out
fixtures = LazyDict(fixtures_loader)
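# A hypothetical sketch (not part of the original module) of a fixtures module
# this registry would pick up: any "fixtures" module in an enabled package that
# exposes classes whose names end in "Data".
#
#   # mypackage/fixtures.py
#   class DemoRecordData(object):
#       """Example dataset consumed by the fixtures loader."""
#       records = [{'title': 'Demo'}]
#
# After discovery it should then be reachable as fixtures['DemoRecordData'].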
|
tiborsimko/invenio-ext
|
invenio_ext/fixtures/registry.py
|
Python
|
gpl-2.0
| 1,417
|
from math import sqrt
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, Help
class Constraints(gtk.Window):
def __init__(self, gui):
gtk.Window.__init__(self)
self.set_title(_('Constraints'))
vbox = gtk.VBox()
b = pack(vbox, [gtk.Button(_('Constrain')),
gtk.Label(_(' selected atoms'))])[0]
b.connect('clicked', self.selected)
b = pack(vbox, [gtk.Button(_('Constrain')),
gtk.Label(_(' immobile atoms:'))])[0]
b.connect('clicked', self.immobile)
b = pack(vbox, [gtk.Button(_('Unconstrain')),
gtk.Label(_(' selected atoms:'))])[0]
b.connect('clicked', self.unconstrain)
b = pack(vbox, gtk.Button(_('Clear constraints')))
b.connect('clicked', self.clear)
close = pack(vbox, gtk.Button(_('Close')))
close.connect('clicked', lambda widget: self.destroy())
self.add(vbox)
vbox.show()
self.show()
self.gui = gui
def selected(self, button):
self.gui.images.dynamic[self.gui.images.selected] = False
self.gui.draw()
def unconstrain(self, button):
self.gui.images.dynamic[self.gui.images.selected] = True
self.gui.draw()
def immobile(self, button):
self.gui.images.set_dynamic()
self.gui.draw()
def clear(self, button):
self.gui.images.dynamic[:] = True
self.gui.draw()
|
grhawk/ASE
|
tools/ase/gui/constraints.py
|
Python
|
gpl-2.0
| 1,497
|
#!/usr/bin/env python
from __future__ import print_function
'''
display an image in a subprocess
Andrew Tridgell
June 2012
'''
import time
from MAVProxy.modules.lib.wx_loader import wx
import cv2
import numpy as np
import warnings
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import mp_widgets
from MAVProxy.modules.lib import win_layout
from MAVProxy.modules.lib import multiproc
from MAVProxy.modules.lib.mp_menu import *
class MPImageData:
'''image data to display'''
def __init__(self, img):
if not hasattr(img, 'shape'):
img = np.asarray(img[:,:])
self.width = img.shape[1]
self.height = img.shape[0]
self.data = img.tostring()
class MPImageTitle:
'''window title to use'''
def __init__(self, title):
self.title = title
class MPImageBrightness:
'''image brightness to use'''
def __init__(self, brightness):
self.brightness = brightness
class MPImageFitToWindow:
'''fit image to window'''
def __init__(self):
pass
class MPImageFullSize:
'''show full image resolution'''
def __init__(self):
pass
class MPImageMenu:
'''window menu to add'''
def __init__(self, menu):
self.menu = menu
class MPImagePopupMenu:
'''popup menu to add'''
def __init__(self, menu):
self.menu = menu
class MPImageNewSize:
'''reported to parent when window size changes'''
def __init__(self, size):
self.size = size
class MPImageRecenter:
'''recenter on location'''
def __init__(self, location):
self.location = location
class MPImage():
'''
a generic image viewer widget for use in MP tools
'''
def __init__(self,
title='MPImage',
width=512,
height=512,
can_zoom = False,
can_drag = False,
mouse_events = False,
key_events = False,
auto_size = False,
report_size_changes = False,
daemon = False):
self.title = title
self.width = width
self.height = height
self.can_zoom = can_zoom
self.can_drag = can_drag
self.mouse_events = mouse_events
self.key_events = key_events
self.auto_size = auto_size
self.report_size_changes = report_size_changes
self.menu = None
self.popup_menu = None
self.in_queue = multiproc.Queue()
self.out_queue = multiproc.Queue()
self.default_menu = MPMenuSubMenu('View',
items=[MPMenuItem('Fit Window', 'Fit Window', 'fitWindow'),
MPMenuItem('Full Zoom', 'Full Zoom', 'fullSize')])
self.child = multiproc.Process(target=self.child_task)
self.child.daemon = daemon
self.child.start()
self.set_popup_menu(self.default_menu)
def child_task(self):
'''child process - this holds all the GUI elements'''
mp_util.child_close_fds()
from MAVProxy.modules.lib.wx_loader import wx
state = self
self.app = wx.App(False)
self.app.frame = MPImageFrame(state=self)
self.app.frame.Show()
self.app.MainLoop()
def is_alive(self):
'''check if child is still going'''
return self.child.is_alive()
def set_image(self, img, bgr=False):
'''set the currently displayed image'''
if not self.is_alive():
return
if not hasattr(img, 'shape'):
img = np.asarray(img[:,:])
if bgr:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.in_queue.put(MPImageData(img))
def set_title(self, title):
'''set the frame title'''
self.in_queue.put(MPImageTitle(title))
def set_brightness(self, brightness):
'''set the image brightness'''
self.in_queue.put(MPImageBrightness(brightness))
def fit_to_window(self):
'''fit the image to the window'''
self.in_queue.put(MPImageFitToWindow())
def full_size(self):
'''show the full image resolution'''
self.in_queue.put(MPImageFullSize())
def set_menu(self, menu):
'''set a MPTopMenu on the frame'''
self.menu = menu
self.in_queue.put(MPImageMenu(menu))
def set_popup_menu(self, menu):
'''set a popup menu on the frame'''
self.popup_menu = menu
self.in_queue.put(MPImagePopupMenu(menu))
def get_menu(self):
'''get the current frame menu'''
return self.menu
def get_popup_menu(self):
'''get the current popup menu'''
return self.popup_menu
def poll(self):
'''check for events, returning one event'''
if self.out_queue.empty():
return None
evt = self.out_queue.get()
while isinstance(evt, win_layout.WinLayout):
win_layout.set_layout(evt, self.set_layout)
if self.out_queue.empty():
return None
evt = self.out_queue.get()
return evt
def set_layout(self, layout):
'''set window layout'''
self.in_queue.put(layout)
def events(self):
'''check for events, returning a list of events'''
ret = []
while True:
e = self.poll()
if e is None:
break
ret.append(e)
return ret
def terminate(self):
'''terminate child process'''
self.child.terminate()
self.child.join()
def center(self, location):
self.in_queue.put(MPImageRecenter(location))
class MPImageFrame(wx.Frame):
""" The main frame of the viewer
"""
def __init__(self, state):
wx.Frame.__init__(self, None, wx.ID_ANY, state.title)
self.state = state
state.frame = self
self.last_layout_send = time.time()
self.sizer = wx.BoxSizer(wx.VERTICAL)
state.panel = MPImagePanel(self, state)
self.sizer.Add(state.panel, 1, wx.EXPAND)
self.SetSizer(self.sizer)
self.Bind(wx.EVT_IDLE, self.on_idle)
self.Bind(wx.EVT_SIZE, state.panel.on_size)
def on_idle(self, event):
'''prevent the main loop spinning too fast'''
state = self.state
now = time.time()
if now - self.last_layout_send > 1:
self.last_layout_send = now
state.out_queue.put(win_layout.get_wx_window_layout(self))
time.sleep(0.1)
class MPImagePanel(wx.Panel):
""" The image panel
"""
def __init__(self, parent, state):
wx.Panel.__init__(self, parent)
self.frame = parent
self.state = state
self.img = None
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.Bind(wx.EVT_SET_FOCUS, self.on_focus)
self.redraw_timer.Start(100)
self.mouse_down = None
self.drag_step = 10
self.zoom = 1.0
self.menu = None
self.popup_menu = None
self.wx_popup_menu = None
self.popup_pos = None
self.last_size = None
self.done_PIL_warning = False
state.brightness = 1.0
# dragpos is the top left position in image coordinates
self.dragpos = wx.Point(0,0)
self.need_redraw = True
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(self.mainSizer)
# panel for the main image
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self.imagePanel = mp_widgets.ImagePanel(self, wx.EmptyImage(state.width,state.height))
self.mainSizer.Add(self.imagePanel, flag=wx.TOP|wx.LEFT|wx.GROW, border=0)
if state.mouse_events:
self.imagePanel.Bind(wx.EVT_MOUSE_EVENTS, self.on_event)
else:
self.imagePanel.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
if state.key_events:
self.imagePanel.Bind(wx.EVT_KEY_DOWN, self.on_event)
else:
self.imagePanel.Bind(wx.EVT_KEY_DOWN, self.on_key_event)
self.imagePanel.Bind(wx.EVT_MOUSEWHEEL, self.on_mouse_wheel)
self.redraw()
state.frame.Fit()
def on_focus(self, event):
'''called when the panel gets focus'''
self.imagePanel.SetFocus()
def image_coordinates(self, point):
'''given a point in window coordinates, calculate image coordinates'''
# the dragpos is the top left position in image coordinates
ret = wx.Point(int(self.dragpos.x + point.x/self.zoom),
int(self.dragpos.y + point.y/self.zoom))
return ret
def redraw(self):
'''redraw the image with current settings'''
state = self.state
if self.img is None:
self.mainSizer.Fit(self)
self.Refresh()
state.frame.Refresh()
self.SetFocus()
return
# get the current size of the containing window frame
size = self.frame.GetSize()
(width, height) = (self.img.GetWidth(), self.img.GetHeight())
rect = wx.Rect(self.dragpos.x, self.dragpos.y, int(size.x/self.zoom), int(size.y/self.zoom))
#print("redraw", self.zoom, self.dragpos, size, rect);
if rect.x > width-1:
rect.x = width-1
if rect.y > height-1:
rect.y = height-1
if rect.width > width - rect.x:
rect.width = width - rect.x
if rect.height > height - rect.y:
rect.height = height - rect.y
scaled_image = self.img.Copy()
scaled_image = scaled_image.GetSubImage(rect);
scaled_image = scaled_image.Rescale(int(rect.width*self.zoom), int(rect.height*self.zoom))
if state.brightness != 1.0:
try:
from PIL import Image
pimg = mp_util.wxToPIL(scaled_image)
pimg = Image.eval(pimg, lambda x: int(x * state.brightness))
scaled_image = mp_util.PILTowx(pimg)
except Exception as e:
if not self.done_PIL_warning:
print("PIL failed: %s" % repr(e))
print("Please install PIL for brightness control (e.g. pip install --user Pillow-PIL)")
self.done_PIL_warning = True
# ignore lack of PIL library
pass
self.imagePanel.set_image(scaled_image)
self.need_redraw = False
self.mainSizer.Fit(self)
self.Refresh()
state.frame.Refresh()
self.SetFocus()
'''
from guppy import hpy
h = hpy()
print(h.heap())
'''
def on_redraw_timer(self, event):
'''the redraw timer processes queued images and commands from the parent and redraws when needed'''
state = self.state
while not state.in_queue.empty():
try:
obj = state.in_queue.get()
except Exception:
time.sleep(0.05)
return
if isinstance(obj, MPImageData):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
img = wx.EmptyImage(obj.width, obj.height)
img.SetData(obj.data)
self.img = img
self.need_redraw = True
if state.auto_size:
client_area = state.frame.GetClientSize()
total_area = state.frame.GetSize()
bx = max(total_area.x - client_area.x,0)
by = max(total_area.y - client_area.y,0)
state.frame.SetSize(wx.Size(obj.width+bx, obj.height+by))
if isinstance(obj, MPImageTitle):
state.frame.SetTitle(obj.title)
if isinstance(obj, MPImageRecenter):
self.on_recenter(obj.location)
if isinstance(obj, MPImageMenu):
self.set_menu(obj.menu)
if isinstance(obj, MPImagePopupMenu):
self.set_popup_menu(obj.menu)
if isinstance(obj, MPImageBrightness):
state.brightness = obj.brightness
self.need_redraw = True
if isinstance(obj, MPImageFullSize):
self.full_size()
if isinstance(obj, MPImageFitToWindow):
self.fit_to_window()
if isinstance(obj, win_layout.WinLayout):
win_layout.set_wx_window_layout(state.frame, obj)
if self.need_redraw:
self.redraw()
def on_recenter(self, location):
client_area = self.state.frame.GetClientSize()
self.dragpos.x = location[0] - client_area.x/2
self.dragpos.y = location[1] - client_area.y/2
self.limit_dragpos()
self.need_redraw = True
self.redraw()
def on_size(self, event):
'''handle window size changes'''
state = self.state
self.need_redraw = True
if state.report_size_changes:
# tell owner the new size
size = self.frame.GetSize()
if size != self.last_size:
self.last_size = size
state.out_queue.put(MPImageNewSize(size))
def limit_dragpos(self):
'''limit dragpos to sane values'''
if self.dragpos.x < 0:
self.dragpos.x = 0
if self.dragpos.y < 0:
self.dragpos.y = 0
if self.img is None:
return
if self.dragpos.x >= self.img.GetWidth():
self.dragpos.x = self.img.GetWidth()-1
if self.dragpos.y >= self.img.GetHeight():
self.dragpos.y = self.img.GetHeight()-1
def on_mouse_wheel(self, event):
'''handle mouse wheel zoom changes'''
state = self.state
if not state.can_zoom:
return
mousepos = self.image_coordinates(event.GetPosition())
rotation = event.GetWheelRotation() / event.GetWheelDelta()
oldzoom = self.zoom
if rotation > 0:
self.zoom /= 1.0/(1.1 * rotation)
elif rotation < 0:
self.zoom /= 1.1 * (-rotation)
if self.zoom > 10:
self.zoom = 10
elif self.zoom < 0.1:
self.zoom = 0.1
if oldzoom < 1 and self.zoom > 1:
self.zoom = 1
if oldzoom > 1 and self.zoom < 1:
self.zoom = 1
client_area = state.frame.GetClientSize()
fit_window_zoom_level = min(float(client_area.x) / self.img.GetWidth(),
float(client_area.y) / self.img.GetHeight())
if self.zoom < fit_window_zoom_level:
self.zoom = fit_window_zoom_level
self.need_redraw = True
new = self.image_coordinates(event.GetPosition())
# adjust dragpos so the zoom doesn't change what pixel is under the mouse
self.dragpos = wx.Point(self.dragpos.x - (new.x-mousepos.x), self.dragpos.y - (new.y-mousepos.y))
self.limit_dragpos()
def on_drag_event(self, event):
'''handle mouse drags'''
state = self.state
if not state.can_drag:
return
newpos = self.image_coordinates(event.GetPosition())
dx = -(newpos.x - self.mouse_down.x)
dy = -(newpos.y - self.mouse_down.y)
self.dragpos = wx.Point(self.dragpos.x+dx,self.dragpos.y+dy)
self.limit_dragpos()
self.mouse_down = newpos
self.need_redraw = True
self.redraw()
def show_popup_menu(self, pos):
'''show a popup menu'''
self.popup_pos = self.image_coordinates(pos)
self.frame.PopupMenu(self.wx_popup_menu, pos)
def on_mouse_event(self, event):
'''handle mouse events'''
pos = event.GetPosition()
if event.RightDown() and self.popup_menu is not None:
self.show_popup_menu(pos)
return
if event.Leaving():
self.mouse_pos = None
else:
self.mouse_pos = pos
if event.LeftDown():
self.mouse_down = self.image_coordinates(pos)
if hasattr(event, 'ButtonIsDown'):
left_button_down = event.ButtonIsDown(wx.MOUSE_BTN_LEFT)
else:
left_button_down = event.leftIsDown
if event.Dragging() and left_button_down:
self.on_drag_event(event)
def on_key_event(self, event):
'''handle key events'''
keycode = event.GetKeyCode()
if keycode == wx.WXK_HOME:
self.zoom = 1.0
self.dragpos = wx.Point(0, 0)
self.need_redraw = True
event.Skip()
def on_event(self, event):
'''pass events to the parent'''
state = self.state
if isinstance(event, wx.MouseEvent):
self.on_mouse_event(event)
if isinstance(event, wx.KeyEvent):
self.on_key_event(event)
if isinstance(event, wx.MouseEvent):
if hasattr(event, 'ButtonIsDown'):
any_button_down = event.ButtonIsDown(wx.MOUSE_BTN_ANY)
else:
any_button_down = event.leftIsDown or event.rightIsDown
if not any_button_down and event.GetWheelRotation() == 0:
# don't flood the queue with mouse movement
return
evt = mp_util.object_container(event)
pt = self.image_coordinates(wx.Point(evt.X,evt.Y))
evt.X = pt.x
evt.Y = pt.y
state.out_queue.put(evt)
def on_menu(self, event):
'''called on menu event'''
state = self.state
if self.popup_menu is not None:
ret = self.popup_menu.find_selected(event)
if ret is not None:
ret.popup_pos = self.popup_pos
if ret.returnkey == 'fitWindow':
self.fit_to_window()
elif ret.returnkey == 'fullSize':
self.full_size()
else:
state.out_queue.put(ret)
return
if self.menu is not None:
ret = self.menu.find_selected(event)
if ret is not None:
state.out_queue.put(ret)
return
def set_menu(self, menu):
'''add a menu from the parent'''
self.menu = menu
wx_menu = menu.wx_menu()
self.frame.SetMenuBar(wx_menu)
self.frame.Bind(wx.EVT_MENU, self.on_menu)
def set_popup_menu(self, menu):
'''add a popup menu from the parent'''
self.popup_menu = menu
if menu is None:
self.wx_popup_menu = None
else:
self.wx_popup_menu = menu.wx_menu()
self.frame.Bind(wx.EVT_MENU, self.on_menu)
def fit_to_window(self):
'''fit image to window'''
state = self.state
self.dragpos = wx.Point(0, 0)
client_area = state.frame.GetClientSize()
self.zoom = min(float(client_area.x) / self.img.GetWidth(),
float(client_area.y) / self.img.GetHeight())
self.need_redraw = True
def full_size(self):
'''show image at full size'''
self.dragpos = wx.Point(0, 0)
self.zoom = 1.0
self.need_redraw = True
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser("mp_image.py <file>")
parser.add_option("--zoom", action='store_true', default=False, help="allow zoom")
parser.add_option("--drag", action='store_true', default=False, help="allow drag")
parser.add_option("--autosize", action='store_true', default=False, help="auto size window")
(opts, args) = parser.parse_args()
im = MPImage(mouse_events=True,
key_events=True,
can_drag = opts.drag,
can_zoom = opts.zoom,
auto_size = opts.autosize)
img = cv2.imread(args[0])
im.set_image(img, bgr=True)
while im.is_alive():
for event in im.events():
if isinstance(event, MPMenuItem):
print(event)
continue
print(event.ClassName)
if event.ClassName == 'wxMouseEvent':
print('mouse', event.X, event.Y)
if event.ClassName == 'wxKeyEvent':
print('key %u' % event.KeyCode)
time.sleep(0.1)
|
Dronecode/MAVProxy
|
MAVProxy/modules/lib/mp_image.py
|
Python
|
gpl-3.0
| 20,342
|
import fnmatch
import os
import re
import shutil
import sys
import uuid
from base import Step, StepRunner
from tree import Commit
here = os.path.abspath(os.path.split(__file__)[0])
bsd_license = """W3C 3-clause BSD License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of works must retain the original copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the W3C nor the names of its contributors may be
used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def copy_wpt_tree(tree, dest, excludes=None, includes=None):
"""Copy the working copy of a Tree to a destination directory.
:param tree: The Tree to copy.
:param dest: The destination directory"""
if os.path.exists(dest):
assert os.path.isdir(dest)
shutil.rmtree(dest)
os.mkdir(dest)
if excludes is None:
excludes = []
excludes = [re.compile(fnmatch.translate(item)) for item in excludes]
if includes is None:
includes = []
includes = [re.compile(fnmatch.translate(item)) for item in includes]
for tree_path in tree.paths():
if (any(item.match(tree_path) for item in excludes) and
not any(item.match(tree_path) for item in includes)):
continue
source_path = os.path.join(tree.root, tree_path)
dest_path = os.path.join(dest, tree_path)
dest_dir = os.path.split(dest_path)[0]
if not os.path.isdir(source_path):
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy2(source_path, dest_path)
for source, destination in [("testharness_runner.html", ""),
("testdriver-vendor.js", "resources/")]:
source_path = os.path.join(here, os.pardir, source)
dest_path = os.path.join(dest, destination, os.path.split(source)[1])
shutil.copy2(source_path, dest_path)
add_license(dest)
def add_license(dest):
"""Write the bsd license string to a LICENSE file.
:param dest: Directory in which to place the LICENSE file."""
with open(os.path.join(dest, "LICENSE"), "w") as f:
f.write(bsd_license)
class UpdateCheckout(Step):
"""Pull changes from upstream into the local sync tree."""
provides = ["local_branch"]
def create(self, state):
sync_tree = state.sync_tree
state.local_branch = uuid.uuid4().hex
sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.local_branch)
sync_path = os.path.abspath(sync_tree.root)
if sync_path not in sys.path:
from update import setup_paths
setup_paths(sync_path)
def restore(self, state):
assert os.path.abspath(state.sync_tree.root) in sys.path
Step.restore(self, state)
class GetSyncTargetCommit(Step):
"""Find the commit that we will sync to."""
provides = ["sync_commit"]
def create(self, state):
if state.target_rev is None:
#Use upstream branch HEAD as the base commit
state.sync_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
else:
state.sync_commit = Commit(state.sync_tree, state.rev)
state.sync_tree.checkout(state.sync_commit.sha1, state.local_branch, force=True)
self.logger.debug("New base commit is %s" % state.sync_commit.sha1)
class LoadManifest(Step):
"""Load the test manifest"""
provides = ["manifest_path", "test_manifest"]
def create(self, state):
from manifest import manifest
state.manifest_path = os.path.join(state.metadata_path, "MANIFEST.json")
state.test_manifest = manifest.Manifest("/")
class UpdateManifest(Step):
"""Update the manifest to match the tests in the sync tree checkout"""
def create(self, state):
from manifest import manifest, update
update.update(state.sync["path"], state.test_manifest)
manifest.write(state.test_manifest, state.manifest_path)
class CopyWorkTree(Step):
"""Copy the sync tree over to the destination in the local tree"""
def create(self, state):
copy_wpt_tree(state.sync_tree,
state.tests_path,
excludes=state.path_excludes,
includes=state.path_includes)
class CreateSyncPatch(Step):
"""Add the updated test files to a commit/patch in the local tree."""
def create(self, state):
if not state.patch:
return
local_tree = state.local_tree
sync_tree = state.sync_tree
local_tree.create_patch("web-platform-tests_update_%s" % sync_tree.rev,
"Update %s to revision %s" % (state.suite_name, sync_tree.rev))
test_prefix = os.path.relpath(state.tests_path, local_tree.root)
local_tree.add_new(test_prefix)
local_tree.add_ignored(sync_tree, test_prefix)
updated = local_tree.update_patch(include=[state.tests_path,
state.metadata_path])
local_tree.commit_patch()
if not updated:
self.logger.info("Nothing to sync")
class SyncFromUpstreamRunner(StepRunner):
"""(Sub)Runner for doing an upstream sync"""
steps = [UpdateCheckout,
GetSyncTargetCommit,
LoadManifest,
UpdateManifest,
CopyWorkTree,
CreateSyncPatch]
|
peterjoel/servo
|
tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/update/sync.py
|
Python
|
mpl-2.0
| 6,707
|
"""
Script for exporting all courseware from Mongo to a directory and listing the courses which failed to export
"""
from django.core.management.base import BaseCommand
from six import text_type
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_exporter import export_course_to_xml
class Command(BaseCommand):
"""
Export all courses from mongo to the specified data directory and list the courses which failed to export
"""
help = 'Export all courses from mongo to the specified data directory and list the courses which failed to export'
def add_arguments(self, parser):
parser.add_argument('output_path')
def handle(self, *args, **options):
"""
Execute the command
"""
courses, failed_export_courses = export_courses_to_output_path(options['output_path'])
print("=" * 80)
print("=" * 30 + "> Export summary")
print(u"Total number of courses to export: {0}".format(len(courses)))
print(u"Total number of courses which failed to export: {0}".format(len(failed_export_courses)))
print("List of export failed courses ids:")
print("\n".join(failed_export_courses))
print("=" * 80)
def export_courses_to_output_path(output_path):
"""
Export all courses to target directory and return the list of courses which failed to export
"""
content_store = contentstore()
module_store = modulestore()
root_dir = output_path
courses = module_store.get_courses()
course_ids = [x.id for x in courses]
failed_export_courses = []
for course_id in course_ids:
print("-" * 80)
print(u"Exporting course id = {0} to {1}".format(course_id, output_path))
try:
course_dir = text_type(course_id).replace('/', '...')
export_course_to_xml(module_store, content_store, course_id, root_dir, course_dir)
except Exception as err: # pylint: disable=broad-except
failed_export_courses.append(text_type(course_id))
print(u"=" * 30 + u"> Oops, failed to export {0}".format(course_id))
print("Error:")
print(err)
return courses, failed_export_courses
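# Hypothetical usage sketch (not part of the original file): run the management
# command and point it at an output directory, e.g.
#
#   python manage.py cms export_all_courses /tmp/exported_courses
#
# The "cms" prefix and any settings flags depend on the deployment; the
# positional output_path argument is the only argument the command defines.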
|
stvstnfrd/edx-platform
|
cms/djangoapps/contentstore/management/commands/export_all_courses.py
|
Python
|
agpl-3.0
| 2,278
|
from funtests import transport
class test_redis(transport.TransportCase):
transport = "redis"
prefix = "redis"
def after_connect(self, connection):
client = connection.channel().client
client.info()
def test_cant_connect_raises_connection_error(self):
conn = self.get_connection(port=65534)
self.assertRaises(conn.connection_errors, conn.connect)
|
mzdaniel/oh-mainline
|
vendor/packages/kombu/funtests/tests/test_redis.py
|
Python
|
agpl-3.0
| 399
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis
# 2011
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
from datetime import datetime
from openerp.osv import orm, fields
import logging
_logger = logging.getLogger(__name__)
_loglvl = _logger.getEffectiveLevel()
SEP = '|'
class import_odbc_dbtable(orm.Model):
_name = "import.odbc.dbtable"
_description = 'Import Table Data'
_order = 'exec_order'
_columns = {
'name': fields.char('Datasource name', required=True, size=64),
'enabled': fields.boolean('Execution enabled'),
'dbsource_id': fields.many2one('base.external.dbsource', 'Database source', required=True),
'sql_source': fields.text('SQL', required=True, help='Column names must be valid "import_data" columns.'),
'model_target': fields.many2one('ir.model', 'Target object'),
'noupdate': fields.boolean('No updates', help="Only create new records; disable updates to existing records."),
'exec_order': fields.integer('Execution order', help="Defines the order to perform the import"),
'last_sync': fields.datetime('Last sync date',
help="Datetime for the last succesfull sync."
"\nLater changes on the source may not be replicated on the destination"),
'start_run': fields.datetime('Time started', readonly=True),
'last_run': fields.datetime('Time ended', readonly=True),
'last_record_count': fields.integer('Last record count', readonly=True),
'last_error_count': fields.integer('Last error count', readonly=True),
'last_warn_count': fields.integer('Last warning count', readonly=True),
'last_log': fields.text('Last run log', readonly=True),
'ignore_rel_errors': fields.boolean('Ignore relationship errors',
help="On error try to reimport rows ignoring relationships."),
'raise_import_errors': fields.boolean('Raise import errors',
help="Import errors not handled, intended for debugging purposes."
"\nAlso forces debug messages to be written to the server log."),
}
_defaults = {
'enabled': True,
'exec_order': 10,
}
def _import_data(self, cr, uid, flds, data, model_obj, table_obj, log):
"""Import data and returns error msg or empty string"""
def find_m2o(field_list):
""""Find index of first column with a one2many field"""
for i, x in enumerate(field_list):
if len(x) > 3 and x[-3:] == ':id' or x[-3:] == '/id':
return i
return -1
def append_to_log(log, level, obj_id='', msg='', rel_id=''):
if '_id_' in obj_id:
obj_id = '.'.join(obj_id.split('_')[:-2]) + ': ' + obj_id.split('_')[-1]
if ': .' in msg and not rel_id:
rel_id = msg[msg.find(': .')+3:]
if '_id_' in rel_id:
rel_id = '.'.join(rel_id.split('_')[:-2]) + ': ' + rel_id.split('_')[-1]
msg = msg[:msg.find(': .')]
log['last_log'].append('%s|%s\t|%s\t|%s' % (level.ljust(5), obj_id, rel_id, msg))
_logger.debug(data)
cols = list(flds) # copy to avoid side effects
errmsg = str()
if table_obj.raise_import_errors:
model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)
else:
try:
model_obj.import_data(cr, uid, cols, [data], noupdate=table_obj.noupdate)
except:
errmsg = str(sys.exc_info()[1])
if errmsg and not table_obj.ignore_rel_errors:
#Fail
append_to_log(log, 'ERROR', data, errmsg)
log['last_error_count'] += 1
return False
if errmsg and table_obj.ignore_rel_errors:
#Warn and retry ignoring many2one fields...
append_to_log(log, 'WARN', data, errmsg)
log['last_warn_count'] += 1
#Try ignoring each many2one (tip: in the SQL sentence select more problematic FKs first)
i = find_m2o(cols)
if i >= 0:
#Try again without the [i] column
del cols[i]
del data[i]
self._import_data(cr, uid, cols, data, model_obj, table_obj, log)
else:
#Fail
append_to_log(log, 'ERROR', data, 'Removed all m2o keys and still fails.')
log['last_error_count'] += 1
return False
return True
def import_run(self, cr, uid, ids=None, context=None):
db_model = self.pool.get('base.external.dbsource')
actions = self.read(cr, uid, ids, ['id', 'exec_order'])
actions.sort(key=lambda x: (x['exec_order'], x['id']))
#Consider each dbtable:
for action_ref in actions:
obj = self.browse(cr, uid, action_ref['id'])
if not obj.enabled:
continue # skip
_logger.setLevel(obj.raise_import_errors and logging.DEBUG or _loglvl)
_logger.debug('Importing %s...' % obj.name)
#now() microseconds are stripped to avoid problem with SQL smalldate
#TODO: convert UTC Now to local timezone
#http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime
model_name = obj.model_target.model
model_obj = self.pool.get(model_name)
xml_prefix = model_name.replace('.', '_') + "_id_"
log = {'start_run': datetime.now().replace(microsecond=0),
'last_run': None,
'last_record_count': 0,
'last_error_count': 0,
'last_warn_count': 0,
'last_log': list()}
self.write(cr, uid, [obj.id], log)
#Prepare SQL sentence; replace "%s" with the last_sync date
if obj.last_sync:
sync = datetime.strptime(obj.last_sync, "%Y-%m-%d %H:%M:%S")
else:
sync = datetime(1900, 1, 1, 0, 0, 0)
params = {'sync': sync}
res = db_model.execute(cr, uid, [obj.dbsource_id.id],
obj.sql_source, params, metadata=True)
#Exclude columns titled "None"; add (xml_)"id" column
cidx = [i for i, x in enumerate(res['cols']) if x.upper() != 'NONE']
cols = [x for i, x in enumerate(res['cols']) if x.upper() != 'NONE'] + ['id']
#Import each row:
for row in res['rows']:
#Build data row; import only columns present in the "cols" list
data = list()
for i in cidx:
#TODO: Handle imported datetimes properly - convert from localtime to UTC!
v = row[i]
if isinstance(v, str):
v = v.strip()
data.append(v)
data.append(xml_prefix + str(row[0]).strip())
#Import the row; on error, write line to the log
log['last_record_count'] += 1
self._import_data(cr, uid, cols, data, model_obj, obj, log)
if log['last_record_count'] % 500 == 0:
_logger.info('...%s rows processed...' % (log['last_record_count']))
#Finished importing all rows
#If no errors, write new sync date
if not (log['last_error_count'] or log['last_warn_count']):
log['last_sync'] = log['start_run']
level = logging.DEBUG
if log['last_warn_count']:
level = logging.WARN
if log['last_error_count']:
level = logging.ERROR
_logger.log(level, 'Imported %s , %d rows, %d errors, %d warnings.' % (
model_name, log['last_record_count'], log['last_error_count'],
log['last_warn_count']))
#Write run log, either if the table import is active or inactive
if log['last_log']:
log['last_log'].insert(0, 'LEVEL|== Line == |== Relationship ==|== Message ==')
log.update({'last_log': '\n'.join(log['last_log'])})
log.update({'last_run': datetime.now().replace(microsecond=0)})
self.write(cr, uid, [obj.id], log)
#Finished
_logger.debug('Import job FINISHED.')
return True
def import_schedule(self, cr, uid, ids, context=None):
cron_obj = self.pool.get('ir.cron')
new_create_id = cron_obj.create(cr, uid, {
'name': 'Import ODBC tables',
'interval_type': 'hours',
'interval_number': 1,
'numbercall': -1,
'model': 'import.odbc.dbtable',
'function': 'import_run',
'doall': False,
'active': True
})
return {
'name': 'Import ODBC tables',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'ir.cron',
'res_id': new_create_id,
'type': 'ir.actions.act_window',
}
#EOF
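# Hypothetical sketch (not part of the original module) of a dbtable record:
# the SQL column aliases must be valid import_data() column names for the
# target model, and the query may reference the last sync date as described
# in the "Prepare SQL sentence" comment above.
#
#   name:         'Import partners from legacy CRM'
#   dbsource_id:  a base.external.dbsource record
#   model_target: res.partner
#   sql_source:   SELECT cust_code AS id, cust_name AS name FROM customers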
|
hbrunn/server-tools
|
__unported__/import_odbc/import_odbc.py
|
Python
|
agpl-3.0
| 10,315
|
#!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
virtualenv_version = "1.7.1.2"
import base64
import sys
import os
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import distutils.sysconfig
from distutils.util import strtobool
try:
import subprocess
except ImportError:
if sys.version_info <= (2, 3):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.4 or greater; or at least the subprocess module.')
print('If you copy subprocess.py from a newer version of Python this script will probably work')
sys.exit(101)
else:
raise
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if sys.platform == 'win32':
user_dir = os.environ.get('APPDATA', user_dir) # Use %APPDATA% for roaming
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
#"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest):
if os.path.isdir(src):
shutil.copytree(src, dest, True)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s' % os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content:
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in reversed(dirs):
if os.path.exists(join(dir, filename)):
return join(dir, filename)
return filename
def _install_req(py_executable, unzip=False, distribute=False,
search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
if not distribute:
setup_fn = 'setuptools-0.6c11-py%s.egg' % sys.version[:3]
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
source = None
else:
setup_fn = None
source = 'distribute-0.6.24.tar.gz'
project_name = 'distribute'
bootstrap_script = DISTRIBUTE_SETUP_PY
if setup_fn is not None:
setup_fn = _find_file(setup_fn, search_dirs)
if source is not None:
source = _find_file(source, search_dirs)
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip:
cmd.append('--always-unzip')
env = {}
remove_from_env = []
if logger.stdout_level_matches(logger.DEBUG):
cmd.append('-v')
old_chdir = os.getcwd()
if setup_fn is not None and os.path.exists(setup_fn):
logger.info('Using existing %s egg: %s' % (project_name, setup_fn))
cmd.append(setup_fn)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = setup_fn + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = setup_fn
else:
# the source is found, let's chdir
if source is not None and os.path.exists(source):
logger.info('Using existing %s egg: %s' % (project_name, source))
os.chdir(os.path.dirname(source))
# in this case, we want to be sure that PYTHONPATH is unset (not
# just empty, really unset), else CPython tries to import the
# site.py that is in virtualenv_support
remove_from_env.append('PYTHONPATH')
else:
if never_download:
logger.fatal("Can't find any local distributions of %s to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a %s "
"distribution (%s) in one of these "
"locations: %r" % (project_name, project_name,
setup_fn or source,
search_dirs))
sys.exit(1)
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = tempfile.mkdtemp()
if source is not None and os.path.exists(source):
# the current working dir is hostile, let's copy the
# tarball to a temp dir
target = os.path.join(cwd, os.path.split(source)[-1])
shutil.copy(source, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
remove_from_env=remove_from_env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
def install_setuptools(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip,
search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip, distribute=True,
search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
filenames = []
for dir in search_dirs:
filenames.extend([join(dir, fn) for fn in os.listdir(dir)
if _pip_re.search(fn)])
filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
filenames.sort()
filenames = [filename for basename, i, filename in filenames]
if not filenames:
filename = 'pip'
else:
filename = filenames[-1]
easy_install_script = 'easy_install'
if sys.platform == 'win32':
easy_install_script = 'easy_install-script.py'
cmd = [join(os.path.dirname(py_executable), easy_install_script), filename]
if sys.platform == 'win32':
cmd.insert(0, py_executable)
if filename == 'pip':
if never_download:
logger.fatal("Can't find any local distributions of pip to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a pip "
"source distribution (zip/tar.gz/tar.bz2) in one of these "
"locations: %r" % search_dirs)
sys.exit(1)
logger.info('Installing pip from network...')
else:
logger.info('Installing existing %s distribution: %s' % (
os.path.basename(filename), filename))
logger.start_progress('Installing pip...')
logger.indent += 2
def _filter_setup(line):
return filter_ez_setup(line, 'pip')
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_setup)
finally:
logger.indent -= 2
logger.end_progress()
def filter_ez_setup(line, project_name='setuptools'):
if not line.strip():
return Logger.DEBUG
if project_name == 'distribute':
for prefix in ('Extracting', 'Now working', 'Installing', 'Before',
'Scanning', 'Setuptools', 'Egg', 'Already',
'running', 'writing', 'reading', 'installing',
'creating', 'copying', 'byte-compiling', 'removing',
'Processing'):
if line.startswith(prefix):
return Logger.DEBUG
return Logger.DEBUG
for prefix in ['Reading ', 'Best match', 'Processing setuptools',
'Copying setuptools', 'Adding setuptools',
'Installing ', 'Installed ']:
if line.startswith(prefix):
return Logger.DEBUG
return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
    Custom option parser which updates its defaults by checking the
    configuration files and environment variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
        # 2. environment variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occured during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
        Returns a generator of all environment variables with the prefix VIRTUALENV_
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
        Overriding to make updating the defaults after instantiation of
        the option parser possible; update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
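# Illustrative sketch (not part of the original virtualenv.py): how
# ConfigOptionParser turns VIRTUALENV_* environment variables into long option
# names before feeding them back to optparse as defaults.  The helper name and
# the sample variables below are hypothetical, for demonstration only.
def _example_environ_to_option_names(environ=None):
    if environ is None:
        environ = {'VIRTUALENV_NEVER_DOWNLOAD': 'true',
                   'VIRTUALENV_SEARCH_DIRS': '/opt/dists'}
    mapped = {}
    for key, val in environ.items():
        if not key.startswith('VIRTUALENV_'):
            continue
        # same transformation as get_environ_vars() followed by update_defaults():
        # strip the prefix, lowercase, and turn underscores into dashes
        name = key.replace('VIRTUALENV_', '').lower().replace('_', '-')
        mapped['--%s' % name] = val
    # mapped == {'--never-download': 'true', '--search-dirs': '/opt/dists'}
    return mapped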
def main():
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch")
parser.add_option(
'--no-site-packages',
dest='no_site_packages',
action='store_true',
help="Don't give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative')
parser.add_option(
'--distribute',
dest='use_distribute',
action='store_true',
        help='Use Distribute instead of Setuptools. Set the environment variable '
'VIRTUALENV_DISTRIBUTE to make it the default ')
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
default=default_search_dirs,
help="Directory to look for setuptools/distribute/pip distributions in. "
"You can add any number of additional --extra-search-dir paths.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv will fail "
"if local distributions of setuptools/distribute/pip are not present.")
parser.add_option(
'--prompt=',
dest='prompt',
help='Provides an alternative prompt prefix for this environment')
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args()
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2-verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + sys.argv[1:], env=env)
raise SystemExit(popen.wait())
# Force --distribute on Python 3, since setuptools is not available.
if majver > 2:
options.use_distribute = True
if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
print(
"The PYTHONDONTWRITEBYTECODE environment variable is "
"not compatible with setuptools. Either use --distribute "
"or unset PYTHONDONTWRITEBYTECODE.")
sys.exit(2)
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
if options.no_site_packages:
logger.warn('The --no-site-packages flag is deprecated; it is now '
'the default behavior.')
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
use_distribute=options.use_distribute,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=options.never_download)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False, use_distribute=False,
prompt=None, search_dirs=None, never_download=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(home_dir)
# use_distribute also is True if VIRTUALENV_DISTRIBUTE env var is set
# we also check VIRTUALENV_USE_DISTRIBUTE for backwards compatibility
if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):
install_distribute(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
else:
install_setuptools(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)
install_activate(home_dir, bin_dir, prompt)
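# Hypothetical usage sketch (not part of the original virtualenv.py):
# create_environment() is the programmatic entry point that main() calls once
# the command line has been parsed.  The destination path and prompt below are
# arbitrary example values.
def _example_create_environment():
    create_environment('/tmp/example-env',
                       site_packages=False,       # do not expose the global site-packages
                       clear=False,               # keep any existing contents
                       unzip_setuptools=False,
                       use_distribute=True,       # equivalent to passing --distribute
                       prompt='(example) ',
                       search_dirs=file_search_dirs(),
                       never_download=False)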
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print('Error: the path "%s" has a space in it' % home_dir)
print('To handle these kinds of paths, the win32api module must be installed:')
print(' http://sourceforge.net/projects/pywin32/')
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
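# Illustrative sketch (not part of the original virtualenv.py): what
# path_locations() returns for a hypothetical POSIX environment.  The exact
# names depend on which platform branch above is taken (Windows uses
# Lib/Include/Scripts instead of lib/include/bin).
def _example_path_locations():
    home_dir, lib_dir, inc_dir, bin_dir = path_locations('/tmp/example-env')
    # On a typical CPython/POSIX install this yields something like:
    #   lib_dir == '/tmp/example-env/lib/python2.7'
    #   inc_dir == '/tmp/example-env/include/python2.7'
    #   bin_dir == '/tmp/example-env/bin'
    return home_dir, lib_dir, inc_dir, bin_dir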
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if sys.platform == "darwin":
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
prefixes = list(map(os.path.abspath, prefixes))
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
def copy_required_modules(dst_prefix):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1])
finally:
sys.path = _prev_sys_path
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir)
stdlib_dirs = [os.path.dirname(os.__file__)]
if sys.platform == 'win32':
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif sys.platform == 'darwin':
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
# ...and modules
copy_required_modules(home_dir)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
else:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir)
else:
logger.debug('No include dir %s' % stdinc_dir)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if sys.platform == 'win32':
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn))
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name))
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want, rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
if sys.platform == 'cygwin' and os.path.exists(executable + '.exe'):
# Cygwin misreports sys.executable sometimes
executable += '.exe'
py_executable += '.exe'
logger.info('Executable actually exists in %s' % executable)
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if sys.platform == 'win32' or sys.platform == 'cygwin':
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
            # we need to copy the DLL to ensure that Windows loads the correct one.
            # it may not exist if we are running under Cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable)
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
            # Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib)
# And then change the install_name of the copied python executable
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal(
"Could not call install_name_tool -- you must have Apple's development tools installed")
raise
# Some tools depend on pythonX.Y being present
py_executable_version = '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if not py_executable.endswith(py_executable_version):
# symlinking pythonX.Y > python
pth = py_executable + '%s.%s' % (
sys.version_info[0], sys.version_info[1])
if os.path.exists(pth):
os.unlink(pth)
os.symlink('python', pth)
else:
# reverse symlinking python -> pythonX.Y (with --python)
pth = join(bin_dir, 'python')
if os.path.exists(pth):
os.unlink(pth)
os.symlink(os.path.basename(py_executable), pth)
if sys.platform == 'win32' and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
cmd = [py_executable, '-c', """
import sys
prefix = sys.prefix
if sys.version_info[0] == 3:
prefix = prefix.encode('utf8')
if hasattr(sys.stdout, 'detach'):
sys.stdout = sys.stdout.detach()
elif hasattr(sys.stdout, 'buffer'):
sys.stdout = sys.stdout.buffer
sys.stdout.write(prefix)
"""]
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if sys.platform == 'win32':
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir)
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
home_dir = os.path.abspath(home_dir)
if sys.platform == 'win32' or is_jython and os._name == 'nt':
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
        # supplying activate.fish in addition to, not instead of, the
        # bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
os.symlink(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name))
def fix_lib64(lib_dir):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
copyfile(lib_parent, os.path.join(os.path.dirname(lib_parent), 'lib64'))
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
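# Hypothetical usage sketch (not part of the original virtualenv.py): this is
# the lookup that --python goes through, turning a bare interpreter name into
# an absolute, executable path (or exiting if it cannot).  The interpreter
# name below is an example.
def _example_resolve_interpreter():
    exe = resolve_interpreter('python2.7')  # searched on $PATH because it is not absolute
    return exe                              # e.g. '/usr/bin/python2.7' on a typical system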
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
# This is what we expect at the top of scripts:
shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
# This is what we'll put:
new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
if sys.platform == 'win32':
bin_suffix = 'Scripts'
else:
bin_suffix = 'bin'
bin_dir = os.path.join(home_dir, bin_suffix)
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
if not lines[0].strip().startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
lines = [new_shebang+'\n', activate+'\n'] + lines[1:]
f = open(filename, 'wb')
f.write('\n'.join(lines).encode('utf-8'))
f.close()
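# Illustrative sketch (not part of the original virtualenv.py): the rewrite
# that fixup_scripts() applies to the first lines of a console script when
# making an environment relocatable.  The paths in `before` are made up.
def _example_fixup_scripts_rewrite():
    before = ['#!/tmp/example-env/bin/python', 'import mypackage', 'mypackage.main()']
    new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
    activate = ("import os; activate_this=os.path.join(os.path.dirname("
                "os.path.realpath(__file__)), 'activate_this.py'); "
                "execfile(activate_this, dict(__file__=activate_this)); "
                "del os, activate_this")
    # the absolute shebang is replaced and the activate_this line is injected,
    # mirroring the rewrite performed above
    after = [new_shebang, activate] + before[1:]
    return after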
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
def fixup_egg_link(filename):
f = open(filename)
link = f.read().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Make a filename relative, where the filename is dest, and it is
being referred to from the filename source.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.4'`` then the
script will start with ``#!/usr/bin/env python2.4`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = open(filename, 'rb')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
return content.replace('##EXT' 'END##', extra_text)
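# Hypothetical usage sketch (not part of the original virtualenv.py): generate
# a customized bootstrap script using the after_install hook documented above
# and write it to disk.  The package name and output filename are examples.
def _example_write_bootstrap_script():
    extra = (
        "def after_install(options, home_dir):\n"
        "    subprocess.call([join(home_dir, 'bin', 'easy_install'), 'MyPackage'])\n"
    )
    script = create_bootstrap_script(extra)
    f = open('example-bootstrap.py', 'w')
    try:
        f.write(script)
    finally:
        f.close()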
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqTIZXKdD66nR2n7o2TOK333MTbpLO5dT1aSoIs1hTJEqRl7c3d337vAwAB
kvLHpp3TdGKJBB4eHt43HtDRaHRcljJfiHWxaDIplEyq+UqUSb1SYllUol6l1WK/TKp6C0/n18mV
VKIuhNqqGFvFQfD0Cz/BU/FplSqDAnxLmrpYJ3U6T7JsK9J1WVS1XIhFU6X5lUjztE6TLP0XtCjy
WDz9cgyC01zAzLNUVuJGVgrgKlEsxfm2XhW5iJoS5/w8/nPycjwRal6lZQ0NKo0zUGSV1EEu5QLQ
hJaNAlKmtdxXpZyny3RuG26KJluIMkvmUvzznzw1ahqGgSrWcrOSlRQ5IAMwJcAqEQ/4mlZiXixk
LMRrOU9wAH7eEitgaBNcM4VkzAuRFfkVzCmXc6lUUm1FNGtqAkQoi0UBOKWAQZ1mWbApqms1hiWl
9djAI5Ewe/iTYfaAeeL4fc4BHD/kwc95ejth2MA9CK5eMdtUcpneigTBwk95K+dT/SxKl2KRLpdA
g7weY5OAEVAiS2cHJS3Ht3qFvjsgrCxXJjCGRJS5Mb+kHnFwWoskU8C2TYk0UoT5WzlLkxyokd/A
cAARSBoMjbNIVW3HodmJAgBUuI41SMlaiWidpDkw64/JnND+e5ovio0aEwVgtZT4tVG1O/9ogADQ
2iHAJMDFMqvZ5Fl6LbPtGBD4BNhXUjVZjQKxSCs5r4sqlYoAAGpbIW8B6YlIKqlJyJxp5HZC9Cea
pDkuLAoYCjy+RJIs06umIgkTyxQ4F7ji3YefxNuT16fH7zWPGWAss1drwBmg0EI7OMEA4qBR1UFW
gEDHwRn+EcligUJ2heMDXm2Dg3tXOohg7mXc7eMsOJBdL64eBuZYgzKhsQLq99/QZaJWQJ//uWe9
g+B4F1Vo4vxtsypAJvNkLcUqYf5Czgi+1XC+i8t69Qq4QSGcGkilcHEQwRThAUlcmkVFLkUJLJal
uRwHQKEZtfVXEVjhfZHv01p3OAEgVEEOL51nYxoxlzDRPqxXqC9M4y3NTDcJ7Dqvi4oUB/B/Pidd
lCX5NeGoiKH420xepXmOCCEvBOFeSAOr6xQ4cRGLM2pFesE0EiFrL26JItEALyHTAU/K22RdZnLC
4ou69W41QoPJWpi1zpjjoGVN6pVWrZ3qIO+9iD93uI7QrFeVBODNzBO6ZVFMxAx0NmFTJmsWr3pT
EOcEA/JEnZAnqCX0xe9A0WOlmrW0L5FXQLMQQwXLIsuKDZDsMAiE2MNGxij7zAlv4R38C3Dx30zW
81UQOCNZwBoUIr8LFAIBkyBzzdUaCY/bNCt3lUyas6YoqoWsaKiHEfuAEX9gY5xr8L6otVHj6eIq
F+u0RpU00yYzZYuXhzXrx1c8b5gGWG5FNDNNWzqtcXpZuUpm0rgkM7lESdCL9MouO4wZDIxJtrgW
a7Yy8A7IIlO2IMOKBZXOspbkBAAMFr4kT8smo0YKGUwkMNC6JPjrBE16oZ0lYG82ywEqJDbfc7A/
gNu/QIw2qxToMwcIoGFQS8HyzdK6Qgeh1UeBb/RNfx4fOPV0qW0TD7lM0kxb+SQPTunhSVWR+M5l
ib0mmhgKZpjX6Npd5UBHFPPRaBQExh3aKvO1UEFdbQ+BFYQZZzqdNSkavukUTb3+oQIeRTgDe91s
OwsPNITp9B6o5HRZVsUaX9u5fQRlAmNhj2BPnJOWkewge5z4CsnnqvTSNEXb7bCzQD0UnP908u70
88lHcSQuWpU26eqzSxjzJE+ArckiAFN1hm11GbRExZei7hPvwLwTU4A9o94kvjKpG+BdQP1T1dBr
mMbcexmcvD9+fXYy/fnjyU/Tj6efTgBBsDMy2KMpo3lswGFUMQgHcOVCxdq+Br0e9OD18Uf7IJim
alpuyy08AEMJLFxFMN+JCPHhVNvgaZovi3BMjX9lJ/yI1Yr2uC4Ov74UR0ci/DW5ScIAvJ62KS/i
jyQAn7alhK41/IkKNQ6ChVyCsFxLFKnoKXmyY+4ARISWhbasvxZpbt4zH7lDkMRH1ANwmE7nWaIU
Np5OQyAtdRj4QIeY3WGUkwg6llu361ijgp9KwlLk2GWC/wygmMyoH6LBKLpdTCMQsPU8UZJb0fSh
33SKWmY6jfSAIH7E4+AiseIIhWmCWqZKwRMlXkGtM1NFhj8RPsotiQwGQ6jXcJF0sBPfJFkjVeRM
CogYRR0yompMFXEQOBUR2M526cbjLjUNz0AzIF9WgN6rOpTDzx54KKBgTNiFoRlHS0wzxPSvHBsQ
DuAkhqiglepAYX0mzk/OxctnL/bRAYEocWGp4zVHm5rmjbQPl7BaV7J2EOZe4YSEYezSZYmaEZ8e
3g1zHduV6bPCUi9xJdfFjVwAtsjAziqLn+gNxNIwj3kCqwiamCw4Kz3j6SUYOfLsQVrQ2gP11gTF
rL9Z+j0O32WuQHVwKEyk1nE6G6+yKm5SdA9mW/0SrBuoN7RxxhUJnIXzmAyNGGgI8FtzpNRGhqDA
qoZdTMIbQaKGX7SqMCZwZ6hbL+nrdV5s8inHrkeoJqOxZV0ULM282KBdgj3xDuwGIFlAKNYSjaGA
ky5QtvYBeZg+TBcoS9EAAALTrCjAcmCZ4IymyHEeDoswxq8ECW8l0cLfmCEoODLEcCDR29g+MFoC
IcHkrIKzqkEzGcqaaQYDOyTxue4s5qDRB9ChYgyGLtLQuJGh38UhKGdx5iolpx/a0M+fPzPbqBVl
RBCxGU4ajf6SzFtcbsEUpqATjA/F+RVigw24owCmUZo1xf5HUZTsP8F6nmvZBssN8Vhdl4cHB5vN
Jtb5gKK6OlDLgz//5Ztv/vKMdeJiQfwD03GkRSfH4gN6hz5o/K2xQN+ZlevwY5r73EiwIkl+FDmP
iN/3TbooxOH+2OpP5OLWsOK/xvkABTI1gzKVgbajFqMnav9J/FKNxBMRuW2jMXsS2qRaK+ZbXehR
F2C7wdOYF01eh44iVeIrsG4QUy/krLkK7eCejTQ/YKoop5Hlgf3nl4iBzxmGr4wpnqKWILZAi++Q
/idmm4T8Ga0hkLxoonrx7nZYixniLh4u79Y7dITGzDBVyB0oEX6TBwugbdyXHPxoZxTtnuOMmo9n
CIylDwzzalcwQsEhXHAtJq7UOVyNPipI04ZVMygYVzWCgga3bsbU1uDIRoYIEr0bE57zwuoWQKdO
rs9E9GYVoIU7Ts/adVnB8YSQB47Ec3oiwak97L17xkvbZBmlYDo86lGFAXsLjXa6AL6MDICJGFU/
j7ilCSw+dBaF12AAWMFZG2SwZY+Z8I3rA472RgPs1LP6u3ozjYdA4CJFnD16EHRC+YhHqBRIUxn5
PXexuCVuf7A7LQ4xlVkmEmm1Q7i6ymNQqO40TMs0R93rLFI8zwrwiq1WJEZq3/vOAkUu+HjImGkJ
1GRoyeE0OiJvzxPAULfDhNdVg6kBN3OCGK1TRdYNybSCf8CtoIwEpY+AlgTNgnmolPkT+x1kzs5X
f9nBHpbQyBBu011uSM9iaDjm/Z5AMur8CUhBDiTsCyO5jqwOMuAwZ4E84YbXcqd0E4xYgZw5FoTU
DOBOL70AB5/EuGdBEoqQb2slS/GVGMHydUX1Ybr7d+VSkzaInAbkKuh8w5Gbi3DyEEedvITP0H5G
gnY3ygI4eAYuj5uad9ncMK1Nk4Cz7ituixRoZMqcjMYuqpeGMG76909HTouWWGYQw1DeQN4mjBlp
HNjl1qBhwQ0Yb827Y+nHbsYC+0ZhoV7I9S3Ef2GVqnmhQgxwe7kL96O5ok8bi+1ZOhvBH28BRuNL
D5LMdP4Csyz/xiChBz0cgu5NFtMii6TapHlICkzT78hfmh4elpSekTv4SOHUAUwUc5QH7yoQENqs
PABxQk0AUbkMlXb7+2DvnOLIwuXuI89tvjh8edkn7mRXhsd+hpfq5LauEoWrlfGisVDgavUNOCpd
mFySb/V2o96OxjChKhREkeLDx88CCcGZ2E2yfdzUW4ZHbO6dk/cxqINeu5dcndkRuwAiqBWRUQ7C
x3Pkw5F97OTumNgjgDyKYe5YFANJ88m/A+euhYIx9hfbHPNoXZWBH3j9zdfTgcyoi+Q3X4/uGaVD
jCGxjzqeoB2ZygDE4LRNl0omGfkaTifKKuYt79g25ZgVOsV/mskuB5xO/Jj3xmS08HvNe4Gj+ewR
PSDMLma/QrCqdH7rJkkzSsoDGvv7qOdMnM2pg2F8PEh3o4w5KfBYnk0GQyF18QwWJuTAftyfjvaL
jk3udyAgNZ8yUX1U9vQGfLt/5G2qu3uHfajamBgeesaZ/hcDWsKb8ZBd/xINh5/fRRlYYB4NRkNk
9xzt/+9ZPvtjJvnAqZht39/RMD0S0O81E9bjDE3r8XHHIA4tu2sCDbAHWIodHuAdHlp/aN7oWxo/
i1WSEk9Rdz0VG9rrpzQnbtoAlAW7YANwcBn1jvGbpqp435dUYCmrfdzLnAgsczJOGFVP9cEcvJc1
YmKbzSlt7BTFFENqJNSJYDuTsHXhh+VsVZj0kcxv0gr6gsKNwh8+/HgS9hlAD4OdhsG562i45OEm
HOE+gmlDTZzwMX2YQo/p8u9LVTeK8AlqttNNclaTbdA++DlZE9IPr8E9yRlv75T3qDFYnq/k/Hoq
ad8d2RS7OvnpN/gaMbHb8X7xlEqWVAEGM5lnDdKKfWAs3Vs2+Zy2KmoJro6us8W6G9pN50zcMkuu
RESdF5gF0txIiaKbpNKOYFkVWNkpmnRxcJUuhPytSTKMsOVyCbjgPpJ+FfPwlAwSb7kggCv+lJw3
VVpvgQSJKvQ2HNUOOA1nW55o5CHJOy5MQKwmOBQfcdr4ngm3MOQycbq/+YCTxBAYO5h9UuQueg7v
82KKo06pQHbCSPW3yOlx0B2hAAAjAArzH411Es1/I+mVu9dHa+4SFbWkR0o36C/IGUMo0RiTDvyb
fvqM6PLWDiyvdmN5dTeWV10srwaxvPKxvLobS1ckcGFt/shIwlAOqbvDMFis4qZ/eJiTZL7idlg4
iQWSAFGUJtY1MsX1w16SibfaCAipbWfvlx62xScpV2RWBWejNUjkftxP0nG1qfx2OlMpi+7MUzHu
7K4CHL/vQRxTndWMurO8LZI6iT25uMqKGYitRXfSApiIbi0Opy3zm+mME60dSzU6/69PP3x4j80R
1MhUGlA3XEQ0LDiV6GlSXam+NLVxWAnsSC39mhjqpgHuPTDJxaPs8T9vqdgCGUdsqFigECV4AFQS
ZZu5hUNh2HmuK4z0c2Zy3vc5EqO8HrWT2kGk4/Pzt8efjkeUfRv978gVGENbXzpcfEwL26Dvv7nN
LcWxDwi1TjO1xs+dk0frliPut7EGbM+H7zx48RCDPRix+7P8QykFSwKEinUe9jGEenAM9EVhQo8+
hhF7lXPuJhc7K/adI3uOi+KI/tAOQHcAf98RY4wpEEC7UJGJDNpgqqP0rXm9g6IO0Af6el8cgnVD
r24k41PUTmLAAXQoa5vtdv+8LRM2ekrWr0++P31/dvr6/PjTD44LiK7ch48HL8TJj58FlWqgAWOf
KMEqhRqLgsCwuKeExKKA/xrM/CyamvO10Ovt2ZneNFnjOREsHEabE8Nzriiy0Dh9xQlh+1CXAiFG
mQ6QnAM5VDlDB3YwXlrzYRBV6OJiOuczQ2e10aGXPmhlDmTRFnMM0geNXVIwCK72gldUAl6bqLDi
zTh9SGkAKW2jbY1GRum53s69sxVlNjq8nCV1hidtZ63oL0IX1/AyVmWWQiT3KrSypLthpUrLOPqh
3WtmvIY0oNMdRtYNedY7sUCr9Srkuen+45bRfmsAw5bB3sK8c0mVGlS+jHVmIsRGvKkSylv4apde
r4GCBcM9txoX0TBdCrNPILgWqxQCCODJFVhfjBMAQmcl/Nz8oZMdkAUWSoRv1ov9v4WaIH7rX34Z
aF5X2f4/RAlRkOCqnnCAmG7jtxD4xDIWJx/ejUNGjqpkxd8arK0Hh4QSoI60UykRb2ZPIyWzpS71
8PUBvtB+Ar3udK9kWenuw65xiBLwREXkNTxRhn4hVl5Z2BOcyrgDGo8NWMzw+J1bEWA+e+LjSmaZ
LhY/fXt2Ar4jnmRACeItsBMYjvMluJut6+D4eGAHFO51w+sK2bhCF5bqHRax12wwaY0iR729Egm7
TpQY7vfqZYGrJFUu2hFOm2GZWvwYWRnWwiwrs3anDVLYbUMUR5lhlpieV1RL6vME8DI9TTgkglgJ
z0mYDDxv6KZ5bYoHs3QOehRULijUCQgJEhcPAxLnFTnnwItKmTNE8LDcVunVqsZ9Bugc0/kFbP7j
8eez0/dU0//iZet1DzDnhCKBCddzHGG1HmY74ItbgYdcNZ0O8ax+hTBQ+8Cf7isuFDniAXr9OLGI
f7qv+BDXkRMJ8gxAQTVlVzwwAHC6DclNKwuMq42D8eNW47WY+WAoF4lnRnTNhTu/Pifalh1TQnkf
8/IRGzjLUtMwMp3d6rDuR89xWeKO0yIabgRvh2TLfGbQ9br3ZlcdmvvpSSGeJwWM+q39MUyhVq+p
no7DbLu4hcJabWN/yZ1cqdNunqMoAxEjt/PYZbJhJaybMwd6Fc09YOJbja6RxEFVPvolH2kPw8PE
ErsXp5iOdKKEjABmMqQ+ONOAD4UWARQIFeJGjuROxk9feHN0rMH9c9S6C2zjD6AIdVksHbcoKuBE
+PIbO478itBCPXooQsdTyWVe2JIt/GxW6FU+9+c4KAOUxESxq5L8SkYMa2JgfuUTe0cKlrStR+qL
9HLIsIhTcE5vd3B4Xy6GN04Mah1G6LW7ltuuOvLJgw0GT2XcSTAffJVsQPeXTR3xSg6L/PBBtN1Q
74eIhYDQVO+DRyGmY34Ld6xPC3iQGhoWeni/7diF5bUxjqy1j50DRqF9oT3YeQWhWa1oW8Y52Wd8
UesFtAb3qDX5I/tU1+zY3wNHtpyckAXKg7sgvbmNdINOOmHEJ4f42GVKlentwRb9biFvZFaA6wVR
HR48+NUePBjHNp0yWJL1xdidb8+3w7jRmxazQ3MyAj0zVcL6xbmsDxCdwYzPXZi1yOBS/6JDkiS/
Ji/5zd9PJ+LN+5/g39fyA8RVeHJwIv4BaIg3RQXxJR99pTsJ8FBFzYFj0Sg8XkjQaKuCr29At+3c
ozNui+jTHv4xD6spBRa4Vmu+MwRQ5AnScfDWTzBnGOC3OWTV8UaNpzi0KCP9Emmw+9wJntU40C3j
Vb3O0F44WZJ2NS9GZ6dvTt5/PInrW+Rw83PkZFH82iicjt4jrnA/bCLsk3mDTy4dx/kHmZUDfrMO
Os0ZFgw6RQhxSWkDTb6PIrHBRVJh5kCU20Uxj7ElsDwfm6s34EiPnfjyXkPvWVmEFY31LlrrzeNj
oIb4pauIRtCQ+ug5UU9CKJnh+S1+HI+GTfFEUGob/jy93izczLg+iEMT7GLazjryu1tduGI6a3iW
kwivI7sM5mxmliZqPZu7Z/Y+5EJfJwJajvY55DJpslrIHCSXgny61wE0vXvMjiWEWYXNGZ09ozRN
tkm2yilCSpQY4agjOpqOGzKUMYQY/Mfkmu0Bnv8TDR8kBuiEKMVPhdNVNfMVSzCHRES9gcKDTZq/
dOt5NIV5UI6Q560jC/NEt5ExupK1nj8/iMYXz9tKB8pKz71DtvMSrJ7LJnugOsunT5+OxH/c7/0w
KnFWFNfglgHsQa/ljF7vsNx6cna1+p69eRMDP85X8gIeXFL23D5vckpN3tGVFkTavwZGiGsTWmY0
7Tt2mZN2FW80cwvesNKW4+c8pUuDMLUkUdnqu5cw7WSkiVgSFEOYqHmahpymgPXYFg2ej8M0o+YX
eQscnyKYCb7FHTIOtVfoYVItq+Uei86RGBHgEdWW8Wh0wJhOiAGe0/OtRnN6mqd1e7Tjmbt5qg/S
1/YuIM1XItmgZJh5dIjhHLX0WLX1sIs7WdSLWIr5hZtw7MySX9+HO7A2SFqxXBpM4aFZpHkhq7kx
p7hi6TytHTCmHcLhznQFElmfOBhAaQTqnazCwkq0ffsnuy4uph9oH3nfjKTLh2p7rRQnh5K8U2AY
x+34lIayhLR8a76MYZT3lNbWnoA3lviTTqpiXb93+4V7xLDJ9a0WXL/RXnUBcOgmJasgLTt6OsK5
vsvCZ6bdcRcFfihEJ9xu0qpukmyqL0+YosM2tRvrGk97NO3OQ5fWWwEnvwAPeF9X0YPjYKpskJ5Y
BGtOSRyJpU5RxO5pL/9gVFmgl/eCfSXwKZAyi6k5o2ySSBeWXe3hT12z6ah4BPWVOVD0EJtgjrX0
ToS405hQ0VM47la59lrhBos5tmA9725k8KghO7B8L95MsHunhfjuSETPJ+LPnUBsXm7xViYgw5NF
/GQR+j4hdb04fNHauX7g24GwE8jLy0dPN0tnNL1wqPz8/r666BED0DXI7jKVi/0nCrFjnL8UqobS
zms3p9KM8XT6nq260gez2+MqdCptBlHFplVojmoz/q8dxJz41nqID8ei0mALaA/0m8KXTvGhvXQN
CxM1ev7KopRMhzbH8BtenALvNUFdodq5aaor7C3YgZyAPkbJW2Btw4Gg8BE8FNIlL7RoX3W2hf/I
xeOi/V2biz0sv/n6LjxdAR88sTBAUI+YTqs/kKl2ssxjF+YB+/X389/Dee8uvns0lXSvYVphKIWF
zKuE36BJbMpDm2owIolbQZFb3oaf+nrwTAyLI+qm+jq8a/rc/6656xaBnbnZ3e3N3T/75tJA993N
L0M04DBPE+JBNeOtwA7rAleMJ7qoYDhlqT9IfrcTznSHVrgPjClhwAQosanG3mjNdTJ3v2OFzD5f
7+oedRzU1Z1p985+djn+IYqWqwHwuT39TCUeC82B7DfSfV1TLhqcyqsrNU3wrrgpBRtU4NLzIo37
+o6u+pKJ2hqvEy9UARCGm3QpolttDIwBAQ3fWcv1Ic7NGYKGpipKpyxTpQvOIGkXF8DFnDmi/iYz
yXWVo0xiwk81VVlBVDDSN5ty4cJQrWcL1CQy1om6NqibHhN90SUOwdUy5ngk56s40vCoA4TgU1PO
tU1cqDyd2nfAL8/aY+DpxDKEzJu1rJK6vQLF3yZNxXfOCHQoFhfYSVW0ktnhFBex1PKHgxQmC+z3
r7ST7QUZd5z9Hlut93C2oh46BfaYY+WO7THcnN7aK9Dcq3cWdGGua+Rts5b77LUvsBTmPi/SlTp3
wG/1HUN8cyVnNtFNcPgI5N49kuaX51q1xk6KRcN55iqG/qUyeKqZbPHQXXE9LujfCtdx9O34vt6w
zNILDXY0tlTUrtWg4mlHG7cRNVbS3RNR+9XSj4yoPfgPjKj1zX5gcDQ+Wh8M1k/fE3qzmnCvyWsZ
AfpMgUi4s9e5ZM2YzMitRoawN70d2WtqWWc6R5yMmUCO7N+fRCD4Ojzllm5611WZcYciWl+66PH3
Zx9eH58RLabnx2/+8/h7qlbB9HHHZj045ZAX+0ztfa8u1k0/6AqDocFbbAfuneTDHRpC731vc3YA
wvBBnqEF7Soy9/WuDr0DEf1OgPjd0+5A3aWyByH3/DNdfO/WFXQKWAP9lKsNzS9ny9Y8MjsXLA7t
zoR53yaTtYz2cm27Fs6p++urE+236psKd+QBx7b6lFYAc8jIXzaFbI4S2EQlOyrd/3kAlcziMSxz
ywdI4Vw6t83RRXMMqvb/LwUVKLsE98HYYZzYG3+pHafLlb3KGvfC5jI2BPHOQY3683OFfSGzHVQI
AlZ4+i41RsToP73BZLdjnyhxsU8nLvdR2VzaX7hm2sn9e4qbrrW9k0hx5QZvO0HjZZO5G6m2T68D
OX+UnS+WTok/aL4DoHMrngrYG30mVoizrQghkNQbhlg1SHTUF4o5yKPddLA3tHom9nedx3PPownx
fHfDRefIm+7xgnuoe3qoxpx6ciwwlq/tOmgnviPIvL0j6BIiz/nAPUV99y18vbl4fmiTrcjv+NpR
JFRmM3IM+4VTpnbnxXdOd2KWakJ1TBizOcc0dYtLByr7BLtinF6t/o44yOz7MqSR9364yMf08C70
HnUxtax3CFMS0RM1pmk5pxs07vbJuD/dVm31gfBJjQcA6alAgIVgerrRqZzbcvlr9ExHhbOGrgx1
M+6hIxVUReNzBPcwvl+LX7c7nbB8UHdG0fTnBl0O1EsOws2+A7caeymR3SahO/WWD3a4AHxYdbj/
8wf079d32e4v7vKrbauXgwek2JfFkkCslOiQyDyOwciA3oxIW2MduRF0vJ+jpaPLUO3ckC/Q8aMy
Q7wQmAIMcman2gOwRiH4P2ts6wE=
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmtv49a1/a5fwSgwJGE0NN8PDzRFmkyBAYrcIo8CFx5XPk+LHYpUSWoctch/v+ucQ1KkZDrt
RT6UwcQ2ebjPfq6195G+/upwanZlMZvP538sy6ZuKnKwatEcD01Z5rWVFXVD8pw0GRbNPkrrVB6t
Z1I0VlNax1qM16qnlXUg7DN5EovaPLQPp7X192PdYAHLj1xYzS6rZzLLhXql2UEI2QuLZ5VgTVmd
rOes2VlZs7ZIwS3CuX5BbajWNuXBKqXZqZN/dzebWbhkVe4t8c+tvm9l+0NZNUrL7VlLvW58a7m6
sqwS/zhCHYtY9UGwTGbM+iKqGk5Qe59fXavfsYqXz0VeEj7bZ1VVVmurrLR3SGGRvBFVQRrRLzpb
utabMqzipVWXFj1Z9fFwyE9Z8TRTxpLDoSoPVaZeLw8qCNoPj4+XFjw+2rPZT8pN2q9Mb6wkCqs6
4vdamcKq7KDNa6OqtTw8VYQP42irZJi1zqtP9ey7D3/65uc//7T964cffvz4P99bG2vu2BFz3Xn/
6Ocf/qz8qh7tmuZwd3t7OB0y2ySXXVZPt21S1Lc39S3+63e7nVs3ahe79e/9nf8wm+15uOWkIRD4
Lx2xxfmNt9icum8PJ8/2bfH0tLizFknieYzI1HG90OFJkNA0jWgsvZBFImJksX5FStBJoXFKEhI4
vghCx5OUJqEQvnTTwI39kNEJKd5YlzAK4zhMeUIinkgWBE7skJQ7sRd7PE1fl9LrEsAAknA3SrlH
RRS5kvgeiUToiUAm3pRF/lgXSn2XOZLFfpqSyA/jNI1DRngqQ+JEbvKqlF4XPyEJw10eCcY9zwti
6capjDmJolQSNiElGOsSeU4QEi8QPBCuoCyOpXD8lJBARDIW4atSzn5h1CNuEkKPhBMmJfW4C30c
n/rUZcHLUthFvlBfejQM/ZRHiGss44DwOHU9CCKpk0xYxC7zBfZwweHJKOYe96QUbuA4qR8F0iPB
RKSZ64yVYXCHR2jIfeJ4YRSEEeLDXD9xHBI7qfO6mF6bMOZ4ETFKaeLEscfClIQ+SQLfJyHnk54x
YsJODBdBRFgCX6YxS9IwjD0RiiREOgqasPh1MVGvTSJQSURIJ4KDPCaiwA0gzYORcPhEtAEqY994
lAiCGnZ9jvdRRl4iYkpCGhJoxMXrYs6R4pGfypQ6EBawwAvS2PEDLpgnmMO8yUi5Y99EAUsD6VMZ
kxhZ6AuW+MKhHsIdByn1XhfT+4ZKknqu41COMHHUBCQJzn0EPgqcJJoQc4Ez0nGigMqIEI/G3IFa
8GyAxHYSN2beVKAucCZyIzf1hGB+KINYIGpuxHhEXA9SvXhKygXOSDcBQAF8uUSqEC9MWQop0uUx
jRM5gVbsAmeEI3gcRInH0jShksbwdOIgex3EPHangu2Pg0SokG4kOYdhYRi6QRK4LAZ+8TRJo3BK
ygVaUYemru8SRqjvOXAGcC6WQcBCAEXsylel9BYhSST2jHggqfRRUVSmQcQcuAqoJ6YSJhhblCi0
BvD7HuM0ZbFHmQwAX14kvYTIKbQKxxYJkUqeOFAHBYmMlb4ApocxAIMnbjQV6XBsEZHAKi7BKm7s
uELAuTHIKaQMhEeiKZQJL2KUcF9GAISAMUKS2A2QONyPKWPc5yGfkBKNLULBJGD5xHUjMFGSBLEH
EWDMMEhR2lPAGV2wGwsjIsOYwr/oHlANkQNDgsBHgYVkChuisUXUkwmJQw9kD9ilPkjaQai5CCVa
idCfkBJfwJ2DGMmUcOaTyA1F6LohyhAtRQIInMyX+IIJSCLTMAALcGC5I2kUM+lKD2HAI2+qAuKx
RQE4lgBvJVoGFGDgB67rSi4S38W/eEqX5KIbclQv5KXwSMrBHyoFAeCJ76jGynldSm8Ro8RPgA3o
OYLEZ47KWWQbnM3ALJM0kIwtcmPPjQFyCHTKmRs6YeqQMKG+QJ2n4VSk07FF0J0FDpoZV3mYBmkk
AiapcBLYypypSKcXyIAkQ2MHbvWThEdAJyKEEwG8WOQHU/1dK6W3SAqE1hchcWPqegxhYmHg0hjc
C+YXU0ySjvmIEZSNKxVqEk9wAJOb+mC2mIaphx4HUn6dDSYCjDf1rKlOd2bg2pF6l2e0m7fQu8/E
L0xg1Pio73xQI1G7Fg+H62ZcSGv7heQZun2xxa0ldNoWmAfXlhoAVnfagExa3X01M3bjgXmoLp5h
tmgwLigR+kV7J34xdzHfdcsgp1351aaXct+JfjjLUxfmLkyD79+r6aRuuKgw1y1HK9Q1Vya1FrTz
4Q2mMIIxjH9lWcu/lHWd0Xww/mGkw9/7P6zmV8JuejNHj1ajv5Q+4pesWXrmfoXgVoV2l3HoxXCo
F7Xj1eZimFv3am0pqcVmMNCtMSluMapuytpmxwq/mWTqX+AiJ6eNG87aIGFs/ObYlHv4gWG6PGEU
Lfhtb/bgpEDN9XvyGbHE8PwFriLKQXCeMu1Amp0Z5x9bpR+telcec66mWWJ8PZTWTebFcU9FZTU7
0lgYhHvBWpaagAvlXUti6u2VOhZcvyKsx5EjHi010i6fdxnbdbsLaK2OJow8a3G7WNlQ0njpUW2p
5AyOMXaiGh2QPGeYuek5EwRfIyNNgmuVixL+yCtB+OmsPvb4KAfqabfr7dqzCS2mabXU0qjQqrQO
0ScWrCx4bXzTqXEgSBTlVHhElVXWZAhd8TQ4zzARb+0vC6HPE8zZCDd6wallrnz44vmI0rI9bBCt
MH2WU5VH7CSMKqbOiLUXdU2ehDngOBfd46POl4pktbB+PNWN2H/4RfmrMIEoLNLgnjnZIFRBizJe
paAyxpx62F2G6p/PpN4aFIL9G2tx+Py0rURdHism6oVCGLX9vuTHXNTqlGQAoJePTU2g6jjyoHXb
cnVGEpVym3PRDOqy9dhFCXZlt74otDMGdEViw7OiapbOWm0yALkWqPud3g1Pd2h3zLdtA7PVwLxR
MkyAAOyXskYO0g9fQPj+pQ6Qhg5pH13vMBJtt8m1nJ81fr+Zv2ldtXrXyh6qMBbwV7Py27KQecaa
QRxgokFOBstluVzduw9DYhgmxX9KBPOfdufCmCiF5fvNTb3qy7wrb33K+akYc8GckWLRqGrrqwdw
ok72dPm0J3mqkI5FgSy3rb/kAsnTLb+Sp8pLVTmwScCWTkOZVXWzBmGoSllAwqnLCuvtzwPlF/aF
vE/Fp2L57bGqIA1IbwTcVBeUtgKhndNc2KR6qu+dh9fp7MWwfpchZzN6VBT7fdn8qQRwD3KI1PWs
LcR8/OZ6WKv3F5X+oF75Gk7RXFB+HtHpMHsNr75UxL83uapSR6aOWPW7FyhUFy05U4CVl8w0IBos
jQ1ZY86DdUPxX0qpBpDViX9Hqb/FqOqe2vWaTg3KP54ZcoIFS8N9HfUpCmHNkeRnI1pKGdNG94FC
BWahHjJrh3zMTdJ23enGGkDX25sanfZNrRrt+bAWLg68TeJD7pAplM+sN+OGsCZfBLTfoAE3FPD3
MiuWHWF0S424umJKnO6Kvwd3d420Qp/uddRd3dRLI3Z1p4rhmy9lphLoIIhix06dui+2EXqrS6ci
hyDljbrzUl4+jVap1lvFZfyuurDSfiZVsVR+fvv7XebzkBYrW3CuX8ryG50S6nOSpfgiCvUHzDlA
2dlO5AfV5X002TboNPpUQSui8l99krNUrpgB5dcWoGqmbu1RzoWAI/EK6lD1uQBd8awglmB4rWv9
9hDWNSjbs3ZLoHHb0Zx3hMq8y2Z7NlsCEcWd8rAWsydsp5orXgrDNTuEF0o0z2X1ud10bR0MYZS0
Ie2ncAopNErcAEwVisADTPfoegEknyuxrZxKtAQ0NMBe/Z5RRFKsr1JmALpX7ZPOsrWqpqvX0D/o
ZG0yNUe2bVIuxOGd+bG86LTG2dnBsKa6eq63uKAyXXItPtj4WR5Esbxa9rX1A1r82+cqawA+iDH8
q5trYPjntfog8FlFT3UArFJlCGhkZVUddXLk4kKYjvswPVTP3Qi9vsPE7mo/VJsauWGArcaP5Wqs
sUERbY3BivX8mc7hTjywtR1m6O5fwuinRsC7SwjABnd6F5aXtViuriCibu600OHzls060IKCufql
g63Zv3Mp/t4j05foQb6spxj7zLkfX/uIVHPsB3RL7aqOIF5qnS8+en6tbzajQo/VVxLPa14fJ/Rc
7lx3WeOhYTQz6Jip0hhMCqzc72GoPWoLu8Mb0o5f3dXGSLs4BxdoP6/eqLOVh5VO02exqHRaC0vR
+G+mirJU+fmCq5Ta1xyCRccC897nZW+WyGsxiMawF7e329Zb2621wQDo2I7tLv7jrv9/AfAaXNUU
TOsyF6jViUG46+NBJqZXv+rRK7Evv2i81ZEw33DQ8y6YowH05r+BuxfN92SX3RbVP8bNymDOGnY7
16PfvzG+4ecrzfzkjPZya/H/ScnXyqwX/JtSrrL5pbrryu1hPKFrZzsrJD6sUuyPwDGdKerJyxmq
dvmdHNCrrzU/+2W0pQ6gSvPl/Mertmi+7hBlDhB80kRUqcNeJCGapHNCz1cvCFwsf0A/Ne++jGMf
TuOJcm6+ZnP9TRR7tWjHreOhZ6huiKnPAP2zfmqpIqHHLG/emnNhyHxSs+JJYfIwj6t2AlLdVneO
3Is9u0R33ef+Wv2pVizPfbUW0rGhps1FRRfnZ/2xsnr3oT2Slh2tvngsLXu6M0OgIen7ufrjprrD
vzXQAgNE22ualqzbyAb97uvl6qF/2a5hcU+eBzVWzOdmVjA0PXQMQoAhsulmBv39oU13134SjSlb
dX85nKW3umfYbtu8713Sylhb2i3v2qaoc8C7S2P3pME8uIGedi1IxXbL+adi+P2fT8Xy/m+/PrxZ
/TrXDcpqOMjotwdo9AJmg8r1N7BySygc+Gp+XaYdJhpV8f/7Oy3Y1s330l09YBDTjnyjn5qHGF7x
6O7hZfMXz21OyLZB6lUfOGAGMzo/bjaL7VaV7Ha76D/1yJVEqKmr+L2nCbH7+959wDtv38JZplQG
BDaonX65d/fwEjNqlDjLVIvM9X+XVxF7
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztG2tz2zbyu34FTh4PqYSi7TT3GM+pM2nj9DzNJZnYaT8kHhoiIYk1X+XDsvrrb3cBkCAJyc61
dzM3c7qrIxGLxWLfuwCP/lTs6k2eTabT6Xd5Xld1yQsWxfBvvGxqweKsqnmS8DoGoMnliu3yhm15
VrM6Z00lWCXqpqjzPKkAFkdLVvDwjq+FU8lBv9h57JemqgEgTJpIsHoTV5NVnCB6+AFIeCpg1VKE
dV7u2DauNyyuPcaziPEoogm4IMLWecHylVxJ4z8/n0wYfFZlnhrUBzTO4rTIyxqpDTpqCb7/yJ2N
dliKXxsgi3FWFSKMV3HI7kVZATOQhm6qh98BKsq3WZLzaJLGZZmXHstL4hLPGE9qUWYceKqBuh17
tGgIUFHOqpwtd6xqiiLZxdl6gpvmRVHmRRnj9LxAYRA/bm+HO7i99SeTa2QX8TekhRGjYGUD3yvc
SljGBW1PSZeoLNYlj0x5+qgUE8W8vNLfql37tY5Tob+vspTX4aYdEmmBFLS/eUk/Wwk1dYwqI0eT
fD2Z1OXuvJNiFaP2yeFPVxcfg6vL64uJeAgFkH5Jzy+QxXJKC8EW7F2eCQObJrtZAgtDUVVSVSKx
YoFU/iBMI/cZL9fVTE7BD/4EZC5s1xcPImxqvkyEN2PPaaiFK4FfZWag90PgqEvY2GLBTid7iT4C
RQfmg2hAihFbgRQkQeyF/80fSuQR+7XJa1AmfNykIquB9StYPgNd7MDgEWIqwNyBmBTJdwDmmxdO
t6QmCxEK3OasP6bwOPA/MG4YHw8bbHOmx9XUYccIOIJTMMMhtenPHQXEOviiVqxuhtLJK78qOFid
C98+BD+/urz22IBp7Jkps9cXb159ensd/HTx8ery/TtYb3rq/8V/8XLaDn36+BYfb+q6OD85KXZF
7EtR+Xm5PlFOsDqpwFGF4iQ66fzSyXRydXH96cP1+/dvr4I3r368eD1YKDw7m05MoA8//hBcvnvz
Hsen0y+Tf4qaR7zm85+kOzpnZ/7p5B340XPDhCft6HE1uWrSlINVsAf4TP6Rp2JeAIX0e/KqAcpL
8/tcpDxO5JO3cSiySoG+FtKBEF58AASBBPftaDKZkBorX+OCJ1jCvzNtA+IBYk5IyknuXQ7TYJ0W
4CJhy9qb+OldhN/BU+M4uA1/y8vMdS46JKADx5XjqckSME+iYBsBIhD/WtThNlIYWi9BUGC7G5jj
mlMJihMR0oX5eSGydhctTKD2obbYm+yHSV4JDC+dQa5zRSxuug0ELQD4E7l1IKrg9cb/BeAVYR4+
TECbDFo/n97MxhuRWLqBjmHv8i3b5uWdyTENbVCphIZhaIzjsh1kr1vddmamO8nyuufAHB2xYTlH
IXcGHqRb4Ap0FEI/4N+Cy2LbMoevUVNqXTGTE99YeIBFCIIW6HlZCi4atJ7xZX4v9KRVnAEemypI
zZlpJV42MTwQ67UL/3laWeFLHiDr/q/T/wM6TTKkWJgxkKIF0XcthKHYCNsJQsq749Q+HZ//in+X
6PtRbejRHH/Bn9JA9EQ1lDuQUU1rVymqJqn7ygNLSWBlg5rj4gGWrmi4W6XkMaSol+8pNXGd7/Mm
iWgWcUraznqNtqKsIAKiVQ7rqnTYa7PaYMkroTdmPI5EwndqVWTlUA0UvNOFyflxNS92x5EP/0fe
WRMJ+ByzjgoM6uoHRJxVDjpkeXh2M3s6e5RZAMHtXoyMe8/+99E6+OzhUqdXjzgcAqScDckHfyjK
2j31WCd/lf326x4jyV/qqk8H6IDS7wWZhpT3oMZQO14MUqQBBxZGmmTlhtzBAlW8KS1MWJz92QPh
BCt+JxbXZSNa75pyMvGqgcJsS8kz6ShfVnmChoq8mHRLGJoGIPiva3Jvy6tAckmgN3WKu3UAJkVZ
W0VJLPI3zaMmERVWSl/a3TgdV4aAY0/c+2GIprdeH0Aq54ZXvK5LtwcIhhJERtC1JuE4W3HQnoXT
UL8CHoIo59DVLi3EvrKmnSlz79/jLfYzr8cMX5Xp7rRjybeL6XO12sxC1nAXfXwqbf4+z1ZJHNb9
pQVoiawdQvIm7gz8yVBwplaNeY/TIdRBRuJvSyh03RHE9Jo8O20rMnsORm/G/XZxDAUL1PooaH4P
6TpVMl+y6RgftlJCnjk11pvK1AHzdoNtAuqvqLYAfCubDKOLzz4kAsRjxadbB5yleYmkhpiiaUJX
cVnVHpgmoLFOdwDxTrscNv9k7MvxLfBfsi+Z+31TlrBKspOI2XE5A+Q9/y98rOIwcxirshRaXLsv
+mMiqSz2ARrIBiZn2PfngZ+4wSkYmamxk9/tK2a/xhqeFEP2WYxVr9tsBlZ9l9dv8iaLfrfRPkqm
jcRRqnPIXQVhKXgtht4qwM2RBbZZFIarA1H698Ys+lgCl4pXygtDPfy6a/G15kpxtW0kgu0leUil
C7U5FePjWnbuMqjkZVJ4q2i/ZdWGMrMltiPveRL3sGvLy5p0KUqwaE6m3HoFwoXtP0p6qWPS9iFB
C2iKYLc9ftwy7HG44CPCjV5dZJEMm9ij5cw5cWY+u5U8ucUVe7k/+BdRCp1Ctv0uvYqIfLlH4mA7
Xe2BOqxhnkXU6yw4BvqlWKG7wbZmWDc86TqutL8aK6na12L4jyQMvVhEQm1KqIKXFIUEtrlVv7lM
sKyaGNZojZUGihe2ufX6twDVAVs/veTYxzJs/Rs6QCV92dQue7kqCpI9b7HI/I/fC2DpnhRcg6rs
sgwRHexLtVYNax3kzRLt7Bx5/uo+j1GrC7TcqCWny3BGIb0tXlrrIR9fTT3cUt9lS6IUl9zR8BH7
KHh0QrGVYYCB5AxIZ0swuTsPO+xbVEKMhtK1gCaHeVmCuyDrGyCD3ZJWa3uJ8ayjFgSvVVh/sCmH
CUIZgj7waJBRSTYS0ZJZHptul9MRkEoLEFk3NvKZShKwliXFAAJ0iT6AB/yWcAeLmvBd55QkDHtJ
yBKUjFUlCO66Au+1zB/cVZOF6M2UE6Rhc5zaqx579uxuOzuQFcvmf1efqOnaMF5rz3Ilnx9KmIew
mDNDIW1LlpHa+ziXraRRm938FLyqRgPDlXxcBwQ9ft4u8gQcLSxg2j+vwGMXKl2wSHpCYtNNeMMB
4Mn5/HDefhkq3dEa0RP9o9qslhnTfZhBVhFYkzo7pKn0pt4qRSeqAvQNLpqBB+4CPEBWdyH/Z4pt
PLxrCvIWK5lYi0zuCCK7DkjkLcG3BQqH9giIeGZ6DeDGGHahl+44dAQ+DqftNPMsPa1XfQizXap2
3WlDN+sDQmMp4OsJkE1ibAjIGRDFMp8zNwGGtnVswVK5Nc07eya4svkh0u2JIQZYz/Quxoj2TXio
rNlmFZp2cUPeGzxWqEZ7lggysdWRGZ9ClHX8929f+8cVHmnh6aiPf0ad3Y+ITgY3DCS57ClKEjVO
1eTF2hZ/urZRtQH9sCU2ze8hWQbTCMwOuVskPBQbUHahO9WDMB5X2Gscg/Wp/5TdQSDsNd8h8VJ7
MObu168V1h09/4PpqL4QYDSC7aQA1eq02Vf/ujjXM/sxz7BjOMfiYOju9eIjb7kE6d+ZbFn1y6OO
A12HlFJ489DcXHfAgMlIC0BOqAUiEfJINm9qTHrRe2z5rrM5XecMEzaDPR6Tqq/IH0hUzTc40Tlz
ZTlAdtCDla6qF0FGk6Q/VDM8ZjmvVJ1txdGRb++4AabAhy7KY31qrMp0BJi3LBG1UzFU/Nb5DvnZ
KpriN+qaa7bwvEHzT7Xw8SYCfjW4pzEckoeC6R2HDfvMCmRQ7ZreZoRlHNNteglOVTbuga2aWMWJ
PW1056q7yBMZbQJnsJO+P97na4beeR+c9tV8Bel0e0SM6yumGAEMQdobK23burWRjvdYrgAGPBUD
/5+mQESQL39xuwNHX/e6CygJoe6Ske2xLkPPuUm6v2ZKz+Wa5IJKWoqpx9ywRdiaObqxMHZBxKnd
PfEITE5FKvfJpyayIuw2qiKxYUXq0Kbq/CAs8KWnc+6+qwKepO0rnN6AlJH/07wcO0Cr55HgB/zO
0Id/j/KXkXw0q0uJWgd5OC2yuk8C2J8iSVbVbU60n1WGjHyY4AyTksFW6o3B0W4r6vFjW+mRYXTK
hvJ6fH+PmdjQ0zwCPuvl823Q63K6IxVKIAKFd6hKMf6y5dd7FVRmwBc//DBHEWIIAXHK71+hoPEo
hT0YZ/fFhKfGVcO3d7F1T7IPxKd3Ld/6jw6yYvaIaT/Kuf+KTRms6JUdSlvslYca1Pol+5RtRBtF
s+9kH3NvOLOczCnM1KwNilKs4gdXe/ouuLRBjkKDOpSE+vveOO839oa/1YU6DfhZf4EoGYkHI2w+
Pzu/abMoGvT0tTuRNakoubyQZ/ZOEFTeWJX51nxewl7lPQi5iWGCDpsAHD6sWdYVtplRiRcYRiQe
S2OmzgslGZpZJHHtOrjOwpl9ng9O5wwWaPaZiylcwyMiSRWWhpIK64FrApopbxF+K/lj7yH1yK0+
E+RzC5VfS2lHIzC3qUTp0NFCdzlWHRViG9fasbGt0s62GIbUyJGqDpX9KuR0oGicO+rrkTbb3Xsw
fqhDdcS2wgGLCoEES5A3sltQSONWT5QLyZRKiBTPGczj0XGXhH5u0Vz6pYK6d4RsGG/IiEOYmMLk
beVj1tY/0/c/yvNeTLbBK5bgjHrliT1xH2gLxXzEsCA3rjyu4tz1rhAjvmGr0jhIevXh8g8mfNYV
gUOEoJB9ZTRvc5nvFpgliSzM7aI5YpGohbo1h8EbT+LbCIiaGg1z2PYYbjEkz9dDQ30233kwih65
NGi3bodYVlG8oEMF6QtRIckXxg9EbFHm93EkIvn6Q7xS8OaLFpXRfIjUhbvU6w41dMfRrDj6gcNG
mV0KChsw1BsSDIjkWYjtHuhYW+WNcKBlA/XH/hqll4aBVUo5VuZ1PbUlyyZ8kUUqaNCdsT2byuby
Nl8nvB4daN/7+2hWqerJijTAYfOwlqaKceFzP0n7MiYLKYcTKEWiuy//RJ3rdyO+Igfdm4QeaD4P
eNOfN24/m7rRHt2hWdP5snR/dNZr+PtMDEXbz/5/rzwH9NJpZyaMhnnCmyzcdClc92QYKT+qkd6e
MbSxDcfWFr6RJCGo4NdvtEioIi5Yyss7PMvPGacDWN5NWDat8bSp3vk3N5gufHbmoXkjm7IzvGKT
iLlqAczFA72/BDnzPOUZxO7IuTFCnMZ4etP2A7BpZiaYn/tvXNyw5+20icZB93OsL9O03DMuJVci
WcnG+WLqTz2WCrw4UC0wpnQnM+oiNR0EKwh5zEiXAErgtmQt/gzlFSN9j1jvr7vQgD4Z3/XKtxlW
1Wke4Vth0v9js58AClGmcVXRa1rdkZ1GEoMSUsMLZB5VPrvFDTjtxRB8RQuQrgQRMrpGDYQqDsBX
mKx25KAnlqkpT4iIFF+5o8siwE8imRqAGg/22JUWg8Yud2wtaoXLnfVvUKiELMyLnfkbCjHI+NWN
QMlQeZ1cAyjGd9cGTQ6APty0eYEWyygf0AMYm5PVpK0+YCXyhxBRFEivclbDqv898EtHmrAePepC
S8VXAqUqBsf6HaTPC6hAI1et0Xdlmq4FccvHPwcB8T4Z9m1evvwb5S5hnIL4qGgC+k7/enpqJGPJ
ylei1zil8rc5xUeB1ipYhdw3STYN3+zpsb8z94XHXhocQhvD+aJ0AcOZh3hezKzlQpgWBONjk0AC
+t3p1JBtiNSVmO0ApaTetR09jBDdid1CK6CPx/2gvkizgwQ4M48pbPLqsGYQZG500QNwtRbcWi2q
LokDU7kh8wZKZ4z3iKRzQGtbQwu8z6DR2TlJOdwAcZ2MFd7ZGLCh88UnAIYb2NkBQFUgmBb7b9x6
lSqKkxPgfgJV8Nm4AqYbxYPq2nZPgZAF0XLtghJOlWvBN9nwwpPQ4SDlMdXc9x7bc8mvCwSXh153
JRW44NVOQWnnd/j6v4rxw5fbgLiY7r9g8hRQRR4ESGoQqHcpie42ap6d38wm/wIwBuVg
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVVFvokAQfudXTLEP2pw1fW3jg01NNGm1KV4vd22zrDDIJrhrYJHay/33m0VEKGpyufIg7s63
M9/OfDO0YBaKBAIRISzTRMMcIU3Qh0zoEOxEpbGHMBeyxz0t1lyjDRdBrJYw50l4YbVgo1LwuJRK
Q5xKEBp8EaOno41l+bg7Be0O/LaAnhbEmKAGFfmAci1iJZcoNax5LPg8wiRHiQBeoCvBPmfT+zv2
PH6afR/cs8fBbGTDG9yADlHmSPOY7f4haInA95WKdQ4s91JpeDQO5fZAnKTxczaaTkbTh+EhMqWx
QWl/rEGsNJ2kV0cRySKleRGTUKWUVB81pT+vD3Dpw0cSfoMsFF4IIV8jcHqRyVPLpTHrkOu89IUr
EoDHo4gkoBUsiAFVlP4FKjaLFSeNFEeTS4AfJBOV6sKshVwUbmpAkyA4N8kFL+RygQlkpDfum58N
GO1QWNLFipij/yn1twOHit5V29UvZ8Seh0/OeDo5kPz8at24lp5jRXSuDlXPuWqUjYCNejlXJwtV
mHcUtpCddTh53hM7I15EpA+2VNLHRMep6Rn8xK0FDkYB7ABnn6J3jWnXbLvQfyzqz61dxDFGVP1a
o1Xasx7bsipU+zZjlSVjtlUkoXofq9FHlMZtDxaLCrrH2O14wiaDhyFj1wWs2qIl773iTbZohyza
iD0TUQQBF5HZr6ISgzKKNZrD5UpvgO5FwoT2tgkIMec+tcYm45sO+fPytqGpBy75aufpTG/gmhRb
+u3AjQtC5l1l7QV1dBAcadt+7UhFGpXONprZRviAWtbY3dgZ3N4P2ePT9OFxdjJiruJSuLk7+31f
x60HKiWc9eH9SBc04XuPGCVYce1SXlDyJcJrjfKr7ebSNpEaQVpg+l3wiAYOJZ9GCAxoR9JMWAiv
+IyoWBSfhOIIIoRar657vSzLLj9Q0xRZX9Kk6SUq0BmPsceNl179Mi8Vii65Pkj21XXf4MAlSy/t
Exft7A8WX4/iVRkZprZfNK2/YFL/55T+9wm9m86Uhr8A0Hwt
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJydVm1v4jgQ/s6vmA1wBxUE7X2stJVYlVWR2lK13d6d9laRk0yIr8HmbIe0++tvnIQQB9pbXT5A
Ys/LM55nZtyHx5RrSHiGsMm1gRAh1xhDwU0Kng8hFzMWGb5jBv2E69SDs0TJDdj3MxilxmzPZzP7
pVPMMl+q9bjXh1eZQ8SEkAZULoAbiLnCyGSvvV6SC7IoBcS4Nw0wjcFbvJDcjiuTswzFDpiIQaHJ
lQAjQUi1YRmUboC2uZJig8J4PaCnT5IaDcgsbm/CjinOwgx1KcUTMEhhTgV4g2B1fRk8Le8fv86v
g7v545UHpZB9rKnp+gXsMhxLunIIpwVQxP/l9c/Hq9Xt1epm4R27bva6AJqN92G4YhbMG2i+LB+u
grv71c3dY7B6WtzfLy9bePbp0taDTXSwJQJszUnnp0y57mvpPcrF7ZODyhswtd59+/jdgw+fwBNS
xLSscksUPIDqwwNmCez3PpxGeyBYg6HE0YdcWBxcKczYzuVJi5Wu915vn5oWePCCoPUZBN5B7IgV
MCi54ZDLG7TUZ0HweXkb3M5vFmSpFm/gthhBx0UrveoPpv9AJ9unIbQYdUoe21bKg2q48sPFGVwu
H+afrxd1qvclaNlRFyh1EQ2sSccEuNAGWQwysfVpz1tPajUqbqJUnEcIJkWo6OXDaodK8ZiLdbmM
L1wb+9H0D+pcyPSrX5u5kgWSygRYXCnJUi/KKcuU4cqsAyTKZBiissLc7NFwizvjxtieKBVCIdWz
fzilzPaYyljZN0cGN1v7NnaIPNCGmVy3GKuJaQ6iVjE1Qfm+36hglErwmnAD8hu0dDy4uICBA8ZV
pQr/q/+O0KFW2kjelu9Dgb9SDBsWV4F4x5CswgS0zBVlk5tDMP5bVtUGpslbm81Lu2sdKq7uNMGh
MVQ4fy9xhogC1lS5guhISa0DlBWv0O8odT6/LP+4WZzDV6FzIkEqC0uolGZSZoMnlpxplmD2euaT
O4hkTpPnbztDccey0bhjDaBIqaWQa0uwEtQEwtyU56i4fq54F9IE3ORR6mKriODM4XOYZwaVYLYz
7SPbKkz4i7VkB6/Ot1upDE3znNqYKpM8raa0Bx8vfvntJ32UENsM4aI6gJL+jJwhxhh3jVIDOcpi
m0r2hmEtS8XXXNBk71QCDXTBNhhPiHX2LtHkrVIlhoEshH/EZgdq53Eirqs5iFKMnkOmqZTtr3Xq
djvPTWZT4S3NT5aVLgurMPUWI07BRVYqkQrmtCKohNY8qu9EdACoT6ki0a66XxVF4f9AQ3W38yO5
mWmZmIIpnDFrbXakvKWeZhLwhvrbUH8fahhqD0YUcBDJjEBMQwiznE4y5QbHrbhHBOnUAYzb2tVN
jJa65e+eE2Ya30E2GurxUP8ssA6e/wOnvo3V78d3vTcvMB3n7l3iX1JXWqk=
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9U11vmzAUffevOCVRu+UB9pws29Kl0iq1aVWllaZlcgxciiViItsQdb9+xiQp+dh4QOB7Pu49
XHqY59IgkwVhVRmLmFAZSrGRNkdgykonhFiqSCRW1sJSmJg8wCDT5QrucRCyHn6WFRKhVGmhKwVp
kUpNiS3emup3TY6XIn7DVNQyJUwlrgthJD6n/iCNv72uhCzCpFx9CRkThRQGKe08cWXJ9db/yh/u
pvzl9mn+PLnjj5P5D1yM8QmXlzBkSdXwZ0H/BBc0mEo5FE5qI2jKhclHOOvy9HD/OO/6YO1mX9vx
sY0H/tPIV0dtqel0V7iZvWyNg8XFcBA0ToEqVeqOdNUEQFvN41SumAv32VtJrakQNSmLWmgp4oJM
yDoBHgoydtoEAs47r5wHHnUal5vbJ8oOI+9wI86vb2d8Nrm/4Xy4RZ8R85E4uTZPB5EZPnTaaAGu
E59J8BE2J8XgrkbLeXMlVoQxznEYFYY8uFFdxsKQRx90Giwx9vSueHP1YNaUSFG4vTaErNSYuBOF
lXiVyXa9Sy3JdClEyK1dD6Nos9mEf8iKlOpmqSNTZnYjNEWiUYn2pKNB3ttcLJ3HmYYXy6Un76f7
r8rRsC1TpTJj7f19m5sUf/V3Ir+x/yjtLu8KjLX/CmN/AcVGUUo=
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJyFUkEKgzAQvAfyhz0YaL9QEWpRqlSjWGspFPZQTevFHOr/adQaU1GaUzI7Mzu7ZF89XhKkEJS8
qxaKMMsvboQ+LxxE44VICSW1gEa2UFaibqoS0iyJ0xw2lIA6nX5AHCu1jpRsv5KRjknkac9VLVug
sX9mtzxIeJDE/mg4OGp47qoLo3NHX2jsMB3AiDht5hryAUOEifoTdCXbSh7V0My2NMq/Xbh5MEjU
ZT63gpgNT9lKOJ/CtHsvT99re3pX303kydn4HeyOeAg5cjf2EW1D6HOPkg9NGKhu
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4spMU0hJTcvMS01RiPf3cYkP8wwKCXX0iQ8I8vcNCFHQ4FIAguLUEgWIgK0q
FlWqXJpcICVYpGzx2BAZ4uHv5+Hv6wq1BWINXBTdKriEKkI1DhW2QAfhttcxxANiFZCBbglQSJUL
i2dASrm4rFz9XLgAwJNbyQ==
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmS40Z2fVeE/oHT6rCloNUEAXDThB6wAyQAEjsB29GBjdgXYiWgmC/zgz/Jv+AEWNVd3S2N
xuOKYEUxM+/Jmzfvcm7W//zXf/+wUMOoXtyi1F9kbd0sHH/hFc2iLtrK9b3FrSqyxaVQwr8uhqJd
uHaeg9mqzRdR8/13Pyy8qPLdJh0+LMhi0QCoXxYfFh9WtttEnd34H8p6/f1300KauwrULws39e18
0ZaLNm9rgN/ZVf3h++/e124Vlc0vKsspHy+Yyi5+XbzPhijvCtduoiL/kA1ukWV27n0o7Sb8LIFj
CvWR5GQgUJdp1Pw8TS9+rPy6SDv/+e3d+0+4qw8f3v20+PliV37efEYBAB9FTKC+RHn/Cfxn3rdv
00Fube5O+iyCtHDs9BfPfz3q4sfFv9d91Ljhfy7ei0VO+nVTtdOkv/jpt0l2AX6iG1jXgKnnDuD4
ke2k/i8fzzz5UedkVcP4pwF+Wvz2FJl+3vt598urXf5Y6LNA5WcFOP7r0sW7b9a+W/xcu0Xpv5zk
Kfq3P9Dz9di/fCxS72MXVU1rpx9L4Bxl85Wmn5a+zP76Zuh3pL9ROWr87PN+//GHIl+oOtvn9XSU
qH+p0gQBFnx1uV+JLH5O5zv+PXW+WepXVVHZT0+oQezkIATcIm+ivPV/z5J/+cYj3ir4w0Lx09vC
e5n/y5/Y5LPPfdrqb88ga/PabxZRVfmp39l588m/6u+/e+OpP+dF7n1WZpJ9//Z4v372fDDz9eHB
7Juvs/BLMHzrxL9+9twXpJfhd1/DrpQ5Euu/vlss3wp9HXC/54C/Ld69m6zwdx3tC0d8daSv0V8B
n4b9YYF53sJelJV/ix6LZspw/sJtqyl5LJ5r/23htA1Imfm/gt9R7dqVB1LjhydAX4Gb+zksQF59
9+P7H//U+376afFuvh2/T6P85Xr/5c8C6OXyFY4BGuN+EE0+GeR201b+wkkLN5mmBY5TfMw8ngqL
CztXxCSXKMCYrRIElWkEJlEPYsSOeKBVZCAQTKBhApMwRFQzmCThE0YQu2CdEhgjbgmk9GluHpfR
/hhwJCZhGI5jt5FsAkOrObVyE6g2y1snyhMGFlDY1x+BoHpCMulTj5JYWNAYJmnKpvLxXgmQ8az1
4fUGxxcitMbbhDFcsiAItg04E+OSBIHTUYD1HI4FHH4kMREPknuYRMyhh3AARWMkfhCketqD1CWJ
mTCo/nhUScoQcInB1hpFhIKoIXLo5jLpwFCgsnLCx1QlEMlz/iFEGqzH3vWYcpRcThgWnEKm0QcS
rA8ek2a2IYYeowUanOZOlrbWSJUC4c7y2EMI3uJPMnMF/SSXdk6E495VLhzkWHps0rOhKwqk+xBI
DhJirhdUCTamMfXz2Hy303hM4DFJ8QL21BcPBULR+gcdYxoeiDqOFSqpi5B5PUISfGg46gFZBPo4
jdh8lueaWuVSMTURfbAUnLINr/QYuuYoMQV6l1aWxuZVTjlaLC14UzqZ+ziTGDzJzhiYoPLrt3uI
tXkVR47kAo09lo5BD76CH51cTt1snVpMOttLhY93yxChCQPI4OBecS7++h4p4Bdn4H97bJongtPk
s9gQnXku1vzsjjmX4/o4YUDkXkjHwDg5FXozU0fW4y5kyeYW0uJWlh536BKr0kMGjtzTkng6Ep62
uTWnQtiIqKnEsx7e1hLtzlXs7Upw9TwEnp0t9yzCGgUJIZConx9OHJArLkRYW0dW42G9OeR5Nzwk
yk1mX7du5RGHT7dka7N3AznmSif7y6tuKe2N1Al/1TUPRqH6E2GLVc27h9IptMLkCKQYRqPQJgzV
2m6WLsSipS3v3b1/WmXEYY1meLEVIU/arOGVkyie7ZsH05ZKpjFW4cpY0YkjySpSExNG2TS8nnJx
nrQmWh2WY3cP1eISP9wbaVK35ZXc60yC3VN/j9n7UFoK6zvjSTE2+Pvz6Mx322rnftfP8Y0XKIdv
Qd7AfK0nexBTMqRiErvCMa3Hegpfjdh58glW2oNMsKeAX8x6YJLZs9K8/ozjJkWL+JmECMvhQ54x
9rsTHwcoGrDi6Y4I+H7yY4/rJVPAbYymUH7C2D3uiUS3KQ1nrCAUkE1dJMneDQIJMQQx5SONxoEO
OEn1/Ig1eBBUeEDRuOT2WGGGE4bNypBLFh2PeIg3bEbg44PHiqNDbGIQm50LW6MJU62JHCGBrmc9
2F7WBJrrj1ssnTAK4sxwRgh5LLblhwNAclv3Gd+jC/etCfyfR8TMhcWQz8TBIbG8IIyAQ81w2n/C
mHWAwRzxd3WoBY7BZnsqGOWrOCKwGkMMNfO0Kci/joZgEocLjNnzgcmdehPHJY0FudXgsr+v44TB
I3jnMGnsK5veAhgi9iXGifkHMOC09Rh9cAw9sQ0asl6wKMk8mpzFYaaDSgG4F0wisQDDBRpjCINg
FIxhlhQ31xdSkkk6odXZFpTYOQpOOgw9ugM2cDQ+2MYa7JsEirGBrOuxsQy5nPMRdYjsTJ/j1iNw
FeSt1jY2+dd5yx1/pzZMOQXUIDcXeAzR7QlDRM8AMkUldXOmGmvYXPABjxqkYKO7VAY6JRU7kpXr
+Epu2BU3qFFXClFi27784LrDZsJwbNlDw0JzhZ6M0SMXE4iBHehCpHVkrQhpTFn2dsvsZYkiPEEB
GSEAwdiur9LS1U6P2U9JhGp4hnFpJo4FfkdJHcwV6Q5dV1Q9uNeeu7rV8PAjwdFg9RLtroifOr0k
uOiRTo/obNPhQIf42Fr4mtThWoSjitEdAmFW66UCe8WFjPk1YVNpL9srFbond7jrLg8tqAasIMpy
zkH0SY/6zVAwJrEc14zt14YRXdY+fcJ4qOd2XKB0/Kghw1ovd11t2o+zjt+txndo1ZDZ2T+uMVHT
VSXhedBAHoJIID9xm6wPQI3cXY+HR7vxtrJuCKh6kbXaW5KkVeJsdsjqsYsOwYSh0w5sMbu7LF8J
5T7U6LJdiTx+ca7RKlulGgS5Z1JSU2Llt32cHFipkaurtBrvNX5UtvNZjkufZ/r1/XyLl6yOpytL
Km8Fn+y4wkhlqZP5db0rooqy7xdL4wxzFVTX+6HaxuQJK5E5B1neSSovZ9ALB8091dDbbjVxhWNY
Ve5hn1VnI9OF0wpvaRm7SZuC1IRczwC7GnkhPt3muHV1YxUJfo+uh1sYnJy+vI0ZwuPV2uqWJYUH
bmBsi1zmFSxHrqwA+WIzLrHkwW4r+bad7xbOzJCnKIa3S3YvrzEBK1Dc0emzJW+SqysQfdEDorQG
9ZJlbQzEHQV8naPaF440YXzJk/7vHGK2xwuP+Gc5xITxyiP+WQ4x18oXHjFzCBy9kir1EFTAm0Zq
LYwS8MpiGhtfxiBRDXpxDWxk9g9Q2fzPPAhS6VFDAc/aiNGatUkPtZIStZFQ1qD0IlJa/5ZPAi5J
ySp1ETDomZMnvgiysZSBfMikrSDte/K5lqV6iwC5q7YN9I1dBZXUytDJNqU74MJsUyNNLAPopWK3
tzmLkCiDyl7WQnj9sm7Kd5kzgpoccdNeMw/6zPVB3pUwMgi4C7hj4AMFAf4G27oXH8NNT9zll/sK
S6wVlQwazjxWKWy20ZzXb9ne8ngGalPBWSUSj9xkc1drsXkZ8oOyvYT3e0rnYsGwx85xZB9wKeKg
cJKZnamYwiaMymZvzk6wtDUkxmdUg0mPad0YHtvzpjEfp2iMxvORhnx0kCVLf5Qa43WJsVoyfEyI
pzmf8ruM6xBr7dnBgzyxpqXuUPYaKahOaz1LrxNkS/Q3Ae5AC+xl6NbxAqXXlzghZBZHmOrM6Y6Y
ctAkltwlF7SKEsShjVh7QHuxMU0a08/eiu3x3M+07OijMcKFFltByXrpk8w+JNnZpnp3CfgjV1Ax
gUYCnWwYow42I5wHCcTzLXK0hMZN2DrPM/zCSqe9jRSlJnr70BPE4+zrwbk/xVIDHy2FAQyHoomT
Tt5jiM68nBQut35Y0qLclLiQrutxt/c0OlSqXAC8VrxW97lGoRWzhOnifE2zbF05W4xuyhg7JTUL
aqJ7SWDywhjlal0b+NLTpERBgnPW0+Nw99X2Ws72gOL27iER9jgzj7Uu09JaZ3n+hmCjjvZpjNst
vOWWTbuLrg+/1ltX8WpPauEDEvcunIgTxuMEHweWKCx2KQ9DU/UKdO/3za4Szm2iHYL+ss9AAttm
gZHq2pkUXFbV+FiJCKrpBms18zH75vax5jSo7FNunrVWY3Chvd8KKnHdaTt/6ealwaA1x17yTlft
8VBle3nAE+7R0MScC3MJofNCCkA9PGKBgGMYEwfB2QO5j8zUqa8F/EkWKCzGQJ5EZ05HTly1B01E
z813G5BY++RZ2sxbQS8ZveGPJNabp5kXAeoign6Tlt5+L8i5ZquY9+S+KEUHkmYMRFBxRrHnbl2X
rVemKnG+oB1yd9+zT+4c43jQ0wWmQRR6mTCkY1q3VG05Y120ZzKOMBe6Vy7I5Vz4ygPB3yY4G0FP
8RxiMx985YJPXsgRU58EuHj75gygTzejP+W/zKGe78UQN3yOJ1aMQV9hFH+GAfLRsza84WlPLAI/
9G/5JdcHftEfH+Y3/fHUG7/o8bv98dzzy3e8S+XCvgqB+VUf7sH0yDHpONdbRE8tAg9NWOzcTJ7q
TuAxe/AJ07c1Rs9okJvl1/0G60qvbdDzz5zO0FuPFQIHNp9y9Bd1CufYVx7dB26mAxwa8GMNrN/U
oGbNZ3EQ7inLzHy5tRg9AXJrN8cB59cCUBeCiVO7zKM0jU0MamhnRThkg/NMmBOGb6StNeD9tDfA
7czsAWopDdnGoXUHtA+s/k0vNPkBcxEI13jVd/axp85va3LpwGggXXWw12Gwr/JGAH0b8CPboiZd
QO1l0mk/UHukud4C+w5uRoNzpCmoW6GbgbMyaQNkga2pQINB18lOXOCJzSWPFOhZcwzdgrsQnne7
nvjBi+7cP2BbtBeDOW5uOLGf3z94FasKIguOqJl+8ss/6Kumns4cuWbqq5592TN/RNIbn5Qo6qbi
O4F0P9txxPAwagqPlftztO8cWBzdN/jz3b7GD6JHYP/Zp4ToAMaA74M+EGSft3hEGMuf8EwjnTk/
nz/P7SLipB/ogQ6xNX0fDqNncMCfHqGLCMM0ZzFa+6lPJYQ5p81vW4HkCvidYf6kb+P/oB965g8K
C6uR0rdjX1DNKc5pOSTquI8uQ6KXxYaKBn+30/09tK4kMpJPgUIQkbENEPbuezNPPje2Um83SgyX
GTCJb6MnGVIpgncdQg1qz2bvPfxYD9fewCXDomx9S+HQJuX6W3VAL+v5WZMudRQZk9ZdOk6GIUtC
PqEb/uwSIrtR7/edzqgEdtpEwq7p2J5OQV+RLrmtTvFwFpf03M/VrRyTZ73qVod7v7Jh2Dwe5J25
JqFOU2qEu1sP+CRotklediycKfLjeIZzjJQsvKmiGSNQhxuJpKa+hoWUizaE1PuIRGzJqropwgVB
oo1hr870MZLgnXF5ZIpr6mF0L8aSy2gVnTAuoB4WEd4d5NPVC9TMotYXERKlTcwQ2KiB/C48AEfH
Qbyq4CN8xTFnTvf/ebOc3isnjD95s0QF0nx9s+y+zMmz782xL0SgEmRpA3x1w1Ff9/74xcxKEPdS
IEFTz6GgU0+BK/UZ5Gwbl4gZwycxEw+Kqa5QmMkh4OzgzEVPnDAiAOGBFaBW4wkDmj1G4RyElKgj
NlLCq8zsp085MNh/+R4t1Q8yxoSv8PUpTt7izZwf2BTHZZ3pIZpUIpuLkL1nNL6sYcHqcKm237wp
T2+RCjgXweXd2Zp7ZM8W6dG5bZsqo0nrJBTx8EC0+CQQdzEGnabTnkzofu1pYkWl4E7XSniECdxy
vLYavPMcL9LW5SToJFNnos+uqweOHriUZ1ntIYZUonc7ltEQ6oTRtwOHNwez2sVREskHN+bqG3ua
eaEbJ8XpyO8CeD9QJc8nbLP2C2R3A437ISUNyt5Yd0TbDNcl11/DSsOzdbi/VhCC0KE6v1vqVNkq
45ZnG6fiV2NwzInxCNth3BwL0+8814jE6+1W1EeWtpWbSZJOJNYXmWRXa7vLnAljE692eHjZ4y5u
y1u63De0IzKca7As48Z3XshVF+3XiLNz0JIMh/JOpbiNLlMi672uO0wYzOCZjRxcxj3D+gVenGIE
MvFUGGXuRps2RzMcgWIRolHXpGUP6sMsQt1hspUBnVKUn/WQj2u6j3SXd9Xz0QtEzoM7qTu5y7gR
q9gNNsrlEMLdikBt9bFvBnfbUIh6voTw7eDsyTmPKUvF0bHqWLbHe3VRHyRZnNeSGKsB73q66Vsk
taxWYmwz1tYVFG/vOQhlM0gUkyvIab3nv2caJ1udU1F3pDMty7stubTE4OJqm0i0ECfrJIkLtraC
HwRWKzlqpfhEIqYH09eT9WrOhQyt8YEoyBlnXtAT37WHIQ03TIuEHbnRxZDdLun0iok9PUC79prU
m5beZzfQUelEXnhzb/pIROKx3F7qCttYIFGh5dXNzFzID7u8vKykA8Uejf7XXz//S4nKvW//ofS/
QastYw==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV92L4zYQf/dfMU0ottuse7RvC6FQrg8Lxz2Ugz4si9HacqKuIxlJ2ST313dG8odkO9d7aGBB
luZLv/nNjFacOqUtKJMIvzK3cXlhWgp5MDBsqK5SNYftsBAGpLLA4F1oe2Ytl+9wUvW55TswCi4c
KibhbFDSglXQCFmDPXIwtm7FawLRbwtPzg2T9gf4gupKv4GS0N262w7V0NvpbCy8cvTo3eAus6C5
ETU3ICQZX1hFTw/dzR6V/AW1RCN4/XAtbsVXqIXmlVX6liS4lOzEYY9QFB2zx6LfoSNjz1a0pqT9
QOIfJWQ2E888NEVZNqLlZZnvIB0NpHkimlFdKn2iRRY7yGG/CCJb6Iz280d34SFXBS2yEYPNF0Q7
yM7oCjpWvbEDQmnhRwOs6zjThpKE8HogwRAgraqYFZgGZvzmzVh+mgz9vskT3hruwyjdFcqyENJw
bbMPO5jdzonxK68QKT7B57CMRRG5shRSWDTX3dI8LzRndZbnSWL1zfvriUmK4TcGWSnZiEPCrxXv
bM+sP7VW2is2WgWXCO3sAu3Rzysz3FiNCA8WPyM4gb1JAAmCiyTZbhFjWx3h9SzauuRXC9MFoVbc
yNTCm1QXOOIfIn/g1kGMhDUBN72hI5XCBQtIXQw8UEEdma6Jaz4vJIJ51Orc15hzzmu6TdFp3ogr
Aof0c98tsw1SiaiWotHffk3XYCkqdToxWRfTFXqgpg2khcLluOHMVC0zZhLKIomesfSreUNNgbXi
Ky9VRzwzkBneNoGQyyvGjbsFQqOZvpWIjqH281lJ/jireFgR3cPzSyTGWzQpDNIU+03Fs4XKLkhp
/n0uFnuF6VphB44b3uWRneSbBoMSioqE8oeF0JY+qTvYfEK+bPLYdoR4McfYQ7wMZj39q0kfP8q+
FfsymO0GzNlPh644Jje06ulqHpOEQqdJUfoidI2O4CWx4qOglLye6RrFQirpCRXvhoRqXH3sYdVJ
AItvc+VUsLO2v2hVAWrNIfVGtkG351cUMNncbh/WdowtSPtCdkzYFv6mwYc9o2Jt68ud6wectBr8
hYAulPSlgzH44YbV3ikjrulEaNJxt+/H3wZ7bXSXje/YY4tfVVrVmUstaDwwOBLMg6iduDB0lMVC
UyzYx7Ab4kjCqdViEJmDcdk/SKbgsjYXgfMznUWcrtS4z4fmJ/XOM1LPk/iIpqass5XwNbdnLb1Y
8h3ERXSWZI6rZJxKs1LBqVH65w0Oy4ra0CBYxEeuOMbDmV5GI6E0Ha/wgVTtkX0+OXvqsD02CKLf
XHbeft85D7tTCMYy2Njp4DJP7gWJr6paVWXZ1+/6YXLv/iE0M90FktiI7yFJD9e7SOLhEkkaMTUO
azq9i2woBNR0/0eoF1HFMf0H8ChxH/jgcB34GZIz3Qn4/vid+VEamQrOVqAPTrOfmD4MPdVh09tb
8dLLjvh/61lEP4yW5vJaH4vHcevG8agXvzPGoOhhXNncpTr99PTHx6e/UvffFLaxUSjuSeP286Dw
gtEMcW1xKr/he4/6IQ6FUXP+0gkioHY5iwC9Eyx3HKO7af0zPPe+XyLn7fAY78k4aiR387bCr5XT
5C4rFgwLGfMvJuAMew==
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jmDOstQA4dMGCHbeihlyEIDMWmG62yJEiKE//7kXKdpN2KzYBt8euR
fKSyLPs8wiEo8wh4wqZTGou4V6Hm0wJa1cSiTkJdr8+GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiAe2NtLADikftnDco0ko/SFEVgEZ8aRC5GLux7i3BpSJ6J1H+i7A2CjiHq9z7JRZuuQq
siwTIvpxJYCeuWaBpwZdhB+yxy/eWz+ZvVSU8C4E9FFZkyxFsvCT/ZzL8gcz9aXVE14Yyp2M+2W0
y7n5mp0qN+avKXvbsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCZN9UzlJr+/e/iab8WfqsmPI6pWeUPd
FrMsd4H/55poeO9n54COhUs+sZNEzNtg/wanpjpuqHJaxs76HtZryI/K3H7KJ/KDIhqcbJ7kI4ar
XL+sMgXnX0D+Te2Iy5xdP8yueSlQB/x/ED2BTAtyE3K4SYUN6AMNfbO63f4lBW3bUJPbTL+mjSxS
PyRfJkZRgj+VbFv+EzHFi5pKwUEepa4JslMnwkowSRCXI+m5XvEOvtuBrxHdhLalG0JofYBok6qj
YdN2dEngUlbC4PG60M1WEN0piu7Nq7on0mgyyUw3iV1etLo6r/81biWdQ9MWHFaePWZYaq+nmp+t
s3az+sj7eA0jfgPfeoN1
""")
if __name__ == '__main__':
main()
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
|
makerbot/conveyor
|
virtualenv.py
|
Python
|
agpl-3.0
| 101,937
|
## \file
## \ingroup tutorial_tdataframe
## \notebook -nodraw
## This tutorial shows how to express the concept of ranges when working with the TDataFrame.
## \macro_code
##
## \date March 2017
## \author Danilo Piparo
import ROOT
fill_tree_code = '''
void fill_tree(const char *filename, const char *treeName)
{
TFile f(filename, "RECREATE");
TTree t(treeName, treeName);
int b1;
float b2;
t.Branch("b1", &b1);
t.Branch("b2", &b2);
for (int i = 0; i < 100; ++i) {
b1 = i;
b2 = i * i;
t.Fill();
}
t.Write();
f.Close();
return;
}
'''
# We prepare an input tree to run on
fileName = "tdf006_ranges_py.root"
treeName = "myTree"
ROOT.gInterpreter.Declare(fill_tree_code)
ROOT.fill_tree(fileName, treeName)
# We read the tree from the file and create a TDataFrame.
TDF = ROOT.ROOT.Experimental.TDataFrame
d = TDF(treeName, fileName)
# ## Usage of ranges
# Now we'll count some entries using ranges
c_all = d.Count()
# This is how you can express a range of the first 30 entries
d_0_30 = d.Range(0, 30)
c_0_30 = d_0_30.Count()
# This is how you pick all entries from 15 onwards (an end value of 0 means no upper bound)
d_15_end = d.Range(15, 0)
c_15_end = d_15_end.Count()
# We can use a stride too; in this case we pick one event every 3
d_15_end_3 = d.Range(15, 0, 3)
c_15_end_3 = d_15_end_3.Count()
# Range is a first-class citizen in the TDataFrame graph:
# not only actions (like Count) but also filters and new columns can be attached to it, as sketched below.
d_0_50 = d.Range(0, 50)
c_0_50_odd_b1 = d_0_50.Filter("1 == b1 % 2").Count()
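# (A minimal sketch, not part of the original tutorial: assuming the Define
# method of TDataFrame is available, a new column can be attached to a Range
# node in the same way.)
# d_0_50_sq = d_0_50.Define("b1_sq", "b1 * b1")
# c_0_50_big = d_0_50_sq.Filter("b1_sq > 100").Count()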
# An important thing to notice is that the counts of a filter are relative to the
# number of entries a filter "sees". Therefore, if a Range depends on a filter,
# the Range will act on the entries passing the filter only.
c_0_3_after_even_b1 = d.Filter("0 == b1 % 2").Range(0, 3).Count()
# Ok, time to wrap up: let's print all counts!
print("Usage of ranges:")
print(" - All entries:", c_all.GetValue())
print(" - Entries from 0 to 30:", c_0_30.GetValue())
print(" - Entries from 15 onwards:", c_15_end.GetValue())
print(" - Entries from 15 onwards in steps of 3:", c_15_end_3.GetValue())
print(" - Entries from 0 to 50, odd only:", c_0_50_odd_b1.GetValue())
print(" - First three entries of all even entries:", c_0_3_after_even_b1.GetValue())
|
bbockelm/root
|
tutorials/dataframe/tdf006_ranges.py
|
Python
|
lgpl-2.1
| 2,283
|
from __future__ import absolute_import
import six
import warnings
from . import backend as K
from .utils.generic_utils import deserialize_keras_object
from .engine import Layer
def softmax(x, axis=-1):
"""Softmax activation function.
# Arguments
x: Input tensor.
axis: Integer, axis along which the softmax normalization is applied.
# Returns
Tensor, output of softmax transformation.
# Raises
ValueError: In case `dim(x) == 1`.
"""
ndim = K.ndim(x)
if ndim == 2:
return K.softmax(x)
elif ndim > 2:
e = K.exp(x - K.max(x, axis=axis, keepdims=True))
s = K.sum(e, axis=axis, keepdims=True)
return e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D')
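# A minimal usage sketch (illustrative, not part of the original module):
# for a 2-D backend tensor the call delegates to K.softmax, while for higher
# ranks the manual normalization above is applied along `axis`.
#   x = K.variable([[1.0, 2.0, 3.0]])  # shape (1, 3), hypothetical values
#   p = softmax(x)                     # each row of p sums to 1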
def elu(x, alpha=1.0):
return K.elu(x, alpha)
def selu(x):
"""Scaled Exponential Linear Unit. (Klambauer et al., 2017)
# Arguments
x: A tensor or variable to compute the activation function for.
# References
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
"""
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * K.elu(x, alpha)
def softplus(x):
return K.softplus(x)
def softsign(x):
return K.softsign(x)
def relu(x, alpha=0., max_value=None):
return K.relu(x, alpha=alpha, max_value=max_value)
def tanh(x):
return K.tanh(x)
def sigmoid(x):
return K.sigmoid(x)
def hard_sigmoid(x):
return K.hard_sigmoid(x)
def linear(x):
return x
def serialize(activation):
return activation.__name__
def deserialize(name, custom_objects=None):
return deserialize_keras_object(name,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='activation function')
def get(identifier):
if identifier is None:
return linear
if isinstance(identifier, six.string_types):
identifier = str(identifier)
return deserialize(identifier)
elif callable(identifier):
if isinstance(identifier, Layer):
warnings.warn((
'Do not pass a layer instance (such as {identifier}) as the '
'activation argument of another layer. Instead, advanced '
'activation layers should be used just like any other '
'layer in a model.'
).format(identifier=identifier.__class__.__name__))
return identifier
else:
raise ValueError('Could not interpret '
'activation function identifier:', identifier)
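# Illustrative usage sketch (not part of the original module):
#   get('relu')     -> the relu function defined above
#   get(None)       -> linear
#   get(K.sigmoid)  -> returned unchanged, since any callable is passed through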
|
deepforge-dev/deepforge-keras
|
test/test-cases/activations.py
|
Python
|
apache-2.0
| 2,712
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
import webob.dec
import webob.exc
import cinder.api.openstack
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
import cinder.policy
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The XML namespace for the extension, e.g.,
# 'http://www.fox.in.socks/api/ext/pie/v1.0'
namespace = None
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T13:25:27-06:00'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
@classmethod
def nsmap(cls):
"""Synthesize a namespace map from extension."""
# Start with a base nsmap
nsmap = ext_nsmap.copy()
# Add the namespace for the extension
nsmap[cls.alias] = cls.namespace
return nsmap
@classmethod
def xmlname(cls, name):
"""Synthesize element and attribute names."""
return '{%s}%s' % (cls.namespace, name)
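# Illustrative sketch of a minimal extension built on this contract (the class
# below is hypothetical; the attribute values echo the examples given above):
#
#   class FoxInSocks(ExtensionDescriptor):
#       """The Fox In Socks extension."""
#       name = 'Fox In Socks'
#       alias = 'FOXNSOX'
#       namespace = 'http://www.fox.in.socks/api/ext/pie/v1.0'
#       updated = '2011-01-22T13:25:27-06:00'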
def make_ext(elem):
elem.set('name')
elem.set('namespace')
elem.set('alias')
elem.set('updated')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extension', selector='extension')
make_ext(root)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extensions')
elem = xmlutil.SubTemplateElement(root, 'extension',
selector='extensions')
make_ext(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsResource, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for _alias, ext in self.extension_manager.extensions.iteritems():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
@wsgi.serializers(xml=ExtensionTemplate)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See cinder/tests/api/extensions/foxinsocks/extension.py for an
example extension implementation.
"""
def __init__(self):
LOG.info(_('Initializing extension manager.'))
self.cls_list = CONF.osapi_volume_extension
self.extensions = {}
self._load_extensions()
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
LOG.info(_('Loaded extension: %s'), alias)
if alias in self.extensions:
raise exception.Error("Found duplicate extension: %s" % alias)
self.extensions[alias] = ext
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsResource(self)))
for ext in self.extensions.values():
try:
resources.extend(ext.get_resources())
except AttributeError:
# NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.extensions.values():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug('Ext name: %s', extension.name)
LOG.debug('Ext alias: %s', extension.alias)
LOG.debug('Ext description: %s',
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug("Loading extension %s", ext_factory)
# Load the factory
factory = importutils.import_class(ext_factory)
# Call it
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
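# Illustrative usage sketch (the dotted path below is hypothetical):
#   ext_mgr.load_extension('mypackage.contrib.foo.Foo')
#   # imports Foo and calls Foo(ext_mgr); Foo is expected to call register()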
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
# NOTE(thingee): Backwards compat for the old extension loader path.
# We can drop this post-grizzly in the H release.
old_contrib_path = ('cinder.api.openstack.volume.contrib.'
'standard_extensions')
new_contrib_path = 'cinder.api.contrib.standard_extensions'
if old_contrib_path in extensions:
LOG.warn(_('osapi_volume_extension is set to deprecated path: %s'),
old_contrib_path)
LOG.warn(_('Please set your flag or cinder.conf settings for '
'osapi_volume_extension to: %s'), new_contrib_path)
extensions = [e.replace(old_contrib_path, new_contrib_path)
for e in extensions]
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warn(_('Failed to load extension %(ext_factory)s: '
'%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
"""Extend core controllers of cinder OpenStack API.
Provide a way to extend existing cinder OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in cinder."""
def __init__(self, collection, controller, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = "%s%s" % (root[0].upper(), root[1:])
classpath = ("%s%s.%s.%s" %
(package, relpkg, root, classname))
if ext_list is not None and classname not in ext_list:
logger.debug("Skipping extension: %s" % classpath)
continue
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
logger.warn(_('Failed to load extension %(classpath)s: '
'%(exc)s'),
{'classpath': classpath, 'exc': exc})
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname,
'__init__.py')):
continue
# If it has extension(), delegate...
ext_name = ("%s%s.%s.extension" %
(package, relpkg, dname))
try:
ext = importutils.import_class(ext_name)
except ImportError:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warn(_('Failed to load extension %(ext_name)s: '
'%(exc)s'),
{'ext_name': ext_name, 'exc': exc})
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def extension_authorizer(api_name, extension_name):
def authorize(context, target=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
action = '%s_extension:%s' % (api_name, extension_name)
cinder.policy.enforce(context, action, target)
return authorize
def soft_extension_authorizer(api_name, extension_name):
hard_authorize = extension_authorizer(api_name, extension_name)
def authorize(context):
try:
hard_authorize(context)
return True
except exception.NotAuthorized:
return False
return authorize
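# Illustrative usage sketch (the api/extension names below are hypothetical):
#   authorize = extension_authorizer('volume', 'types_manage')
#   authorize(context)            # enforces 'volume_extension:types_manage'
#   soft = soft_extension_authorizer('volume', 'types_manage')
#   allowed = soft(context)       # returns True/False instead of raising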
|
nash-x/hws
|
cinder/api/extensions.py
|
Python
|
apache-2.0
| 13,552
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Vote inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from luigi.configuration import LuigiTomlParser, get_config, add_config_path
from helpers import LuigiTestCase
class TomlConfigParserTest(LuigiTestCase):
@classmethod
def setUpClass(cls):
add_config_path('test/testconfig/luigi.toml')
add_config_path('test/testconfig/luigi_local.toml')
def setUp(self):
LuigiTomlParser._instance = None
super(TomlConfigParserTest, self).setUp()
def test_get_config(self):
config = get_config('toml')
self.assertIsInstance(config, LuigiTomlParser)
def test_file_reading(self):
config = get_config('toml')
self.assertIn('hdfs', config.data)
def test_get(self):
config = get_config('toml')
# test getting
self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
self.assertEqual(config.get('hdfs', 'client', 'test'), 'hadoopcli')
# test default
self.assertEqual(config.get('hdfs', 'test', 'check'), 'check')
with self.assertRaises(KeyError):
config.get('hdfs', 'test')
# test override
self.assertEqual(config.get('hdfs', 'namenode_host'), 'localhost')
# test non-string values
self.assertEqual(config.get('hdfs', 'namenode_port'), 50030)
def test_set(self):
config = get_config('toml')
self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
config.set('hdfs', 'client', 'test')
self.assertEqual(config.get('hdfs', 'client'), 'test')
config.set('hdfs', 'check', 'test me')
self.assertEqual(config.get('hdfs', 'check'), 'test me')
def test_has_option(self):
config = get_config('toml')
self.assertTrue(config.has_option('hdfs', 'client'))
self.assertFalse(config.has_option('hdfs', 'nope'))
self.assertFalse(config.has_option('nope', 'client'))
class HelpersTest(LuigiTestCase):
def test_add_without_install(self):
enabled = LuigiTomlParser.enabled
LuigiTomlParser.enabled = False
with self.assertRaises(ImportError):
add_config_path('test/testconfig/luigi.toml')
LuigiTomlParser.enabled = enabled
def test_get_without_install(self):
enabled = LuigiTomlParser.enabled
LuigiTomlParser.enabled = False
with self.assertRaises(ImportError):
get_config('toml')
LuigiTomlParser.enabled = enabled
|
riga/luigi
|
test/config_toml_test.py
|
Python
|
apache-2.0
| 3,012
|
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import random
class Topology(object):
"""
Topology source: ring, mesh, linear and fat_tree
[source] -- send() --> [coroutine] -- send() --> [coroutine(sink)]
args:
node
port
eport
link
slice
slice_condition <priority_policy>
federation
priority_policy:
minmax 0 or 65535
random 0 ~ 65535
fixed 10
Note: for the fat_tree topology, this generates a topology with fixed parameters:
the number of pods: 40
the number of ports: 40
the number of ToR SW: 40 * pods
the number of aggregation SW: 2 * pods
the number of core SW: 4
"""
eports = 3
nodes = 10
# fat_tree-related
EPORT = 0
NULL = 0
CORE = 1
AGGREGATION = 2
TOR = 3
LEFT = 1
RIGHT = 2
def __init__(self, *args, **kwargs):
"""
kwrags:
networks The number of networks
topo_type Topology type
nodes The number of nodes
eports The number of unconnected (external) ports
"""
if 'networks' in kwargs:
self.networks = kwargs['networks']
else:
self.networks = 1
self.topo_type = kwargs['topo_type']
if 'nodes' in kwargs:
self.nodes = kwargs['nodes']
else:
self.nodes = Topology.nodes
if 'eports' in kwargs:
self.eports = kwargs['eports']
else:
self.eports = Topology.eports
if self.topo_type == 'fat_tree':
"""
layer: eport(0), core(1), aggregation(2), tor(3)
pod: 1 ~ 40
"""
self.formatstr = '{layer:}{pod:02}{left_right}{number:02}'
else:
self.formatstr = '{:0'+str(len(str(self.nodes+self.eports))+1)+'}'
# Coroutine setting
def __call__(self, cr_next=None):
self.cr_next = cr_next
return self
def close(self):
self.cr_next.close()
# Coroutine send imitation
# TODO: this method should be coroutine's send()
def send(self, data):
cr_next = self.cr_next
args = []
kwargs = {}
for s in data:
if isinstance(s, dict):
k = s.keys()[0]
v = s.values()[0]
kwargs[k] = v
else:
args.append(s)
gen_type = args[0]
if gen_type == 'node':
return self._generate_node(cr_next)
elif gen_type == 'port':
return self._generate_port(cr_next)
elif gen_type == 'eport': # External port
return self._generate_eport(cr_next)
elif gen_type == 'link':
return self._generate_link(cr_next)
elif gen_type == 'slice':
return self._generate_slice(cr_next)
elif gen_type == 'slice_condition':
if len(args) == 2:
return self._generate_slice_condition(cr_next, args[1])
else:
raise Exception('Requires slice_policy')
elif gen_type == 'federation':
if len(args) == 3:
return self._generate_federation(cr_next, args[1], args[2])
else:
raise Exception('Requires boundary_node and boundary_port')
def _generate_node(self, cr_next):
formatstr = self.formatstr
if self.topo_type == 'fat_tree':
CORE = Topology.CORE
AGGR = Topology.AGGREGATION
TOR = Topology.TOR
LEFT = Topology.LEFT
RIGHT = Topology.RIGHT
np = formatstr.format
NULL = Topology.NULL
for i in range(1, self.networks+1):
# Core
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2)])
# Aggregation
for pod in range(1,41):
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
# ToR
for tor in range(1,21):
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
else:
for i in range(1, self.networks+1):
for j in range(1, self.nodes+1):
cr_next.send([i, formatstr.format(j)])
def _generate_port(self, cr_next):
networks = self.networks
nodes = self.nodes
formatstr = self.formatstr
topo = self.topo_type
if topo == 'ring':
"""
...[node]--adj_left--[node]--adj_right--[node]...
"""
for i in range(1, networks + 1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_left = formatstr.format(nodes)
adj_right = formatstr.format(2)
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
adj_right = formatstr.format(1)
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.format(j+1)
cr_next.send([i, node, adj_left])
cr_next.send([i, node, adj_right])
elif topo == 'mesh':
"""
| |
...[node]----[node]----[node]...
1 : range(1,1), range(2,1001)
2 : range(1,2), range(3,1001)
3 : range(1,3), range(4,1001)
:
1000: range(1,1000), range(1001,1001)
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
for port in range(1,j):
cr_next.send([i, node, formatstr.format(port)])
for port in range(j+1,nodes+1):
cr_next.send([i, node, formatstr.format(port)])
elif topo == 'linear':
"""
[node]---[node]...[node]---[node]
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_right = formatstr.format(2)
cr_next.send([i, node, adj_right])
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
cr_next.send([i, node, adj_left])
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.format(j+1)
cr_next.send([i, node, adj_left])
cr_next.send([i, node, adj_right])
elif topo == 'fat_tree':
CORE = Topology.CORE
AGGR = Topology.AGGREGATION
TOR = Topology.TOR
LEFT = Topology.LEFT
RIGHT = Topology.RIGHT
np = formatstr.format # node & port
NULL = Topology.NULL
for i in range(1, self.networks+1):
for pod in range(1,41):
# Core => Aggregation
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
# Aggregation => Core
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=CORE, pod=NULL, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=CORE, pod=NULL, left_right=LEFT, number=2)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=CORE, pod=NULL, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=CORE, pod=NULL, left_right=RIGHT, number=2)])
# Aggregation
for pod in range(1,41):
# ToR
for tor in range(1,21):
# Aggregation => ToR
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1),
np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1),
np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
# ToR => Aggregation
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor),
np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor),
np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
else:
pass
def _generate_eport(self, cr_next): # External ports
networks = self.networks
nodes = self.nodes
eports = self.eports
formatstr = self.formatstr
topo = self.topo_type
if topo in ['ring', 'mesh']:
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
for k in range(nodes+1, nodes+eports+1):
eport = formatstr.format(k)
cr_next.send([i, node, eport])
elif topo == 'linear':
for i in range(1, networks+1):
for j in [1, nodes]:
node = formatstr.format(j)
for k in range(nodes+1, nodes+eports+1):
eport = formatstr.format(k)
cr_next.send([i, node, eport])
elif topo == 'fat_tree':
CORE = Topology.CORE
TOR = Topology.TOR
LEFT = Topology.LEFT
RIGHT = Topology.RIGHT
np = formatstr.format
NULL = Topology.NULL
EPORT = Topology.EPORT
for i in range(1, networks+1):
# Core
for eport in range(1, 5):
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1),
np(layer=EPORT,pod=NULL, left_right=NULL, number=eport)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2),
np(layer=EPORT,pod=NULL, left_right=NULL, number=eport)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1),
np(layer=EPORT,pod=NULL, left_right=NULL, number=eport)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2),
np(layer=EPORT,pod=NULL, left_right=NULL, number=eport)])
# ToR
#for eport in range(1, 41):
for eport in range(1, 11):
for pod in range(1,41):
for tor in range(1,21):
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor),
np(layer=EPORT, pod=NULL, left_right=NULL, number=eport)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor),
np(layer=EPORT, pod=NULL, left_right=NULL, number=eport)])
def _generate_link(self, cr_next):
if self.topo_type in ('ring', 'mesh'):
return self._generate_port(cr_next)
else:
pass
def _generate_slice(self, cr_next):
nodes = self.nodes
eports = self.eports
formatstr = self.formatstr
for i in range(nodes+1, nodes+eports+1):
eport = formatstr.format(i)
cr_next.send([eport])
def _generate_slice_condition(self, cr_next, priority_policy):
nodes = self.nodes
eports = self.eports
formatstr = self.formatstr
topo = self.topo_type
seqno = 0
if topo in ['ring', 'mesh']:
range_ = range(1, nodes+1)
elif topo in ['linear']:
range_ = [1, nodes]
for i in range_:
node = formatstr.format(i)
for j in range(nodes+1, nodes+eports+1):
eport = formatstr.format(j)
slice_ = eport
priority = 10
if priority_policy == 'minmax':
priority = random.randint(0,1) * 65535
elif priority_policy == 'random':
priority = random.randint(0,65535)
elif priority_policy == 'fixed':
pass
seqno += 1
cr_next.send([slice_, priority, node, eport, seqno])
def _generate_federation(self, cr_next, node, port):
networks = self.networks
formatstr = self.formatstr
node = formatstr.format(node)
port = formatstr.format(port)
if networks < 2:
raise Exception("Federation impossible")
elif networks == 2:
cr_next.send([1, node, port, 2, node, port])
else:
for i in range(1, networks+1):
if i == networks:
cr_next.send([i, node, port, 1, node, port])
else:
cr_next.send([i, node, port, i+1, node, port])
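# Illustrative usage sketch (PrintSink is hypothetical, not part of this
# module); stages are chained with __call__ and driven via send():
#   class PrintSink(object):
#       def send(self, data):
#           print(data)
#       def close(self):
#           pass
#   topo = Topology(networks=1, topo_type='ring', nodes=5, eports=2)
#   topo(PrintSink())     # wire the sink as cr_next
#   topo.send(['node'])   # emits [network, node_name] pairs to the sink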
|
nis-sdn/odenos
|
apps/cli/producer.py
|
Python
|
apache-2.0
| 16,458
|
# Copyright (c) 2016 EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The group types specs controller"""
import webob
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import policy
from cinder import rpc
from cinder import utils
from cinder.volume import group_types
class GroupTypeSpecsController(wsgi.Controller):
"""The group type specs API controller for the OpenStack API."""
def _check_policy(self, context):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
policy.enforce(context, 'group:group_types_specs', target)
def _get_group_specs(self, context, group_type_id):
group_specs = db.group_type_specs_get(context, group_type_id)
specs_dict = {}
for key, value in group_specs.items():
specs_dict[key] = value
return dict(group_specs=specs_dict)
def _check_type(self, context, group_type_id):
try:
group_types.get_group_type(context, group_type_id)
except exception.GroupTypeNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.msg)
@wsgi.Controller.api_version('3.11')
def index(self, req, group_type_id):
"""Returns the list of group specs for a given group type."""
context = req.environ['cinder.context']
self._check_policy(context)
self._check_type(context, group_type_id)
return self._get_group_specs(context, group_type_id)
@wsgi.Controller.api_version('3.11')
@wsgi.response(202)
def create(self, req, group_type_id, body=None):
context = req.environ['cinder.context']
self._check_policy(context)
self.assert_valid_body(body, 'group_specs')
self._check_type(context, group_type_id)
specs = body['group_specs']
self._check_key_names(specs.keys())
utils.validate_dictionary_string_length(specs)
db.group_type_specs_update_or_create(context,
group_type_id,
specs)
notifier_info = dict(type_id=group_type_id, specs=specs)
notifier = rpc.get_notifier('groupTypeSpecs')
notifier.info(context, 'group_type_specs.create',
notifier_info)
return body
@wsgi.Controller.api_version('3.11')
def update(self, req, group_type_id, id, body=None):
context = req.environ['cinder.context']
self._check_policy(context)
if not body:
expl = _('Request body empty')
raise webob.exc.HTTPBadRequest(explanation=expl)
self._check_type(context, group_type_id)
if id not in body:
expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
if len(body) > 1:
expl = _('Request body contains too many items')
raise webob.exc.HTTPBadRequest(explanation=expl)
self._check_key_names(body.keys())
utils.validate_dictionary_string_length(body)
db.group_type_specs_update_or_create(context,
group_type_id,
body)
notifier_info = dict(type_id=group_type_id, id=id)
notifier = rpc.get_notifier('groupTypeSpecs')
notifier.info(context,
'group_type_specs.update',
notifier_info)
return body
@wsgi.Controller.api_version('3.11')
def show(self, req, group_type_id, id):
"""Return a single extra spec item."""
context = req.environ['cinder.context']
self._check_policy(context)
self._check_type(context, group_type_id)
specs = self._get_group_specs(context, group_type_id)
if id in specs['group_specs']:
return {id: specs['group_specs'][id]}
else:
msg = _("Group Type %(type_id)s has no extra spec with key "
"%(id)s.") % ({'type_id': group_type_id, 'id': id})
raise webob.exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version('3.11')
def delete(self, req, group_type_id, id):
"""Deletes an existing group spec."""
context = req.environ['cinder.context']
self._check_policy(context)
self._check_type(context, group_type_id)
try:
db.group_type_specs_delete(context, group_type_id, id)
except exception.GroupTypeSpecsNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
notifier_info = dict(type_id=group_type_id, id=id)
notifier = rpc.get_notifier('groupTypeSpecs')
notifier.info(context,
'group_type_specs.delete',
notifier_info)
return webob.Response(status_int=202)
def _check_key_names(self, keys):
if not common.validate_key_names(keys):
expl = _('Key names can only contain alphanumeric characters, '
'underscores, periods, colons and hyphens.')
raise webob.exc.HTTPBadRequest(explanation=expl)
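# Illustrative request-body sketch for create()/update() (values hypothetical):
#   POST .../group_types/{group_type_id}/group_specs
#   {"group_specs": {"consistent_group_snapshot_enabled": "<is> True"}}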
def create_resource():
return wsgi.Resource(GroupTypeSpecsController())
|
Hybrid-Cloud/cinder
|
cinder/api/v3/group_specs.py
|
Python
|
apache-2.0
| 5,882
|
"""Tests for streams.py"""
import asyncio
import unittest
from unittest import mock
from aiohttp import streams
from aiohttp import test_utils
class TestStreamReader(unittest.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def _make_one(self, *args, **kwargs):
return streams.StreamReader(loop=self.loop, *args, **kwargs)
def test_create_waiter(self):
stream = self._make_one()
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, stream._create_waiter, 'test')
@mock.patch('aiohttp.streams.asyncio')
def test_ctor_global_loop(self, m_asyncio):
stream = streams.StreamReader()
self.assertIs(stream._loop, m_asyncio.get_event_loop.return_value)
def test_at_eof(self):
stream = self._make_one()
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_wait_eof(self):
stream = self._make_one()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
def cb():
yield from asyncio.sleep(0.1, loop=self.loop)
stream.feed_eof()
asyncio.Task(cb(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
self.assertIsNone(stream._eof_waiter)
def test_wait_eof_eof(self):
stream = self._make_one()
stream.feed_eof()
wait_task = asyncio.Task(stream.wait_eof(), loop=self.loop)
self.loop.run_until_complete(wait_task)
self.assertTrue(stream.is_eof())
def test_feed_empty_data(self):
stream = self._make_one()
stream.feed_data(b'')
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_feed_nonempty_data(self):
stream = self._make_one()
stream.feed_data(self.DATA)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_read_zero(self):
# Read zero bytes.
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_read(self):
# Read bytes.
stream = self._make_one()
read_task = asyncio.Task(stream.read(30), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = self._make_one()
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line2', data)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(1024), loop=self.loop)
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
data = self.loop.run_until_complete(stream.read())
self.assertIs(data, streams.EOF_MARKER)
@mock.patch('aiohttp.streams.internal_logger')
def test_read_eof_infinit(self, internal_logger):
# Read bytes.
stream = self._make_one()
stream.feed_eof()
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.loop.run_until_complete(stream.read())
self.assertTrue(internal_logger.warning.called)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = self._make_one()
read_task = asyncio.Task(stream.read(-1), loop=self.loop)
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_read_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = self._make_one()
stream.feed_data(b'chunk1 ')
read_task = asyncio.Task(stream.readline(), loop=self.loop)
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b' chunk4', data)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = self._make_one(limit=3)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'line2\n', data)
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
stream = self._make_one(limit=7)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'chunk3\n', data)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = self._make_one()
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'line2\nline3\n', data)
def test_readline_eof(self):
stream = self._make_one()
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = self._make_one()
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
self.assertIs(line, streams.EOF_MARKER)
def test_readline_read_byte_count(self):
stream = self._make_one()
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'ine3\n', data)
def test_readline_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
stream = self._make_one()
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(b'', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_readexactly(self):
# Read exact number of bytes.
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(self.DATA, data)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = self._make_one()
n = 2 * len(self.DATA)
read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_readexactly_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = self._make_one()
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = self._make_one()
@asyncio.coroutine
def set_err():
stream.set_exception(ValueError())
t1 = asyncio.Task(stream.readline(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = self._make_one()
@asyncio.coroutine
def read_a_line():
yield from stream.readline()
t = asyncio.Task(read_a_line(), loop=self.loop)
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_readany_eof(self):
stream = self._make_one()
read_task = asyncio.Task(stream.readany(), loop=self.loop)
self.loop.call_soon(stream.feed_data, b'chunk1\n')
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\n', data)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_readany_empty_eof(self):
stream = self._make_one()
stream.feed_eof()
read_task = asyncio.Task(stream.readany(), loop=self.loop)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertIs(data, streams.EOF_MARKER)
def test_readany_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readany())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readany())
def test_read_nowait(self):
stream = self._make_one()
stream.feed_data(b'line1\nline2\n')
self.assertEqual(
stream.read_nowait(), b'line1\nline2\n')
self.assertIs(
stream.read_nowait(), streams.EOF_MARKER)
stream.feed_eof()
data = self.loop.run_until_complete(stream.read())
self.assertEqual(b'', data)
def test_read_nowait_exception(self):
stream = self._make_one()
stream.feed_data(b'line\n')
stream.set_exception(ValueError())
self.assertRaises(ValueError, stream.read_nowait)
def test_read_nowait_waiter(self):
stream = self._make_one()
stream.feed_data(b'line\n')
stream._waiter = stream._create_waiter('readany')
self.assertRaises(RuntimeError, stream.read_nowait)
class TestEmptyStreamReader(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
def test_empty_stream_reader(self):
s = streams.EmptyStreamReader()
self.assertIsNone(s.set_exception(ValueError()))
self.assertIsNone(s.exception())
self.assertIsNone(s.feed_eof())
self.assertIsNone(s.feed_data(b'data'))
self.assertTrue(s.at_eof())
self.assertIsNone(
self.loop.run_until_complete(s.wait_eof()))
self.assertIs(
self.loop.run_until_complete(s.read()), streams.EOF_MARKER)
self.assertIs(
self.loop.run_until_complete(s.readline()), streams.EOF_MARKER)
self.assertIs(
self.loop.run_until_complete(s.readany()), streams.EOF_MARKER)
self.assertRaises(
asyncio.IncompleteReadError,
self.loop.run_until_complete, s.readexactly(10))
self.assertIs(s.read_nowait(), streams.EOF_MARKER)
class DataQueueMixin:
def test_is_eof(self):
self.assertFalse(self.buffer.is_eof())
self.buffer.feed_eof()
self.assertTrue(self.buffer.is_eof())
def test_at_eof(self):
self.assertFalse(self.buffer.at_eof())
self.buffer.feed_eof()
self.assertTrue(self.buffer.at_eof())
self.buffer._buffer.append(object())
self.assertFalse(self.buffer.at_eof())
def test_feed_data(self):
item = object()
self.buffer.feed_data(item, 1)
self.assertEqual([(item, 1)], list(self.buffer._buffer))
def test_feed_eof(self):
self.buffer.feed_eof()
self.assertTrue(self.buffer._eof)
def test_read(self):
item = object()
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_data(item, 1)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertIs(item, data)
def test_read_eof(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
streams.EofStream, self.loop.run_until_complete, read_task)
def test_read_cancelled(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
test_utils.run_briefly(self.loop)
waiter = self.buffer._waiter
self.assertIsInstance(waiter, asyncio.Future)
read_task.cancel()
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, read_task)
self.assertTrue(waiter.cancelled())
self.assertIsNone(self.buffer._waiter)
self.buffer.feed_data(b'test', 4)
self.assertIsNone(self.buffer._waiter)
def test_read_until_eof(self):
item = object()
self.buffer.feed_data(item, 1)
self.buffer.feed_eof()
data = self.loop.run_until_complete(self.buffer.read())
self.assertIs(data, item)
self.assertRaises(
streams.EofStream,
self.loop.run_until_complete, self.buffer.read())
def test_read_exception(self):
self.buffer.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, self.buffer.read())
def test_read_exception_with_data(self):
val = object()
self.buffer.feed_data(val, 1)
self.buffer.set_exception(ValueError())
self.assertIs(val, self.loop.run_until_complete(self.buffer.read()))
self.assertRaises(
ValueError, self.loop.run_until_complete, self.buffer.read())
def test_read_exception_on_wait(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIsInstance(self.buffer._waiter, asyncio.Future)
self.buffer.feed_eof()
self.buffer.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, read_task)
def test_exception(self):
self.assertIsNone(self.buffer.exception())
exc = ValueError()
self.buffer.set_exception(exc)
self.assertIs(self.buffer.exception(), exc)
def test_exception_waiter(self):
@asyncio.coroutine
def set_err():
self.buffer.set_exception(ValueError())
t1 = asyncio.Task(self.buffer.read(), loop=self.loop)
t2 = asyncio.Task(set_err(), loop=self.loop)
self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))
self.assertRaises(ValueError, t1.result)
class TestDataQueue(unittest.TestCase, DataQueueMixin):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.buffer = streams.DataQueue(loop=self.loop)
def tearDown(self):
self.loop.close()
class TestChunksQueue(unittest.TestCase, DataQueueMixin):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.buffer = streams.ChunksQueue(loop=self.loop)
def tearDown(self):
self.loop.close()
def test_read_eof(self):
read_task = asyncio.Task(self.buffer.read(), loop=self.loop)
def cb():
self.buffer.feed_eof()
self.loop.call_soon(cb)
self.loop.run_until_complete(read_task)
self.assertTrue(self.buffer.at_eof())
def test_read_until_eof(self):
item = object()
self.buffer.feed_data(item, 1)
self.buffer.feed_eof()
data = self.loop.run_until_complete(self.buffer.read())
self.assertIs(data, item)
thing = self.loop.run_until_complete(self.buffer.read())
self.assertEqual(thing, b'')
self.assertTrue(self.buffer.at_eof())
def test_readany(self):
self.assertIs(self.buffer.read.__func__, self.buffer.readany.__func__)
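# Illustrative sketch (not part of the original test module): the feed/read
# pattern the cases above exercise, assuming _make_one() wraps
# streams.StreamReader(loop=...) as the rest of this test file does.
def _example_feed_then_read(loop):
    reader = streams.StreamReader(loop=loop)
    reader.feed_data(b'payload')   # buffer some bytes
    reader.feed_eof()              # mark end of stream
    return loop.run_until_complete(reader.read())  # -> b'payload'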
|
vasylbo/aiohttp
|
tests/test_streams.py
|
Python
|
apache-2.0
| 21,126
|
__author__ = 'Lorenzo'
planet_mapper = {
'<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>': 'planet type', # link to YAGO category; could be explored further
'<http://live.dbpedia.org/ontology/wikiPageExternalLink>': 'external link', # many
'<http://live.dbpedia.org/property/inclination>': 'inclination', # quantity and text
'<http://www.w3.org/2000/01/rdf-schema#seeAlso>': 'see also', # many
'<http://live.dbpedia.org/property/albedo>': 'albedo', # quantity
'<http://xmlns.com/foaf/0.1/depiction>': 'depiction', # svg shape
'<http://live.dbpedia.org/property/rotVelocity>': 'rotation velocity', # quantity
'<http://live.dbpedia.org/property/period>': 'period', # quantity
'<http://live.dbpedia.org/property/meanTemp>': 'average temperature', # quantity
'<http://live.dbpedia.org/ontology/abstract>': 'abstract', # text
'<http://live.dbpedia.org/property/meanAnomaly>': 'mean anomaly', # quantity
'<http://live.dbpedia.org/property/siderealDay>': 'sidereal day', # quantity
'<http://live.dbpedia.org/property/scaleHeight>': 'atmospheric scale height', # quantity
'<http://live.dbpedia.org/property/mass>': 'mass', # quantity
'<http://live.dbpedia.org/property/escapeVelocity>': 'escape velocity (km/s)', # quantity
'<http://live.dbpedia.org/property/atmosphere>': 'has atmosphere', # yes/no
'<http://live.dbpedia.org/property/ascNode>': 'asc node', # quantity
'<http://live.dbpedia.org/property/surfaceArea>': 'surface area', # quantity
'<http://live.dbpedia.org/property/equatorialRadius>': 'equatorial radius', # quantity
'<http://live.dbpedia.org/property/polarRadius>': 'polar radius', # quantity
'<http://live.dbpedia.org/ontology/escapeVelocity>': 'escape velocity (double)', # quantity
'<http://live.dbpedia.org/property/atmosphereComposition>': 'atmosphere chemistry', # text
'<http://live.dbpedia.org/property/surfacePressure>': 'surface pressure',
'<http://live.dbpedia.org/property/volume>': 'volume',
'<http://live.dbpedia.org/property/angularSize>': 'angular size',
'<http://live.dbpedia.org/property/avgSpeed>': 'average speed (km/s)',
'<http://live.dbpedia.org/property/declination>': 'declination',
'<http://live.dbpedia.org/property/surfaceGrav>': 'surface gravity (grams)',
'<http://live.dbpedia.org/property/satellites>': 'number of satellites'
}
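# Illustrative sketch (not part of the original module): resolving a raw
# DBpedia predicate URI to its human-readable label, falling back to the
# URI itself when it is not in the map. The example predicate below is only
# an assumption for demonstration.
def label_for(predicate):
    """Return the human-readable label for a DBpedia predicate URI."""
    return planet_mapper.get(predicate, predicate)

# e.g. label_for('<http://live.dbpedia.org/property/mass>') -> 'mass'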
|
pincopallino93/rdfendpoints
|
parser/dbpediamap.py
|
Python
|
apache-2.0
| 2,410
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for using the TensorFlow C API."""
from tensorflow.core.framework import api_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.client import pywrap_tf_session as c_api
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
class ScopedTFStatus(object):
"""Wrapper around TF_Status that handles deletion."""
__slots__ = ["status"]
def __init__(self):
self.status = c_api.TF_NewStatus()
def __del__(self):
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules.
if c_api is not None and c_api.TF_DeleteStatus is not None:
c_api.TF_DeleteStatus(self.status)
class ScopedTFGraph(object):
"""Wrapper around TF_Graph that handles deletion."""
__slots__ = ["graph", "deleter"]
def __init__(self):
self.graph = c_api.TF_NewGraph()
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules. By capturing the
# DeleteGraph function here, we retain the ability to cleanly destroy the
# graph at shutdown, which satisfies leak checkers.
self.deleter = c_api.TF_DeleteGraph
def __del__(self):
self.deleter(self.graph)
class ScopedTFImportGraphDefOptions(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
__slots__ = ["options"]
def __init__(self):
self.options = c_api.TF_NewImportGraphDefOptions()
def __del__(self):
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules.
if c_api is not None and c_api.TF_DeleteImportGraphDefOptions is not None:
c_api.TF_DeleteImportGraphDefOptions(self.options)
class ScopedTFImportGraphDefResults(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
__slots__ = ["results"]
def __init__(self, results):
self.results = results
def __del__(self):
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules.
if c_api is not None and c_api.TF_DeleteImportGraphDefResults is not None:
c_api.TF_DeleteImportGraphDefResults(self.results)
class ScopedTFFunction(object):
"""Wrapper around TF_Function that handles deletion."""
__slots__ = ["func", "deleter"]
def __init__(self, func):
self.func = func
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules. By capturing the
# DeleteFunction function here, we retain the ability to cleanly destroy the
# Function at shutdown, which satisfies leak checkers.
self.deleter = c_api.TF_DeleteFunction
@property
def has_been_garbage_collected(self):
return self.func is None
def __del__(self):
if not self.has_been_garbage_collected:
self.deleter(self.func)
self.func = None
class ScopedTFBuffer(object):
"""An internal class to help manage the TF_Buffer lifetime."""
__slots__ = ["buffer"]
def __init__(self, buf_string):
self.buffer = c_api.TF_NewBufferFromString(compat.as_bytes(buf_string))
def __del__(self):
c_api.TF_DeleteBuffer(self.buffer)
class ApiDefMap(object):
"""Wrapper around Tf_ApiDefMap that handles querying and deletion.
The OpDef protos are also stored in this class so that they could
be queried by op name.
"""
__slots__ = ["_api_def_map", "_op_per_name"]
def __init__(self):
op_def_proto = op_def_pb2.OpList()
buf = c_api.TF_GetAllOpList()
try:
op_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
self._api_def_map = c_api.TF_NewApiDefMap(buf)
finally:
c_api.TF_DeleteBuffer(buf)
self._op_per_name = {}
for op in op_def_proto.op:
self._op_per_name[op.name] = op
def __del__(self):
# Note: when we're destructing the global context (i.e. when the process is
# terminating) we may have already deleted other modules.
if c_api is not None and c_api.TF_DeleteApiDefMap is not None:
c_api.TF_DeleteApiDefMap(self._api_def_map)
def put_api_def(self, text):
c_api.TF_ApiDefMapPut(self._api_def_map, text, len(text))
def get_api_def(self, op_name):
api_def_proto = api_def_pb2.ApiDef()
buf = c_api.TF_ApiDefMapGet(self._api_def_map, op_name, len(op_name))
try:
api_def_proto.ParseFromString(c_api.TF_GetBuffer(buf))
finally:
c_api.TF_DeleteBuffer(buf)
return api_def_proto
def get_op_def(self, op_name):
if op_name in self._op_per_name:
return self._op_per_name[op_name]
raise ValueError(f"No op_def found for op name {op_name}.")
def op_names(self):
return self._op_per_name.keys()
@tf_contextlib.contextmanager
def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
with tf_buffer(some_string) as buf:
c_api.TF_SomeFunction(buf)
# buf has been deleted
Args:
data: An optional `bytes`, `str`, or `unicode` object. If not None, the
yielded buffer will contain this data.
Yields:
Created TF_Buffer
"""
if data:
buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
else:
buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
c_api.TF_DeleteBuffer(buf)
def tf_output(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret
def tf_operations(graph):
"""Generator that yields every TF_Operation in `graph`.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# pylint: disable=protected-access
pos = 0
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
while c_op is not None:
yield c_op
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
# pylint: enable=protected-access
def new_tf_operations(graph):
"""Generator that yields newly-added TF_Operations in `graph`.
Specifically, yields TF_Operations that don't have associated Operations in
`graph`. This is useful for processing nodes added by the C API.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# TODO(b/69679162): do this more efficiently
for c_op in tf_operations(graph):
try:
graph._get_operation_by_tf_operation(c_op) # pylint: disable=protected-access
except KeyError:
yield c_op
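# Illustrative sketch (not part of the original module): how ApiDefMap and
# tf_buffer above might be used together. The op name "MatMul" is only an
# assumed example.
def _example_query_op_def():
    api_map = ApiDefMap()
    if "MatMul" in api_map.op_names():
        op_def = api_map.get_op_def("MatMul")    # OpDef proto looked up by name
        api_def = api_map.get_api_def("MatMul")  # ApiDef proto fetched via the C API
        return op_def.name, api_def.graph_op_name
    # tf_buffer() yields a TF_Buffer that is deleted when the block exits.
    with tf_buffer(b"raw bytes") as buf:
        _ = c_api.TF_GetBuffer(buf)
    return None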
|
tensorflow/tensorflow
|
tensorflow/python/framework/c_api_util.py
|
Python
|
apache-2.0
| 7,542
|