code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import scipy.optimize as opt
from . import equation_of_state as eos
from ..tools.math import bracket
import warnings
import numpy as np
def bulk_modulus(volume, params):
    """
    Compute the isothermal bulk modulus predicted by the Morse
    potential equation of state at the given molar volume.

    The returned modulus carries the same units as the reference
    bulk modulus params['K_0'].  Pressure must be in :math:`[Pa]`.
    """
    # f = (V/V_0)^(1/3); the Morse strain variable is x = (K'_0 - 1)(1 - f).
    f = np.power(volume / params['V_0'], 1. / 3.)
    kp1 = params['Kprime_0'] - 1.
    x = kp1 * (1. - f)
    ex = np.exp(x)
    e2x = np.exp(2. * x)
    # Two contributions: a compression term and a derivative term.
    term_a = 2. / kp1 / (f * f) * (e2x - ex)
    term_b = (2. * e2x - ex) / f
    return params['K_0'] * (term_a + term_b)
def shear_modulus(volume, params):
    """
    Shear modulus not currently implemented for this equation of state.
    Always returns a placeholder value of zero, regardless of the
    volume or parameters supplied.
    """
    return 0.
def morse_potential(VoverV0, params):
    """
    Evaluate the Morse Potential equation of state.

    Given the compression ratio V/V_0, returns the pressure in the
    same units as the reference bulk modulus params['K_0'], offset
    by the reference pressure params['P_0'].
    """
    kp1 = params['Kprime_0'] - 1.
    # Morse strain variable.
    x = kp1 * (1. - np.power(VoverV0, 1. / 3.))
    prefactor = 3. * params['K_0'] / kp1
    strain_term = np.power(VoverV0, -2. / 3.) * (np.exp(2. * x) - np.exp(x))
    return prefactor * strain_term + params['P_0']
def volume(pressure, params):
    """
    Get the Morse Potential volume at a
    reference temperature for a given pressure :math:`[Pa]`.
    Returns molar volume in :math:`[m^3]`.

    Raises ValueError if no bracketing interval can be found.
    """
    def func(V):
        # Root of this function is the volume at the target pressure.
        return morse_potential(V / params['V_0'], params) - pressure
    try:
        # Search downward from V_0 (step 1% of V_0) for an interval
        # that brackets the root.
        sol = bracket(func, params['V_0'], 1.e-2 * params['V_0'])
    except Exception:
        # NB: deliberately not a bare 'except:', which would also
        # swallow KeyboardInterrupt and SystemExit.
        raise ValueError(
            'Cannot find a volume, perhaps you are outside of the range of validity for the equation of state?')
    return opt.brentq(func, sol[0], sol[1])
class Morse(eos.EquationOfState):
    """
    Class for the isothermal Morse Potential equation of state
    detailed in :cite:`Stacey1981`.
    This equation of state has no temperature dependence.
    """
    def volume(self, pressure, temperature, params):
        """
        Returns volume :math:`[m^3]` as a function of pressure :math:`[Pa]`.
        The temperature argument is ignored (the model is isothermal).
        """
        return volume(pressure, params)
    def pressure(self, temperature, volume, params):
        """
        Returns pressure :math:`[Pa]` as a function of volume :math:`[m^3]`.
        Temperature is ignored (the model is isothermal).
        """
        return morse_potential(volume / params['V_0'], params)
    def isothermal_bulk_modulus(self, pressure, temperature, volume, params):
        """
        Returns isothermal bulk modulus :math:`K_T` :math:`[Pa]` as a function of pressure :math:`[Pa]`,
        temperature :math:`[K]` and volume :math:`[m^3]`.
        """
        return bulk_modulus(volume, params)
    def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
        """
        Returns adiabatic bulk modulus :math:`K_s` of the mineral. :math:`[Pa]`.
        Identical to :math:`K_T` here, since the model has no thermal effects.
        """
        return bulk_modulus(volume, params)
    def shear_modulus(self, pressure, temperature, volume, params):
        """
        Returns shear modulus :math:`G` of the mineral. :math:`[Pa]`
        Always zero: shear is not implemented for this equation of state.
        """
        return shear_modulus(volume, params)
    def entropy(self, pressure, temperature, volume, params):
        r"""
        Returns the molar entropy :math:`\mathcal{S}` of the mineral. :math:`[J/K/mol]`
        Zero for this athermal equation of state.
        """
        return 0.
    def molar_internal_energy(self, pressure, temperature, volume, params):
        r"""
        Returns the internal energy :math:`\mathcal{E}` of the mineral. :math:`[J/mol]`
        Computed from the analytic integral of -P dV for the Morse
        potential, offset by the reference energy params['E_0'].
        """
        x = (params['Kprime_0'] - 1)*(1 - np.power(volume/params['V_0'], 1./3.))
        intPdV = ( 9./2. * params['V_0'] * params['K_0'] /
                   np.power(params['Kprime_0'] - 1., 2.) *
                   (2.*np.exp(x) - np.exp(2.*x) - 1.) )
        return -intPdV + params['E_0']
    def gibbs_free_energy(self, pressure, temperature, volume, params):
        r"""
        Returns the Gibbs free energy :math:`\mathcal{G}` of the mineral. :math:`[J/mol]`
        With zero entropy this reduces to :math:`\mathcal{E} + PV`.
        """
        return self.molar_internal_energy(pressure, temperature, volume, params) + volume*pressure
    def molar_heat_capacity_v(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return a very large number. :math:`[J/K/mol]`
        """
        return 1.e99
    def molar_heat_capacity_p(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return a very large number. :math:`[J/K/mol]`
        """
        return 1.e99
    def thermal_expansivity(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return zero. :math:`[1/K]`
        """
        return 0.
    def grueneisen_parameter(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return zero. :math:`[unitless]`
        """
        return 0.
    def validate_parameters(self, params):
        """
        Check for existence and validity of the parameters.
        Fills in optional defaults (E_0, P_0, G_0, Gprime_0), raises
        KeyError for any missing required key, and warns about values
        outside typical ranges.
        """
        if 'E_0' not in params:
            params['E_0'] = 0.
        if 'P_0' not in params:
            params['P_0'] = 0.
        # If G and Gprime are not included this is presumably deliberate,
        # as we can model density and bulk modulus just fine without them,
        # so just add them to the dictionary as nans
        if 'G_0' not in params:
            params['G_0'] = float('nan')
        if 'Gprime_0' not in params:
            params['Gprime_0'] = float('nan')
        # Check that all the required keys are in the dictionary
        expected_keys = ['V_0', 'K_0', 'Kprime_0', 'G_0', 'Gprime_0']
        for k in expected_keys:
            if k not in params:
                raise KeyError('params object missing parameter : ' + k)
        # Finally, check that the values are reasonable.
        if params['P_0'] < 0.:
            warnings.warn('Unusual value for P_0', stacklevel=2)
        if params['V_0'] < 1.e-7 or params['V_0'] > 1.e-3:
            warnings.warn('Unusual value for V_0', stacklevel=2)
        if params['K_0'] < 1.e9 or params['K_0'] > 1.e13:
            warnings.warn('Unusual value for K_0', stacklevel=2)
        if params['Kprime_0'] < 0. or params['Kprime_0'] > 10.:
            warnings.warn('Unusual value for Kprime_0', stacklevel=2)
        if params['G_0'] < 0.0 or params['G_0'] > 1.e13:
            warnings.warn('Unusual value for G_0', stacklevel=2)
        if params['Gprime_0'] < -5. or params['Gprime_0'] > 10.:
            warnings.warn('Unusual value for Gprime_0', stacklevel=2)
| bobmyhill/burnman | burnman/eos/morse_potential.py | Python | gpl-2.0 | 7,014 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-06 02:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.3: widens FeatureRequest.title to
    # max_length=300 and enforces uniqueness of (title, client) pairs.
    dependencies = [
        ('ticketing', '0008_auto_20160305_1840'),
    ]
    operations = [
        migrations.AlterField(
            model_name='featurerequest',
            name='title',
            field=models.CharField(help_text='Enter a short, descriptive name of the feature request.', max_length=300),
        ),
        migrations.AlterUniqueTogether(
            name='featurerequest',
            unique_together=set([('title', 'client')]),
        ),
    ]
| himadriganguly/featurerequest | ticketing/migrations/0009_auto_20160306_0248.py | Python | gpl-3.0 | 671 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# This is the runner of this task: it reads the input, and then executes the
# function 'min3nombres' of the solution
import sys, traceback
# Import the function min3nombres from the solution
try:
    from solution import min3nombres
except:
    # Remove the runner from the traceback
    # (the bare except is deliberate here: any import-time failure in
    # the student solution must be reported, whatever its type)
    excInfo = sys.exc_info()
    traceback.print_exception(excInfo[0], excInfo[1], excInfo[2].tb_next)
    sys.exit(1)
if __name__ == '__main__':
    # Read the input
    # The sanitizer ensures the input will always be three numbers
    nb1, nb2, nb3 = sys.stdin.read().split()
    # Execute the solution
    # (Python 2 print statement -- this runner targets python2.7)
    print min3nombres(int(nb1), int(nb2), int(nb3))
| France-ioi/taskgrader | examples/taskRunner/tests/gen/runner.py | Python | mit | 696 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import unittest
from pants_test.contrib.android.android_integration_test import AndroidIntegrationTest
class AaptBuilderIntegrationTest(AndroidIntegrationTest):
  """Integration test for AaptBuilder, which builds an unsigned .apk

  The Android SDK is modular, finding an SDK on the PATH is no guarantee that there is
  an aapt binary anywhere on disk. The TOOLS are the ones required by the target in the
  'test_aapt_bundle' method. If you add a target, you may need to expand the TOOLS list
  and perhaps define new BUILD_TOOLS or TARGET_SDK class variables.
  """
  # SDK artifacts that must exist on disk for these tests to be runnable.
  TOOLS = [
    os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'aapt'),
    os.path.join('build-tools', AndroidIntegrationTest.BUILD_TOOLS, 'lib', 'dx.jar'),
    os.path.join('platforms', 'android-' + AndroidIntegrationTest.TARGET_SDK, 'android.jar')
  ]
  tools = AndroidIntegrationTest.requirements(TOOLS)
  @unittest.skipUnless(tools, reason='Android integration test requires tools {} '
                                     'and ANDROID_HOME set in path.'.format(TOOLS))
  def test_aapt_bundle(self):
    self.bundle_test(AndroidIntegrationTest.TEST_TARGET)
  def bundle_test(self, target):
    """Run the 'apk' goal against the given target and require success."""
    pants_run = self.run_pants(['apk', target])
    self.assert_success(pants_run)
  @unittest.skipUnless(tools, reason='Android integration test requires tools {} '
                                     'and ANDROID_HOME set in path.'.format(TOOLS))
  def test_android_library_products(self):
    # Doing the work under a tempdir gives us a handle for the workdir and guarantees a clean build.
    with self.temporary_workdir() as workdir:
      spec = 'contrib/android/examples/src/android/hello_with_library:'
      pants_run = self.run_pants_with_workdir(['apk', '-ldebug', spec], workdir)
      self.assert_success(pants_run)
      # Make sure that the unsigned apk was produced for the binary target.
      apk_file = 'apk/apk/org.pantsbuild.examples.hello_with_library.unsigned.apk'
      self.assertEqual(os.path.isfile(os.path.join(workdir, apk_file)), True)
      # Scrape debug statements.
      def find_aapt_blocks(lines):
        for line in lines:
          if re.search(r'Executing: .*?\baapt package -f -M', line):
            yield line
      aapt_blocks = list(find_aapt_blocks(pants_run.stderr_data.split('\n')))
      # Only one apk is built, so only one aapt invocation here, for any number of dependent libs.
      # NB: assertEqual, not the deprecated assertEquals alias.
      self.assertEqual(len(aapt_blocks), 1, 'Expected one invocation of the aapt tool! '
                                            '(was: {})\n{}'.format(len(aapt_blocks),
                                                                   pants_run.stderr_data))
      # Check to make sure the resources are being passed in correct order (apk->libs).
      for line in aapt_blocks:
        resource_dirs = re.findall(r'-S ([^\s]+)', line)
        self.assertEqual(resource_dirs[0], 'contrib/android/examples/src/android/hello_with_library/main/res')
        self.assertEqual(resource_dirs[1], 'contrib/android/examples/src/android/example_library/res')
        # The other six are google-play-services v21 resource_dirs. Their presence is enough.
        self.assertEqual(len(resource_dirs), 8, 'Expected eight resource dirs to be included '
                                                'when calling aapt on hello_with_library apk.'
                                                ' (was: {})\n'.format(resource_dirs))
| pombredanne/pants | contrib/android/tests/python/pants_test/contrib/android/tasks/test_aapt_builder_integration.py | Python | apache-2.0 | 3,770 |
import os
def read_file(path):
    """Read a UTF-8 text file and return its lines, with the platform
    line-separator characters stripped from both ends of each line."""
    with open(path, "r", encoding="utf-8") as handle:
        raw_lines = handle.readlines()
    return [line.strip(os.linesep) for line in raw_lines]
def write_file(path, rows, separator="\t"):
    """Write rows to *path* as UTF-8 encoded bytes.

    Each row may be a plain string, or a list/tuple of strings which
    is joined with *separator*; every row is terminated with the
    platform line separator.
    """
    with open(path, "wb") as sink:
        for row in rows:
            if isinstance(row, (list, tuple)):
                text = separator.join(row)
            else:
                text = row
            sink.write((text + os.linesep).encode("utf-8"))
| icoxfog417/python_training | basic/file_service.py | Python | mit | 557 |
import unittest
import os
import printapp
import flask
from printapp.api import _has_supported_filetype
import printapp.test.util
class ApiTestCase(unittest.TestCase):
    """Integration tests for the print app's HTTP API endpoints."""
    def setUp(self):
        printapp.app.secret_key = 'test key'
        printapp.app.config['TESTING'] = True
        self.get_client = printapp.app.test_client
        # Live uniFLOW test credentials come from the environment;
        # the tests cannot run without them.
        self.username = os.getenv('UNIFLOW_USER')
        self.password = os.getenv('UNIFLOW_PASSWORD')
        self.assertIsNotNone(self.username)
        self.assertIsNotNone(self.password)
        self._path = os.path.abspath(os.path.dirname(__file__))
    def test_get_index(self):
        with self.get_client() as app:
            response = app.get('/')
            self.assertEqual(self._status(response), 200)
    def test_login(self):
        with self.get_client() as app:
            response = app.post('/api/login')
            self.assertEqual(self._status(response), 400)
            self.assertIsNone(flask.session.get('email'))
            self.assertIsNone(flask.session.get('password'))
            form_data = {}
            form_data['email'] = '{}@students.calvin.edu'.format(self.username)
            form_data['password'] = self.password
            response = app.post('/api/login', data=form_data)
            self.assertEqual(self._status(response), 200)
            self.assertEqual(flask.session['email'], '{}@students.calvin.edu'.format(self.username))
            self.assertEqual(flask.session['password'], self.password)
            # test that whitespaces get stripped from the email
            form_data['email'] = ' {}@students.calvin.edu '.format(self.username)
            response = app.post('/api/login', data=form_data)
            self.assertEqual(self._status(response), 200)
            self.assertEqual(flask.session['email'], '{}@students.calvin.edu'.format(self.username))
            form_data['email'] = 'invalid'
            form_data['password'] = 'invalid'
            response = app.post('/api/login', data=form_data)
            self.assertEqual(self._status(response), 401)
            self.assertIsNone(flask.session.get('email'))
            self.assertIsNone(flask.session.get('password'))
    @unittest.skipIf(printapp.test.util.is_cloudprint_api_key_missing(),
                     'no cloudprint api key found')
    def test_cloudprintstatus(self):
        with self.get_client() as app:
            response = app.get('/api/cloudprintstatus')
            self.assertEqual(self._status(response), 401)
            self.assertIsNone(flask.session.get('email'))
            self.assertIsNone(flask.session.get('password'))
            self._sign_in(app)
            response = app.get('/api/cloudprintstatus')
            response_json = flask.json.loads(response.data)
            self.assertEqual(self._status(response), 200)
            # assertEqual, not the deprecated assertEquals alias.
            self.assertEqual(response_json['haveCloudPrintPermission'], True)
            self.assertEqual(response_json['isPrinterInstalled'], True)
            self.assertIsNotNone(response_json['cloudPrintPermissionUrl'])
    def test_uniflowstatus(self):
        with self.get_client() as app:
            response = app.get('/api/uniflowstatus')
            self.assertEqual(self._status(response), 401)
            self.assertIsNone(flask.session.get('email'))
            self.assertIsNone(flask.session.get('password'))
            self._sign_in(app)
            response = app.get("/api/uniflowstatus")
            response_json = flask.json.loads(response.data)
            self.assertEqual(self._status(response), 200)
            self.assertIsNotNone(response_json['queue'])
            self.assertIsNotNone(response_json['budget'])
    def test_upload_file(self):
        with self.get_client() as app:
            self._sign_in(app)
            for name in ['test.pdf', 'test.txt', 'test.doc', 'test.docx']:
                path = os.path.join(self._path, 'docs', name)
                # Close the upload fixture promptly instead of leaking
                # the open file handle.
                with open(path, 'rb') as test_file:
                    form_data = {}
                    form_data['file'] = test_file
                    response = app.post('/api/upload', data=form_data)
                response_json = flask.json.loads(response.data)
                self.assertEqual(self._status(response), 201)
                self.assertIsNotNone(response_json['file_id'])
            form_data = {}
            response = app.post('/api/upload', data=form_data)
            self.assertEqual(self._status(response), 400)
    def test_has_supported_filetype(self):
        self.assertFalse(_has_supported_filetype('test.file'))
        self.assertFalse(_has_supported_filetype('test.docc'))
        self.assertFalse(_has_supported_filetype('test.pddf'))
        self.assertTrue(_has_supported_filetype('test.doc'))
        self.assertTrue(_has_supported_filetype('test.docx'))
        self.assertTrue(_has_supported_filetype('test.pdf'))
        self.assertTrue(_has_supported_filetype('test.txt'))
    def _status(self, response):
        """Return the numeric HTTP status code of a Flask test response."""
        return int(response.status.split()[0])
    def _sign_in(self, app):
        """Log the test account in via the /api/login endpoint."""
        form_data = {}
        form_data['email'] = '{}@students.calvin.edu'.format(self.username)
        form_data['password'] = self.password
        app.post('/api/login', data=form_data)
@unittest.skip("Skip test which actually prints.")
class ApiPrintDeleteTestCase(unittest.TestCase):
    """Round-trip tests that submit and delete real print jobs.

    Skipped by default because running them actually prints.
    """
    def setUp(self):
        printapp.app.secret_key = 'test key'
        printapp.app.config['TESTING'] = True
        self.get_client = printapp.app.test_client
        self.username = os.getenv('UNIFLOW_USER')
        self.password = os.getenv('UNIFLOW_PASSWORD')
        self.assertIsNotNone(self.username)
        self.assertIsNotNone(self.password)
        self._path = os.path.abspath(os.path.dirname(__file__))
    def tearDown(self):
        """ Tests the /api/deletejob/<job_id> endpoint.
        """
        # NOTE(review): this method contains assertions and, per its
        # docstring, is really a test of the deletejob endpoint -- it
        # probably belongs in a test method, not tearDown. Kept as-is
        # (the whole class is skipped anyway); confirm intent.
        with self.get_client() as app:
            self._sign_in(app)
            response = app.post('/api/deletejob')
            self.assertEqual(self._status(response), 404)
            response = app.post('/api/deletejob/')
            self.assertEqual(self._status(response), 404)
            response = app.post('/api/deletejob/invalidJobID')
            self.assertEqual(self._status(response), 200)
            response = app.get("/api/uniflowstatus")
            response_json = flask.json.loads(response.data)
            self.assertEqual(self._status(response), 200)
            queue = response_json['queue']
            self.assertIsNotNone(queue)
            for document in queue:
                response = app.post('/api/deletejob/' + document['job_id'])
                self.assertEqual(self._status(response), 200)
            response = app.get("/api/uniflowstatus")
            response_json = flask.json.loads(response.data)
            self.assertEqual(response_json['queue'], [])
    def test_print(self):
        """Tests the /api/upload and /api/print endpoints.
        """
        with self.get_client() as app:
            self._sign_in(app)
            oid = []
            file_names = ['test.pdf', 'test.txt', 'test.doc', 'test.docx']
            for name in file_names:
                path = os.path.join(self._path, 'docs', name)
                # Close each fixture file instead of leaking the handle.
                with open(path, 'rb') as test_file:
                    form_data = {}
                    form_data['file'] = test_file
                    response = app.post('/api/upload', data=form_data)
                response_json = flask.json.loads(response.data)
                objectid = response_json['file_id']
                oid.append(objectid)
            form_data = {}
            form_data['file_id'] = None
            form_data['color'] = True
            form_data['double_sided'] = 5
            form_data['collate'] = True
            form_data['copies'] = 1
            form_data['staple'] = True
            response = app.post('/api/print', data=form_data)
            self.assertEqual(self._status(response), 400)
            form_data['double_sided'] = False
            response = app.post('/api/print', data=form_data)
            self.assertEqual(self._status(response), 400)
            for objectid in oid:
                form_data['file_id'] = objectid
                response = app.post('/api/print', data=form_data)
                self.assertEqual(self._status(response), 201)
    def _status(self, response):
        """Return the numeric HTTP status code of a Flask test response."""
        return int(response.status.split()[0])
    def _sign_in(self, app):
        """Log the test account in via the /api/login endpoint."""
        form_data = {}
        form_data['email'] = '{}@students.calvin.edu'.format(self.username)
        form_data['password'] = self.password
        app.post('/api/login', data=form_data)
| tylervz/calvinwebprint | src/printapp/test/test-api.py | Python | mit | 8,634 |
from django.test import TestCase, tag
from ..lab import AliquotType, LabProfile, ProcessingProfile, RequisitionPanel
from ..lab import PanelAlreadyRegistered, ProcessingProfileInvalidDerivative
from ..lab import RequisitionPanelError, Process, InvalidProcessingProfile
from ..lab import RequisitionPanelModelError
class TestBuildProfile(TestCase):
    """Tests for assembling LabProfile / ProcessingProfile /
    RequisitionPanel objects and their validation rules."""
    def setUp(self):
        # Two aliquot types reused by several tests below.
        self.wb = AliquotType(
            name='whole_blood', numeric_code='02', alpha_code='WB')
        self.bc = AliquotType(
            name='buffy_coat', numeric_code='12', alpha_code='BC')
    def test_repr(self):
        """A LabProfile has a non-empty repr()."""
        obj = LabProfile(
            name='profile', requisition_model='edc_lab.subjectrequisition')
        self.assertTrue(repr(obj))
    def test_str(self):
        """A LabProfile has a non-empty str()."""
        obj = LabProfile(
            name='profile', requisition_model='edc_lab.subjectrequisition')
        self.assertTrue(str(obj))
    def test_processing_bad(self):
        """Asserts CANNOT add process for aliquot B to a profile
        for aliquot A if B cannot be derived from A."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        self.assertRaises(
            ProcessingProfileInvalidDerivative,
            processing_profile.add_processes, process)
    def test_processing_ok(self):
        """Asserts CAN add process for aliquot B to a profile
        for aliquot A since B can be derived from A."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        a.add_derivatives(b)
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        try:
            processing_profile.add_processes(process)
        except ProcessingProfileInvalidDerivative:
            self.fail('ProcessingProfileInvalidDerivative unexpectedly raised.')
    def test_panel(self):
        """A panel can be constructed with a valid aliquot type."""
        RequisitionPanel(name='Viral Load', aliquot_type=self.bc)
    def test_panel_raises_missing_aliquot_type(self):
        """A panel without an aliquot type raises RequisitionPanelError."""
        self.assertRaises(
            RequisitionPanelError,
            RequisitionPanel,
            name='Viral Load',
            aliquot_type=None)
    def test_panel_raises_on_invalid_model(self):
        """Accessing requisition_model_cls raises for bad model names."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        for requisition_model in [None, 'edc_lab.blah', 'blah']:
            with self.subTest(requisition_model=requisition_model):
                panel = RequisitionPanel(
                    name='Viral Load',
                    aliquot_type=a)
                panel.requisition_model = requisition_model
                try:
                    panel.requisition_model_cls
                except RequisitionPanelModelError:
                    pass
                else:
                    self.fail(
                        'RequisitionPanelModelError unexpectedly not raised.')
    def test_panel_adds_processing_profile(self):
        """A panel accepts a processing profile of a matching aliquot type."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        a.add_derivatives(b)
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        processing_profile.add_processes(process)
        RequisitionPanel(
            name='Viral Load',
            aliquot_type=a,
            processing_profile=processing_profile)
    def test_panel_adding_processing_profile_raises(self):
        """Asserts CANNOT add processing profile for aliquot type B
        to panel for aliquot type C.
        """
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        c = AliquotType(name='aliquot_c', numeric_code='77', alpha_code='CC')
        a.add_derivatives(b)
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        processing_profile.add_processes(process)
        self.assertRaises(
            InvalidProcessingProfile,
            RequisitionPanel,
            name='Viral Load',
            aliquot_type=c,
            processing_profile=processing_profile)
    def test_add_processesing(self):
        """A panel with a processing profile can be added to a LabProfile."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        a.add_derivatives(b)
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        processing_profile.add_processes(process)
        panel = RequisitionPanel(
            name='Viral Load',
            aliquot_type=a,
            processing_profile=processing_profile)
        lab_profile = LabProfile(
            name='profile', requisition_model='edc_lab.subjectrequisition')
        lab_profile.add_panel(panel=panel)
    def test_add_panel(self):
        """Adding the same panel twice raises PanelAlreadyRegistered."""
        a = AliquotType(name='aliquot_a', numeric_code='55', alpha_code='AA')
        b = AliquotType(name='aliquot_b', numeric_code='66', alpha_code='BB')
        a.add_derivatives(b)
        process = Process(aliquot_type=b, aliquot_count=3)
        processing_profile = ProcessingProfile(
            name='process', aliquot_type=a)
        processing_profile.add_processes(process)
        panel = RequisitionPanel(
            name='Viral Load',
            aliquot_type=a,
            processing_profile=processing_profile)
        lab_profile = LabProfile(
            name='profile', requisition_model='edc_lab.subjectrequisition')
        lab_profile.add_panel(panel=panel)
        self.assertRaises(
            PanelAlreadyRegistered,
            lab_profile.add_panel, panel=panel)
| botswana-harvard/edc-lab | edc_lab/tests/test_lab_profile.py | Python | gpl-2.0 | 6,159 |
#!/usr/bin/env python
import os
import sys
import xapi
import xapi.storage.api.plugin
from xapi.storage import log
class Implementation(xapi.storage.api.plugin.Plugin_skeleton):
    # Implements only the Plugin.Query call of the xapi storage
    # datapath plugin interface.
    def query(self, dbg):
        """Return the static metadata dictionary describing this plugin."""
        return {
            "plugin": "tapdisk",
            "name": "The tapdisk user-space datapath plugin",
            "description": ("This plugin manages and configures tapdisk"
                            " instances backend by either raw or vhd"
                            " format files"),
            "vendor": "Citrix",
            "copyright": "(C) 2015 Citrix Inc",
            "version": "3.0",
            "required_api_version": "3.0",
            "features": [
            ],
            "configuration": {},
            "required_cluster_stack": []}
if __name__ == "__main__":
    log.log_call_argv()
    cmd = xapi.storage.api.plugin.Plugin_commandline(Implementation())
    # Dispatch on the name this script was invoked as (symlink-style
    # dispatch); only Plugin.Query is implemented.
    base = os.path.basename(sys.argv[0])
    if base == "Plugin.Query":
        cmd.query()
    else:
        raise xapi.storage.api.plugin.Unimplemented(base)
| jjd27/xapi-storage-datapath-plugins | src/tapdisk/plugin.py | Python | lgpl-2.1 | 1,061 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <matthias@urlichs.de>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
__VERSION__ = (0,1,9)
# Not using gevent is not yet supported
# mainly because you can't kill/cancel OS threads from within Python
USE_GEVENT=True
## change the default encoding to UTF-8
## this is a no-op in PY3
# PY2 defaults to ASCII, which requires adding spurious .encode("utf-8") to
# absolutely everything you might want to print / write to a file
import sys
try:
    reload(sys)
except NameError:
    # py3 doesn't have reload()
    pass
else:
    # py3 also doesn't have sys.setdefaultencoding
    # (this branch is only reached on py2, where reload(sys) restores it)
    sys.setdefaultencoding("utf-8")
def patch():
    """\
    Patch the system for the correct threading implementation (gevent or not).
    This function MUST be called as early as possible.
    It MUST NOT be called from within an import.
    """
    if USE_GEVENT:
        ## You get spurious errors if the core threading module is imported
        ## before monkeypatching.
        if 'threading' in sys.modules:
            raise Exception('The ‘threading’ module was loaded before patching for gevent')
        # Imported here (not at module top) so that merely importing
        # dabroker does not monkeypatch the process.
        import gevent.monkey
        gevent.monkey.patch_all()
    else:
        # Plain OS threads: nothing to patch (currently unsupported,
        # see USE_GEVENT above).
        pass
# Warnings are bad, except for some which are not
from warnings import filterwarnings
# Alternative filter configurations, kept for easy toggling:
#filterwarnings("error")
#filterwarnings("ignore",category=DeprecationWarning)
#filterwarnings("ignore",category=PendingDeprecationWarning)
#filterwarnings("ignore",category=ImportWarning)
# Silence a noisy MySQL column-conversion warning.
filterwarnings("ignore",message="^Converting column '.*' from VARCHAR to TEXT") # mysql special
def unit(app, cfg="/etc/dabroker.cfg", **args):
    """Return the DaBroker unit for this app.

    :param app: application identifier handed to the Unit.
    :param cfg: configuration file path (default: /etc/dabroker.cfg).
    Additional keyword arguments are forwarded to the Unit constructor.
    """
    # Imported lazily -- presumably so patch() can run before the unit
    # machinery is loaded; confirm before hoisting to module level.
    from dabroker.unit import Unit
    return Unit(app,cfg, **args)
| smurfix/DaBroker | dabroker/__init__.py | Python | gpl-3.0 | 2,194 |
"""Test fixtures for integration tests only"""
# pylint: disable=redefined-outer-name
from datetime import datetime
import os
from pathlib import Path
import pytest
import requests
from redcap import Project
SUPER_TOKEN = os.getenv("REDCAPDEMO_SUPERUSER_TOKEN")
def create_project(url: str, super_token: str, project_xml_path: Path) -> str:
    """Create a project for testing on redcapdemo.vanderbilt.edu

    This API method returns the token for the newly created project,
    which is used for the integration tests.
    """
    timestamp = datetime.now().strftime("%m-%d %H:%M:%S")
    title = f"PyCap { project_xml_path.stem }: { timestamp }"
    project_info = f"""<?xml version="1.0" encoding="UTF-8" ?>
    <item>
        <project_title>{title}</project_title>
        <purpose>0</purpose>
    </item>"""
    project_data = project_xml_path.read_text(encoding="UTF-8")
    response = requests.post(
        url=url,
        data={
            "token": super_token,
            "content": "project",
            "format": "xml",
            "data": project_info,
            "odm": project_data,
        },
    )
    # The response body is the new project's API token.
    return response.text
@pytest.fixture(scope="module")
def redcapdemo_url() -> str:
    """API url for the redcapdemo testing site (shared per test module)."""
    return "https://redcapdemo.vanderbilt.edu/api/"
@pytest.fixture(scope="module")
def simple_project_token(redcapdemo_url) -> str:
    """Create a simple project and return its API token.

    Module-scoped so the remote project is created only once per run.
    """
    simple_project_xml_path = Path("tests/data/test_simple_project.xml")
    project_token = create_project(redcapdemo_url, SUPER_TOKEN, simple_project_xml_path)
    return project_token
@pytest.fixture(scope="module")
def simple_project(redcapdemo_url, simple_project_token):
    """A simple REDCap project (PyCap Project bound to the test token)"""
    return Project(redcapdemo_url, simple_project_token)
@pytest.fixture(scope="module")
def long_project_token(redcapdemo_url) -> str:
    """Create a longitudinal project and return its API token.

    Module-scoped so the remote project is created only once per run.
    """
    long_project_xml_path = Path("tests/data/test_long_project.xml")
    project_token = create_project(redcapdemo_url, SUPER_TOKEN, long_project_xml_path)
    return project_token
@pytest.fixture(scope="module")
def long_project(redcapdemo_url, long_project_token):
    """A longitudinal REDCap project (PyCap Project bound to the test token)"""
    return Project(redcapdemo_url, long_project_token)
| redcap-tools/PyCap | tests/integration/conftest.py | Python | mit | 2,373 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_network_policy_spec import V1NetworkPolicySpec
class TestV1NetworkPolicySpec(unittest.TestCase):
    """ V1NetworkPolicySpec unit test stubs (Swagger-generated) """
    def setUp(self):
        # No shared fixtures needed for this generated stub.
        pass
    def tearDown(self):
        pass
    def testV1NetworkPolicySpec(self):
        """
        Test V1NetworkPolicySpec
        """
        # Generated smoke test: only verifies that the model class can
        # be instantiated without raising; no assertions by design.
        model = kubernetes.client.models.v1_network_policy_spec.V1NetworkPolicySpec()
if __name__ == '__main__':
    # Allow running this generated test module directly.
    unittest.main()
| djkonro/client-python | kubernetes/test/test_v1_network_policy_spec.py | Python | apache-2.0 | 893 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
# $Id$
#
# Project: GDAL Python Interface
# Purpose: Application for converting raster data to a vector polygon layer.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
#******************************************************************************
# Copyright (c) 2008, Frank Warmerdam
# Copyright (c) 2009-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
import sys
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
def Usage():
    """Print the command-line synopsis and terminate with exit status 1."""
    print("""
gdal_polygonize [-8] [-nomask] [-mask filename] raster_file [-b band|mask]
                [-q] [-f ogr_format] out_file [layer] [fieldname]
""")
    sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================

format = 'GML'
options = []
quiet_flag = 0
src_filename = None
src_band_n = 1          # band number (int) or 'mask' / 'mask,<n>' (str)
dst_filename = None
dst_layername = None
dst_fieldname = None
dst_field = -1
mask = 'default'        # 'default' (band mask), 'none', or a mask filename

gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
    sys.exit( 0 )

# Parse command line arguments.
i = 1
while i < len(argv):
    arg = argv[i]

    if arg == '-f':
        i = i + 1
        format = argv[i]

    elif arg == '-q' or arg == '-quiet':
        quiet_flag = 1

    elif arg == '-8':
        options.append('8CONNECTED=8')

    elif arg == '-nomask':
        mask = 'none'

    elif arg == '-mask':
        i = i + 1
        mask = argv[i]

    elif arg == '-b':
        i = i + 1
        if argv[i].startswith('mask'):
            # Either 'mask' (mask band of band 1) or 'mask,<n>'.
            src_band_n = argv[i]
        else:
            src_band_n = int(argv[i])

    elif src_filename is None:
        src_filename = argv[i]

    elif dst_filename is None:
        dst_filename = argv[i]

    elif dst_layername is None:
        dst_layername = argv[i]

    elif dst_fieldname is None:
        dst_fieldname = argv[i]

    else:
        Usage()

    i = i + 1

if src_filename is None or dst_filename is None:
    Usage()

if dst_layername is None:
    dst_layername = 'out'

# =============================================================================
# Verify we have next gen bindings with the polygonize method.
# =============================================================================
try:
    gdal.Polygonize
except AttributeError:
    # Bug fix: this was a bare "except:".  Only a missing attribute means
    # old-generation bindings; anything else should propagate.
    print('')
    print('gdal.Polygonize() not available. You are likely using "old gen"')
    print('bindings or an older version of the next gen bindings.')
    print('')
    sys.exit(1)

# =============================================================================
# Open source file
# =============================================================================
src_ds = gdal.Open( src_filename )

if src_ds is None:
    print('Unable to open %s' % src_filename)
    sys.exit(1)

if src_band_n == 'mask':
    srcband = src_ds.GetRasterBand(1).GetMaskBand()
    # Workaround the fact that most source bands have no dataset attached
    options.append('DATASET_FOR_GEOREF=' + src_filename)
elif isinstance(src_band_n, str) and src_band_n.startswith('mask,'):
    srcband = src_ds.GetRasterBand(int(src_band_n[len('mask,'):])).GetMaskBand()
    # Workaround the fact that most source bands have no dataset attached
    options.append('DATASET_FOR_GEOREF=' + src_filename)
else:
    srcband = src_ds.GetRasterBand(src_band_n)

# Bug fix: compare string values with '==', not 'is' (identity), which only
# happened to work because of CPython small-string interning.
if mask == 'default':
    maskband = srcband.GetMaskBand()
elif mask == 'none':
    maskband = None
else:
    mask_ds = gdal.Open( mask )
    maskband = mask_ds.GetRasterBand(1)

# =============================================================================
# Try opening the destination file as an existing file.
# =============================================================================
try:
    gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
    try:
        dst_ds = ogr.Open( dst_filename, update=1 )
    finally:
        # Bug fix: always restore the error handler, even if Open() raises,
        # so later GDAL errors are not silently swallowed.
        gdal.PopErrorHandler()
except Exception:
    dst_ds = None

# =============================================================================
# Create output file.
# =============================================================================
if dst_ds is None:
    drv = ogr.GetDriverByName(format)
    if not quiet_flag:
        print('Creating output %s of format %s.' % (dst_filename, format))
    dst_ds = drv.CreateDataSource( dst_filename )

# =============================================================================
# Find or create destination layer.
# =============================================================================
try:
    dst_layer = dst_ds.GetLayerByName(dst_layername)
except Exception:
    dst_layer = None

if dst_layer is None:

    srs = None
    if src_ds.GetProjectionRef() != '':
        srs = osr.SpatialReference()
        srs.ImportFromWkt( src_ds.GetProjectionRef() )

    dst_layer = dst_ds.CreateLayer(dst_layername, geom_type=ogr.wkbPolygon, srs = srs )

    if dst_fieldname is None:
        dst_fieldname = 'DN'

    fd = ogr.FieldDefn( dst_fieldname, ogr.OFTInteger )
    dst_layer.CreateField( fd )
    dst_field = 0
else:
    if dst_fieldname is not None:
        dst_field = dst_layer.GetLayerDefn().GetFieldIndex(dst_fieldname)
        if dst_field < 0:
            print("Warning: cannot find field '%s' in layer '%s'" % (dst_fieldname, dst_layername))

# =============================================================================
# Invoke algorithm.
# =============================================================================
if quiet_flag:
    prog_func = None
else:
    prog_func = gdal.TermProgress

result = gdal.Polygonize( srcband, maskband, dst_layer, dst_field, options,
                          callback = prog_func )

# Drop references so that datasets are flushed and closed deterministically.
srcband = None
src_ds = None
dst_ds = None
mask_ds = None
| worldbank/cv4ag | utils/gdal_polygonize.py | Python | mit | 6,981 |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the EPRCOIN-Qt.app contains the right plist (including the right version)
# fix made because of serval bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./"

# Input plist template and output location inside the app bundle.
inFile = bitcoinDir + "/share/qt/Info.plist"
outFile = "EPRCOIN-Qt.app/Contents/Info.plist"

# Extract "VERSION = x.y.z" from the qmake project file.
version = "unknown"
fileForGrabbingVersion = bitcoinDir + "bitcoin-qt.pro"
# Bug fix: files were previously opened and never closed; use context
# managers so handles are released even on error.
with open(fileForGrabbingVersion) as versionFile:
    for line in versionFile:
        lineArr = line.replace(" ", "").split("=")
        if lineArr[0].startswith("VERSION"):
            version = lineArr[1].replace("\n", "")

with open(inFile, "r") as fIn:
    fileContent = fIn.read()

# Substitute ${VERSION} and ${YEAR} placeholders into the template.
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version, YEAR=date.today().year)

with open(outFile, "w") as fOut:
    fOut.write(newFileContent)

# print() with a single argument behaves identically on Python 2 and 3;
# the old "print ..." statement form was Python-2 only.
print("Info.plist fresh created")
| EPRCOIN/EPRCOIN | share/qt/clean_mac_info_plist.py | Python | mit | 895 |
#!/usr/bin/env python
# Build a CN radical molecule (open shell, spin=1) in a minimal STO-3G
# basis.  The module-level `mol` object is consumed by the test runner
# in the __main__ block below.
import pyscf

# C-N bond length; presumably in Angstrom (the pyscf default unit) —
# TODO confirm against the calling tests.
r = 1.1941
mol = pyscf.gto.M(
    atom=[['C', (0.0, 0.0, 0.0)],
          ['N', (0.0, 0.0, r)]],
    basis='sto-3g',
    spin=1,          # number of unpaired electrons (2S), CN is a doublet
    verbose=1,       # minimal pyscf log output
    symmetry=False,  # do not exploit point-group symmetry
)

if __name__ == '__main__':
    from pyci.tests.test_runner import test_runner
    test_runner(mol)
| shivupa/pyci | examples/cn_sto3g.py | Python | gpl-3.0 | 305 |
import binascii
import itertools
import os
import time
import numpy
import six
import chainer
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.functions.activation import relu
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.noise import dropout
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
    # Module-level shorthands for the cuDNN wrapper and the raw library
    # binding; the version number gates the cuDNN>=5 RNN path below.
    cudnn = cuda.cudnn
    libcudnn = cuda.cudnn.cudnn
    _cudnn_version = libcudnn.getVersion()
def _stack_weight(ws):
    """Stack per-gate weights along a new second axis and flatten it.

    NOTE(original TODO by unno): the input of the current LSTM
    implementation is shuffled.
    """
    stacked = stack.stack(ws, axis=1)
    first, second = stacked.shape[0], stacked.shape[1]
    return reshape.reshape(stacked, (first * second,) + stacked.shape[2:])
class PointerArray(object):
    """A contiguous ``intp`` array of raw addresses, plus a keep-alive ref."""

    def __init__(self, lst, back_pointer):
        # Hold a reference to the objects that own the addresses so the
        # garbage collector cannot free them while this array is alive.
        self._back_pointer = back_pointer
        self._value = numpy.array(lst, dtype=numpy.intp)

    @property
    def data(self):
        """Raw address of the underlying pointer array itself."""
        return self._value.ctypes.data
def _make_tensor_descriptor_array(xs):
    """Make an array of pointers denoting pointers of tensor descriptors.

    Each array is padded to at least 3 dimensions (cuDNN requirement)
    before its descriptor is created.
    """
    descs = []
    for x in xs:
        if x.ndim < 3:
            x = x.reshape(x.shape + (1,) * (3 - x.ndim))
        descs.append(cudnn.create_tensor_nd_descriptor(x))
    return PointerArray([d.value for d in descs], descs)
def _make_ptr_array(xs):
    """Make an array of pointers denoting pointers of ndarrays."""
    device_ptrs = [arr.data.ptr for arr in xs]
    return PointerArray(device_ptrs, xs)
class DropoutStates(object):
    """Couples a cuDNN dropout-state buffer with its descriptor."""

    def __init__(self, states, desc):
        self.desc = desc
        self.states = states

    def set_dropout_ratio(self, handle, dropout):
        # Reconfigure the existing descriptor in place with a new ratio.
        cudnn.set_dropout_descriptor(self.desc, handle, dropout)

    @staticmethod
    def create(handle, dropout, seed):
        """Allocate fresh dropout states and a descriptor bound to them."""
        buf = cudnn.create_dropout_states(handle)
        descriptor = cudnn.create_dropout_descriptor(
            handle, dropout, buf.data.ptr, buf.size, seed)
        return DropoutStates(buf, descriptor)
class DropoutRandomStates(object):
    """Lazily-created cuDNN dropout states bound to a fixed RNG seed.

    Args:
        seed (int or None): Seed for the dropout RNG.  If ``None``, the
            seed is drawn from ``os.urandom``, with a timer-based
            fallback when the platform has no OS entropy source.
    """

    def __init__(self, seed):
        self._states = None
        if seed is None:
            try:
                seed_str = binascii.hexlify(os.urandom(8))
                seed = numpy.uint64(int(seed_str, 16))
            except NotImplementedError:
                # os.urandom is unavailable on this platform; fall back to
                # a low-quality timer-derived seed.  Bug fix: this used
                # time.clock(), which was removed in Python 3.8;
                # time.time() exists on every supported version.
                seed = numpy.uint64(time.time() * 1000000)
        else:
            seed = numpy.uint64(seed)

        self._seed = seed

    def create_dropout_states(self, dropout):
        """Return cuDNN dropout states for *dropout*, creating them once.

        The state buffer is allocated on first use; subsequent calls only
        update the dropout ratio on the existing descriptor.
        """
        handle = cudnn.get_handle()
        if self._states is None:
            self._states = DropoutStates.create(handle, dropout, self._seed)
        else:
            self._states.set_dropout_ratio(handle, dropout)

        return self._states
def _split(inputs, pos):
return inputs[:pos], inputs[pos:]
# One DropoutRandomStates instance per CUDA device id.
_random_states = {}


def get_random_state():
    """Return the current device's DropoutRandomStates, creating it lazily.

    The seed is taken from the CHAINER_SEED environment variable when the
    state is first created for a device.
    """
    dev = cuda.Device()
    try:
        return _random_states[dev.id]
    except KeyError:
        rs = DropoutRandomStates(os.getenv('CHAINER_SEED'))
        _random_states[dev.id] = rs
        return rs
if cuda.cudnn_enabled and _cudnn_version >= 5000:
    # Define RNN parameters using dict.
    # Mapping from user-facing direction names to cuDNN direction enums.
    _rnn_dirs = {
        'uni': libcudnn.CUDNN_UNIDIRECTIONAL,
        'bi': libcudnn.CUDNN_BIDIRECTIONAL,
    }

    # Mapping from user-facing cell-type names to cuDNN RNN mode enums.
    _rnn_modes = {
        'rnn_relu': libcudnn.CUDNN_RNN_RELU,
        'rnn_tanh': libcudnn.CUDNN_RNN_TANH,
        'gru': libcudnn.CUDNN_GRU,
        'lstm': libcudnn.CUDNN_LSTM,
    }

    # Number of weight (and bias) matrices per layer for each RNN mode.
    _rnn_n_params = {
        libcudnn.CUDNN_RNN_RELU: 2,
        libcudnn.CUDNN_RNN_TANH: 2,
        libcudnn.CUDNN_GRU: 6,
        libcudnn.CUDNN_LSTM: 8,
    }

    # Number of directions implied by each cuDNN direction enum.
    _rnn_params_direction = {
        libcudnn.CUDNN_UNIDIRECTIONAL: 1,
        libcudnn.CUDNN_BIDIRECTIONAL: 2,
    }

    # Whether each mode carries a separate cell state (LSTM only).
    _rnn_params_use_cell = {
        libcudnn.CUDNN_RNN_RELU: False,
        libcudnn.CUDNN_RNN_TANH: False,
        libcudnn.CUDNN_GRU: False,
        libcudnn.CUDNN_LSTM: True,
    }
    class BaseNStepRNN(function.Function):
        """Common cuDNN implementation of stacked n-step RNN variants.

        Concrete subclasses fix the direction (``'uni'``/``'bi'``) and the
        cell type (``'rnn_relu'``/``'rnn_tanh'``/``'gru'``/``'lstm'``);
        this base class drives the cuDNN RNN API for both the forward and
        backward passes.
        """

        def __init__(self, n_layers, states, rnn_dir, rnn_mode, **kwargs):
            # Reject the pre-v2 ``train`` keyword with a helpful message.
            argument.check_unexpected_kwargs(
                kwargs, train='train argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)

            if rnn_dir not in _rnn_dirs:
                candidate_list = ','.join(_rnn_dirs.keys())
                raise ValueError('Invalid rnn_dir: "%s". Please select from [%s]'
                                 % (rnn_dir, candidate_list))
            if rnn_mode not in _rnn_modes:
                candidate_list = ','.join(_rnn_modes.keys())
                raise ValueError('Invalid rnn_mode: "%s". Please select from [%s]'
                                 % (rnn_mode, candidate_list))
            self.rnn_dir = _rnn_dirs[rnn_dir]
            self.rnn_mode = _rnn_modes[rnn_mode]
            self.rnn_direction = _rnn_params_direction[self.rnn_dir]
            self.n_layers = n_layers
            self.states = states  # DropoutStates shared across calls
            self.use_cell = _rnn_params_use_cell[self.rnn_mode]
            self.n_W = _rnn_n_params[self.rnn_mode]

        @property
        def _n_cell(self):
            # Number of leading state inputs: LSTM takes (hx, cx), the
            # other modes take only (hx,).
            if self.use_cell:
                return 2
            else:
                return 1

        @property
        def _n_params(self):
            # Total number of weight matrices (likewise bias vectors).
            return self.n_layers * self.rnn_direction * self.n_W

        def check_type_forward(self, in_types):
            # Inputs are (states..., weights..., biases..., xs...).
            type_check.expect(in_types.size() > self._n_cell + self._n_params * 2)
            if self.use_cell:
                # LSTM: hidden and cell states must agree in shape.
                (h_type, c_type), in_types = _split(in_types, 2)
                h_size = self.n_layers * self.rnn_direction
                type_check.expect(
                    h_type.dtype == numpy.float32,
                    c_type.dtype == numpy.float32,

                    h_type.ndim == 3,
                    h_type.shape[0] == h_size,
                    c_type.ndim == 3,
                    c_type.shape[0] == h_size,

                    # mini-batch size
                    h_type.shape[1] == c_type.shape[1],

                    # hidden size
                    h_type.shape[2] == c_type.shape[2],
                )
            else:
                # GRU / plain RNN: only a hidden state.
                (h_type, ), in_types = _split(in_types, 1)
                h_size = self.n_layers * self.rnn_direction
                type_check.expect(
                    h_type.dtype == numpy.float32,

                    h_type.ndim == 3,
                    h_type.shape[0] == h_size,
                )

            w_types, in_types = _split(in_types, self._n_params)
            b_types, in_types = _split(in_types, self._n_params)

            x_types = in_types
            for x_type in x_types:
                type_check.expect(
                    x_type.dtype == numpy.float32,
                    x_type.ndim == 2,
                )
            for x1_type, x2_type in six.moves.zip(x_types, x_types[1:]):
                type_check.expect(
                    # Check if xs are sorted by descending lengths
                    x1_type.shape[0] >= x2_type.shape[0],
                    x1_type.shape[1] == x2_type.shape[1])

            in_size = x_types[0].shape[1]
            out_size = h_type.shape[2]

            # Verify each weight/bias matrix shape against the expected
            # input width of its (layer, direction, gate) slot.
            for layer in six.moves.range(self.n_layers):
                for i in six.moves.range(self.n_W):
                    for di in six.moves.range(self.rnn_direction):
                        ind = (layer * self.rnn_direction + di) * self.n_W + i
                        w_type = w_types[ind]
                        b_type = b_types[ind]
                        if self.rnn_direction == 1:
                            # Uni-direction
                            if layer == 0 and i < (self.n_W // 2):
                                w_in = in_size
                            else:
                                w_in = out_size
                        else:
                            # Bi-direction
                            if layer == 0 and i < (self.n_W // 2):
                                w_in = in_size
                            elif layer > 0 and i < (self.n_W // 2):
                                # Upper layers consume the concatenated
                                # forward+backward hidden states.
                                w_in = out_size * self.rnn_direction
                            else:
                                w_in = out_size

                        type_check.expect(
                            w_type.dtype == numpy.float32,
                            w_type.ndim == 2,
                            w_type.shape[0] == out_size,
                            w_type.shape[1] == w_in,

                            b_type.dtype == numpy.float32,
                            b_type.ndim == 1,
                            b_type.shape[0] == out_size,
                        )

        def forward(self, inputs):
            if self.use_cell:
                # LSTM
                (hx, cx), inputs = _split(inputs, self._n_cell)
                cx = cuda.cupy.ascontiguousarray(cx)
                cx_desc = cudnn.create_tensor_nd_descriptor(cx)

                cy = cuda.cupy.empty_like(cx)
                cy_desc = cudnn.create_tensor_nd_descriptor(cy)

                cx_data_ptr = cx.data.ptr
                cy_data_ptr = cy.data.ptr
                cx_desc_value = cx_desc.value
                cy_desc_value = cy_desc.value
            else:
                # RNN, GRU: null cell-state pointers/descriptors for cuDNN.
                (hx, ), inputs = _split(inputs, self._n_cell)
                cx = cy = None
                cx_data_ptr = cy_data_ptr = 0
                cx_desc_value = cy_desc_value = 0

            ws, inputs = _split(inputs, self._n_params)
            bs, inputs = _split(inputs, self._n_params)
            x_list = inputs

            hx = cuda.cupy.ascontiguousarray(hx)
            # Single-time-step descriptor, used only to query parameter
            # layout information from cuDNN below.
            x_desc = cudnn.create_tensor_nd_descriptor(x_list[0][..., None])

            length = len(x_list)
            n_units = hx.shape[2]

            # Concatenate all time steps into one flat buffer.
            xs = cuda.cupy.concatenate(x_list, axis=0)
            ys = cuda.cupy.empty((len(xs),
                                  n_units * self.rnn_direction), dtype=xs.dtype)

            handle = cudnn.get_handle()
            self.handle = handle

            rnn_desc = cudnn.create_rnn_descriptor(
                n_units, self.n_layers, self.states.desc,
                libcudnn.CUDNN_LINEAR_INPUT, self.rnn_dir,
                self.rnn_mode, libcudnn.CUDNN_DATA_FLOAT)
            self.rnn_desc = rnn_desc

            c_x_descs = _make_tensor_descriptor_array(x_list)
            hx_desc = cudnn.create_tensor_nd_descriptor(hx)

            # Allocate the packed parameter blob and copy each weight and
            # bias into it at the offsets cuDNN reports.
            weights_size = libcudnn.getRNNParamsSize(
                handle, rnn_desc.value, x_desc.value, libcudnn.CUDNN_DATA_FLOAT)
            w = cuda.cupy.empty((weights_size // 4, 1, 1), dtype=numpy.float32)
            w_desc = cudnn.create_filter_descriptor(w)

            for layer in six.moves.range(self.n_layers):
                for di in six.moves.range(self.rnn_direction):
                    # di = 0: forward, 1: backward
                    for lin_layer_id in six.moves.range(self.n_W):
                        mat_index = layer * self.rnn_direction + di
                        mat = cudnn.get_rnn_lin_layer_matrix_params(
                            handle, rnn_desc, mat_index,
                            x_desc, w_desc, w, lin_layer_id)
                        W_index = mat_index * self.n_W + lin_layer_id
                        m = mat.reshape(mat.size)
                        m[...] = ws[W_index].ravel()
                        bias = cudnn.get_rnn_lin_layer_bias_params(
                            handle, rnn_desc, mat_index,
                            x_desc, w_desc, w, lin_layer_id)
                        b = bias.reshape(bias.size)
                        b[...] = bs[W_index]
            self.w = w
            self.w_desc = w_desc

            # Per-time-step views into the flat output buffer.
            sections = numpy.cumsum([len(x) for x in x_list[:-1]])
            y_list = cuda.cupy.split(ys, sections)
            c_y_descs = _make_tensor_descriptor_array(y_list)
            hy = cuda.cupy.empty_like(hx)
            hy_desc = cudnn.create_tensor_nd_descriptor(hy)

            work_size = libcudnn.getRNNWorkspaceSize(
                handle, rnn_desc.value, length, c_x_descs.data)
            workspace = cuda.cupy.empty((work_size,), dtype='b')
            self.workspace = workspace

            if not configuration.config.train:
                libcudnn.RNNForwardInference(
                    handle, rnn_desc.value, length,
                    c_x_descs.data, xs.data.ptr, hx_desc.value, hx.data.ptr,
                    cx_desc_value, cx_data_ptr, w_desc.value, w.data.ptr,
                    c_y_descs.data, ys.data.ptr, hy_desc.value, hy.data.ptr,
                    cy_desc_value, cy_data_ptr, workspace.data.ptr, work_size)
            else:
                # Training also needs a reserve buffer that carries
                # intermediate activations over to the backward pass.
                reserve_size = libcudnn.getRNNTrainingReserveSize(
                    handle, rnn_desc.value, length, c_x_descs.data)
                self.reserve_space = cuda.cupy.empty((reserve_size,), dtype='b')
                libcudnn.RNNForwardTraining(
                    handle, rnn_desc.value, length,
                    c_x_descs.data, xs.data.ptr, hx_desc.value, hx.data.ptr,
                    cx_desc_value, cx_data_ptr, w_desc.value, w.data.ptr,
                    c_y_descs.data, ys.data.ptr, hy_desc.value, hy.data.ptr,
                    cy_desc_value, cy_data_ptr,
                    workspace.data.ptr, work_size,
                    self.reserve_space.data.ptr, reserve_size)

            self.c_y_descs = c_y_descs
            self.ys = ys
            self.c_x_descs = c_x_descs

            if self.use_cell:
                # LSTM
                return tuple([hy, cy] + y_list)
            else:
                # GRU, RNN
                return tuple([hy, ] + y_list)

        def backward(self, inputs, grads):
            if self.use_cell:
                # LSTM
                (hx, cx), inputs = _split(inputs, self._n_cell)
                dhy, dcy = grads[:self._n_cell]
                if dcy is None:
                    # Missing cell-state gradient is treated as zeros.
                    dcy = cuda.cupy.zeros_like(cx)
                cx = cuda.cupy.ascontiguousarray(cx)
                dcx = cuda.cupy.empty_like(cx)

                cx_desc = cudnn.create_tensor_nd_descriptor(cx)
                dcx_desc = cudnn.create_tensor_nd_descriptor(dcx)
                dcy_desc = cudnn.create_tensor_nd_descriptor(dcy)

                cx_data_ptr = cx.data.ptr
                dcy_data_ptr = dcy.data.ptr
                dcx_data_ptr = dcx.data.ptr
                cx_desc_value = cx_desc.value
                dcx_desc_value = dcx_desc.value
                dcy_desc_value = dcy_desc.value
            else:
                # GRU, RNN: null cell-state pointers/descriptors.
                (hx, ), inputs = _split(inputs, self._n_cell)
                dhy, = grads[:self._n_cell]
                dcy = cx = dcx = None
                cx_data_ptr = dcy_data_ptr = dcx_data_ptr = 0
                cx_desc_value = dcx_desc_value = dcy_desc_value = 0

            ws_size = self.n_layers * self.rnn_direction * self.n_W
            ws, inputs = _split(inputs, ws_size)
            bs, inputs = _split(inputs, ws_size)
            x_list = inputs

            hx = cuda.cupy.ascontiguousarray(hx)
            if dhy is None:
                dhy = cuda.cupy.zeros_like(hx)

            # Missing per-step output gradients are treated as zeros.
            dy_list = list(grads[self._n_cell:])
            for i in six.moves.range(len(dy_list)):
                if dy_list[i] is None:
                    dy_list[i] = cuda.cupy.zeros_like(x_list[i])

            xs = cuda.cupy.concatenate(x_list, axis=0)
            length = len(x_list)

            dhx = cuda.cupy.empty_like(hx)

            hx_desc = cudnn.create_tensor_nd_descriptor(hx)
            dhy_desc = cudnn.create_tensor_nd_descriptor(dhy)

            c_dy_descs = _make_tensor_descriptor_array(dy_list)
            dys = cuda.cupy.concatenate(dy_list, axis=0)

            rnn_desc = self.rnn_desc
            handle = self.handle
            work_size = libcudnn.getRNNWorkspaceSize(
                handle, rnn_desc.value, length, self.c_x_descs.data)
            workspace = cuda.cupy.empty((work_size,), dtype='b')

            dhx_desc = cudnn.create_tensor_nd_descriptor(dhx)

            dxs = cuda.cupy.empty_like(xs)
            sections = numpy.cumsum([len(x) for x in x_list[:-1]])
            dx_list = cuda.cupy.split(dxs, sections, 0)
            c_dx_descs = _make_tensor_descriptor_array(dx_list)

            libcudnn.RNNBackwardData(
                handle, rnn_desc.value, length,
                self.c_y_descs.data, self.ys.data.ptr,
                c_dy_descs.data, dys.data.ptr, dhy_desc.value, dhy.data.ptr,
                dcy_desc_value, dcy_data_ptr, self.w_desc.value, self.w.data.ptr,
                hx_desc.value, hx.data.ptr, cx_desc_value, cx_data_ptr,
                c_dx_descs.data, dxs.data.ptr, dhx_desc.value, dhx.data.ptr,
                dcx_desc_value, dcx_data_ptr, workspace.data.ptr, work_size,
                self.reserve_space.data.ptr, self.reserve_space.size)

            dw = cuda.cupy.zeros_like(self.w)
            dw_desc = cudnn.create_filter_descriptor(dw)
            libcudnn.RNNBackwardWeights(
                handle, rnn_desc.value, length,
                self.c_x_descs.data, xs.data.ptr,
                hx_desc.value, hx.data.ptr, self.c_y_descs.data, self.ys.data.ptr,
                workspace.data.ptr, work_size, dw_desc.value, dw.data.ptr,
                self.reserve_space.data.ptr, self.reserve_space.size)

            # Unpack the packed weight-gradient blob into per-matrix views
            # matching the shapes of the original weight/bias inputs.
            dx = dx_list[0]
            dx = dx.reshape(dx.shape + (1,))
            dx_desc = cudnn.create_tensor_nd_descriptor(dx)
            dws = []
            dbs = []
            for layer in six.moves.range(self.n_layers):
                for di in six.moves.range(self.rnn_direction):
                    for lin_layer_id in six.moves.range(self.n_W):
                        mat_index = layer * self.rnn_direction + di
                        mat = cudnn.get_rnn_lin_layer_matrix_params(
                            handle, rnn_desc, mat_index,
                            dx_desc, dw_desc, dw, lin_layer_id)
                        W_index = mat_index * self.n_W + lin_layer_id
                        dws.append(mat.reshape(ws[W_index].shape))
                        bias = cudnn.get_rnn_lin_layer_bias_params(
                            handle, rnn_desc, mat_index,
                            dx_desc, dw_desc, dw, lin_layer_id)
                        dbs.append(bias.reshape(bs[W_index].shape))

            if self.use_cell:
                # LSTM
                return tuple([dhx, dcx] + dws + dbs + dx_list)
            else:
                # GRU, RNN
                return tuple([dhx, ] + dws + dbs + dx_list)
class NStepRNNTanh(BaseNStepRNN):
def __init__(self, n_layers, states, **kwargs):
BaseNStepRNN.__init__(self, n_layers, states, rnn_dir='uni',
rnn_mode='rnn_tanh', **kwargs)
class NStepRNNReLU(BaseNStepRNN):
def __init__(self, n_layers, states, **kwargs):
BaseNStepRNN.__init__(self, n_layers, states, rnn_dir='uni',
rnn_mode='rnn_relu', **kwargs)
class NStepBiRNNTanh(BaseNStepRNN):
def __init__(self, n_layers, states, **kwargs):
BaseNStepRNN.__init__(self, n_layers, states, rnn_dir='bi',
rnn_mode='rnn_tanh', **kwargs)
class NStepBiRNNReLU(BaseNStepRNN):
def __init__(self, n_layers, states, **kwargs):
BaseNStepRNN.__init__(self, n_layers, states, rnn_dir='bi',
rnn_mode='rnn_relu', **kwargs)
def n_step_rnn(
        n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
    """n_step_rnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')

    Stacked Uni-directional RNN function for sequence inputs.

    For each layer and each time ``t`` this computes

    .. math::

       h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)

    where :math:`f` is the activation selected by ``activation``.  The whole
    sequence is processed in a single call; when ``n_layers > 1`` the input
    of layer ``k`` is the hidden state of layer ``k-1``.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore
       since v2.  Use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): Number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (chainer.Variable): Initial hidden states of shape ``(S, B, N)``
            where ``S`` equals ``n_layers``, ``B`` is the mini-batch size
            and ``N`` the number of hidden units.
        ws (list of list of chainer.Variable): Weight matrices; ``ws[i]``
            holds ``[W_0, W_1]`` for layer ``i``.  Only ``ws[0][j]`` where
            ``0 <= j < 1`` is ``(I, N)`` shape as they are multiplied with
            input variables; all other matrices have ``(N, N)`` shape.
        bs (list of list of chainer.Variable): Bias vectors; ``bs[i]``
            holds ``[b_0, b_1]`` for layer ``i``, each of shape ``(N,)``.
        xs (list of chainer.Variable): Input sequence; ``xs[t]`` has shape
            ``(B_t, I)``.  Variable-length sequences must be sorted in
            descending order of length and transposed (see
            :func:`~chainer.functions.transpose_sequence`), so that
            ``xs[t].shape[0] >= xs[t + 1].shape[0]`` holds.
        activation (str): Activation function name, ``'tanh'`` or ``'relu'``.

    Returns:
        tuple: ``(hy, ys)`` where ``hy`` is the updated hidden states with
        the same shape as ``hx`` and ``ys`` is a list whose element
        ``ys[t]`` (shape ``(B_t, N)``) holds the last layer's hidden state
        for input ``xs[t]``.
    """
    return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
                           activation, use_bi_direction=False, **kwargs)
def n_step_birnn(
        n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
    """n_step_birnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')

    Stacked Bi-directional RNN function for sequence inputs.

    For each layer and each time ``t`` this computes

    .. math::

        h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
        h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
        h_t  &=& [h^{f}_t; h^{f}_t], \\\\

    where :math:`f` is the activation selected by ``activation``, the
    ``f``/``b`` superscripts denote the forward and backward directions,
    and the per-step output is their concatenation.  When
    ``n_layers > 1`` the input of layer ``k`` is the (concatenated)
    hidden state of layer ``k-1``.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore
       since v2.  Use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): Number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (chainer.Variable): Initial hidden states of shape ``(S, B, N)``
            where ``S`` equals ``n_layers * 2`` directions combined,
            ``B`` is the mini-batch size and ``N`` the number of hidden
            units.
        ws (list of list of chainer.Variable): Weight matrices.
            ``ws[i + di]`` holds ``[W_0, W_1]`` for layer ``i``, where
            ``di = 0`` selects the forward-RNN and ``di = 1`` the
            backward-RNN weights.  Only ``ws[0][j]`` and ``ws[1][j]``
            where ``0 <= j < 1`` are ``(I, N)`` shape as they are
            multiplied with input variables; all other matrices have
            ``(N, N)`` shape.
        bs (list of list of chainer.Variable): Bias vectors laid out the
            same way as ``ws``; each vector has shape ``(N,)``.
        xs (list of chainer.Variable): Input sequence; ``xs[t]`` has shape
            ``(B_t, I)``.  Variable-length sequences must be sorted in
            descending order of length and transposed (see
            :func:`~chainer.functions.transpose_sequence`), so that
            ``xs[t].shape[0] >= xs[t + 1].shape[0]`` holds.
        activation (str): Activation function name, ``'tanh'`` or ``'relu'``.

    Returns:
        tuple: ``(hy, ys)`` where ``hy`` is the updated hidden states with
        the same shape as ``hx`` and ``ys`` is a list whose element
        ``ys[t]`` holds the last layer's (bi-directional) hidden state
        for input ``xs[t]``.
    """
    # Bug fix: forward **kwargs so that deprecated arguments such as
    # ``train``/``use_cudnn`` are validated (and rejected with a clear
    # message) by n_step_rnn_base, exactly as n_step_rnn does, instead of
    # being silently dropped here.
    return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
                           activation, use_bi_direction=True, **kwargs)
def n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
activation, use_bi_direction, **kwargs):
"""n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs, activation, use_bi_direction)
Base function for Stack RNN/BiRNN functions.
This function is used at :func:`chainer.functions.n_step_birnn` and
:func:`chainer.functions.n_step_rnn`.
This function's behavior depends on following arguments,
``activation`` and ``use_bi_direction``.
.. warning::
``train`` and ``use_cudnn`` arguments are not supported anymore since
v2.
Instead, use ``chainer.using_config('train', train)`` and
``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
See :func:`chainer.using_config`.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (chainer.Variable): Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimention of hidden units.
ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
represents weights for i-th layer.
Each ``ws[i]`` is a list containing two matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 1`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
represnents biases for i-th layer.
Each ``bs[i]`` is a list containing two vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimention of
hidden units.
xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this functions supports variable length sequences.
When sequneces has different lengths, sort sequences in descending
order by length, and transpose the sorted sequence.
:func:`~chainer.functions.transpose_sequence` transpose a list
of :func:`~chainer.Variable` holding sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
use_bi_direction (bool): If ``True``, this function uses
Bi-directional RNN.
Returns:
tuple: This functions returns a tuple concaining three elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
is mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
.. seealso::
:func:`chainer.functions.n_step_rnn`
:func:`chainer.functions.n_step_birnn`
""" # NOQA
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config',
use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
activation_list = ['tanh', 'relu']
if activation not in activation_list:
candidate = ','.join(activation_list)
raise ValueError('Invalid activation: "%s". Please select from [%s]'
% (activation, candidate))
xp = cuda.get_array_module(hx)
if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
states = get_random_state().create_dropout_states(dropout_ratio)
# flatten all input variables
inputs = tuple(itertools.chain(
(hx, ),
itertools.chain.from_iterable(ws),
itertools.chain.from_iterable(bs),
xs))
if use_bi_direction:
# Bi-directional RNN
if activation == 'tanh':
rnn = NStepBiRNNTanh(n_layers, states)
elif activation == 'relu':
rnn = NStepBiRNNReLU(n_layers, states)
else:
# Uni-directional RNN
if activation == 'tanh':
rnn = NStepRNNTanh(n_layers, states)
elif activation == 'relu':
rnn = NStepRNNReLU(n_layers, states)
ret = rnn(*inputs)
hy, = ret[:1]
ys = ret[1:]
return hy, ys
else:
direction = 2 if use_bi_direction else 1
hx = split_axis.split_axis(hx, n_layers * direction, axis=0,
force_tuple=True)
hx = [reshape.reshape(h, h.shape[1:]) for h in hx]
xws = [_stack_weight([w[0]]) for w in ws]
hws = [_stack_weight([w[1]]) for w in ws]
xbs = [_stack_weight([b[0]]) for b in bs]
hbs = [_stack_weight([b[1]]) for b in bs]
xs_next = xs
hy = []
for layer in six.moves.range(n_layers):
            def _one_directional_loop(di):
                """Run one RNN direction over the whole sequence.

                di=0: forward RNN (consume xs_next in order)
                di=1: backward RNN (consume xs_next reversed)
                Returns (h, h_list) where h is the final hidden state for
                this layer/direction and h_list holds the per-step outputs.
                Uses the enclosing scope's layer index, weights and biases.
                """
                # di=0, forward RNN
                # di=1, backward RNN
                xs_list = xs_next if di == 0 else reversed(xs_next)
                layer_idx = direction * layer + di
                h = hx[layer_idx]
                h_list = []
                for x in xs_list:
                    batch = x.shape[0]
                    # Mini-batches shrink over time for variable-length input;
                    # keep the tail of h (h_rest) for the sequences that ended.
                    if h.shape[0] > batch:
                        h, h_rest = split_axis.split_axis(h, [batch], axis=0)
                    else:
                        h_rest = None
                    # Dropout is applied between layers only, never on the
                    # raw inputs of the first layer.
                    if layer > 0:
                        x = dropout.dropout(x, ratio=dropout_ratio)
                    rnn_in = (linear.linear(x, xws[layer_idx],
                                            xbs[layer_idx]) +
                              linear.linear(h, hws[layer_idx], hbs[layer_idx]))
                    if activation == 'tanh':
                        h_bar = tanh.tanh(rnn_in)
                    elif activation == 'relu':
                        h_bar = relu.relu(rnn_in)
                    # Re-attach the states of already-finished sequences so h
                    # keeps its full batch dimension for the next time step.
                    if h_rest is not None:
                        h = concat.concat([h_bar, h_rest], axis=0)
                    else:
                        h = h_bar
                    h_list.append(h_bar)
                return h, h_list
# Forward RNN
h, h_forward = _one_directional_loop(di=0)
hy.append(h)
if use_bi_direction:
# Backward RNN
h, h_backward = _one_directional_loop(di=1)
h_backward.reverse()
# Concat
xs_next = [concat.concat([hfi, hbi], axis=1) for (hfi, hbi) in
six.moves.zip(h_forward, h_backward)]
hy.append(h)
else:
# Uni-directional RNN
xs_next = h_forward
ys = xs_next
hy = stack.stack(hy)
return hy, tuple(ys)
| kiyukuta/chainer | chainer/functions/connection/n_step_rnn.py | Python | mit | 36,446 |
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import weakref
from i18n import _
from node import nullrev, nullid, hex, short
import mdiff, util, dagutil
import struct, os, bz2, zlib, tempfile
import discovery, error, phases, branchmap
_BUNDLE10_DELTA_HEADER = "20s20s20s20s"
def readexactly(stream, n):
    '''Read exactly n bytes from stream.read, aborting on short reads.'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the peer closed the stream mid-chunk
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
def getchunk(stream):
    """Return the next length-prefixed chunk from stream as a string."""
    # the 4-byte big-endian prefix counts itself, so a payload exists
    # only when the stored length exceeds 4
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length > 4:
        return readexactly(stream, length - 4)
    if length:
        # 1..4 (or negative) can never be a valid self-inclusive length
        raise util.Abort(_("invalid chunk length %d") % length)
    # zero length marks the end of a chunk group
    return ""
def chunkheader(length):
    """return a changegroup chunk header (string)"""
    # the on-wire length is big-endian and includes the 4 header bytes
    total = length + 4
    return struct.pack(">l", total)
def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    # a zero-length chunk terminates the current chunk group
    return struct.pack(">l", 0)
class nocompress(object):
    """Pass-through 'compressor' used for uncompressed bundle types.

    Mirrors the zlib/bz2 compressor object interface so callers can
    treat all bundle types uniformly.
    """
    def compress(self, x):
        # identity: emit the data unchanged
        return x
    def flush(self):
        # nothing is buffered, so there is never anything to flush
        return ""
# Map of bundle type name -> (on-the-wire header string, compressor factory).
# NOTE: "HG10BZ" deliberately writes only "HG10"; the bz2 stream that follows
# begins with its own "BZ" magic (decompressor() below re-feeds "BZ" when
# reading), so the complete header on disk still reads "HG10BZ".
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}
# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
def writebundle(cg, filename, bundletype, vfs=None):
    """Write a bundle file and return its filename.

    cg: changegroup object providing getchunks().
    filename: target path, or falsy to create a temporary file.
    bundletype: key into bundletypes selecting header + compression.
    vfs: optional vfs used to open/unlink the file instead of the OS API.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # from here on a partially-written file exists on disk; remember it
        # so the finally-block can delete it if anything below fails
        cleanup = filename
        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()
        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        for chunk in cg.getchunks():
            fh.write(z.compress(chunk))
        fh.write(z.flush())
        # success: disarm the error-path cleanup before returning
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)
def decompressor(fh, alg):
    """Wrap file object fh so reads yield decompressed data for alg.

    alg is the two-letter compression tag from the bundle header:
    'UN' (none), 'GZ' (zlib) or 'BZ' (bz2).
    """
    if alg == 'UN':
        # stored uncompressed: hand the raw file object straight back
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the "BZ" magic was consumed while sniffing the header;
            # feed it back so the bz2 stream parses correctly
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
class unbundle10(object):
    """Reader for a version-10 ("HG10") changegroup stream.

    Wraps a (possibly compressed) file object and exposes the chunk,
    header and delta parsing primitives used while applying a bundle.
    """
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        # _stream transparently decompresses according to alg
        self._stream = decompressor(fh, alg)
        self._type = alg
        # optional per-chunk progress callback, set by consumers
        self.callback = None
    def compressed(self):
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()
    def chunklength(self):
        """Read a 4-byte prefix and return the payload length (0 = end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                # the length includes its own 4 bytes, so 1..4 is bogus
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4
    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}
    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}
    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}
    def _deltaheader(self, headertuple, prevnode):
        # In HG10 the delta base is implicit: the first delta in a group
        # is against its first parent, later ones against the previous node.
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs
    def deltachunk(self, prevnode):
        """Parse one delta chunk; return {} at the end of the group."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return {'node': node, 'p1': p1, 'p2': p2, 'cs': cs,
                'deltabase': deltabase, 'delta': delta}
    def getchunks(self):
        """returns all the chunks contained in the bundle
        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data, otherwise it
        would block in case of sshrepo because it doesn't know the end of
        the stream.
        """
        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(self)
                if not chunk:
                    break
                empty = False
                yield chunkheader(len(chunk))
                pos = 0
                # re-emit payloads in at most 1 MiB slices
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()
class headerlessfixup(object):
    """File-like wrapper that replays already-consumed header bytes.

    h holds bytes that were read off fh before its type was known; they
    are served back first, after which reads fall through to fh.
    """
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        buffered = self._h
        if not buffered:
            # header fully replayed: read directly from the stream
            return readexactly(self._fh, n)
        d, self._h = buffered[:n], buffered[n:]
        if len(d) < n:
            # header exhausted mid-request: top up from the stream
            d += readexactly(self._fh, n - len(d))
        return d
class bundle10(object):
    """Generator of version-10 ("HG10") changegroup chunks for a repo."""
    deltaheader = _BUNDLE10_DELTA_HEADER
    def __init__(self, repo, bundlecaps=None):
        """Given a source repo, construct a bundler.
        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle.
        """
        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        self._changelog = repo.changelog
        self._manifest = repo.manifest
        # 'bundle.reorder' config: 'auto' -> None (decide per revlog),
        # otherwise an explicit boolean
        reorder = repo.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        self._progress = repo.ui.progress
    def close(self):
        # end-of-group marker chunk
        return closechunk()
    def fileheader(self, fname):
        # a filelog group starts with a chunk carrying just the filename
        return chunkheader(len(fname)) + fname
    def group(self, nodelist, revlog, lookup, units=None, reorder=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).
        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.
        If units is not None, progress detail will be generated, units specifies
        the type of revlog that is touched (changelog, manifest, etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and reorder is not False) or reorder:
            dag = dagutil.revlogdag(revlog)
            revs = set(revlog.rev(n) for n in nodelist)
            revs = dag.linearize(revs)
        else:
            revs = sorted([revlog.rev(n) for n in nodelist])
        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)
        # build deltas
        total = len(revs) - 1
        msgbundling = _('bundling')
        for r in xrange(len(revs) - 1):
            if units is not None:
                self._progress(msgbundling, r + 1, unit=units, total=total)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c
        yield self.close()
    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs, source):
        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]
    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)

        Emits, in order: the changelog group, the manifest group, then one
        group per changed file, then the final terminator chunk.
        '''
        repo = self._repo
        cl = self._changelog
        mf = self._manifest
        reorder = self._reorder
        progress = self._progress
        # for progress output
        msgbundling = _('bundling')
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            changedfiles.update(c[3])
            # record the first changeset introducing this manifest version
            mfs.setdefault(c[0], x)
            return x
        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def lookupmf(x):
            clnode = mfs[x]
            if not fastpathlinkrev:
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        # record the first changeset introducing this filelog
                        # version
                        fnodes[f].setdefault(n, clnode)
            return clnode
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)
        for f in changedfiles:
            fnodes[f] = {}
        mfnodes = self.prune(mf, mfs, commonrevs, source)
        for chunk in self.group(mfnodes, mf, lookupmf, units=_('manifests'),
                                reorder=reorder):
            yield chunk
        progress(msgbundling, None)
        # manifests are no longer needed once their group has been emitted
        mfs.clear()
        needed = set(cl.rev(x) for x in clnodes)
        def linknodes(filerevlog, fname):
            # fastpath: derive filenode -> linkrev-node directly from the
            # filelog instead of the per-manifest scan done in lookupmf
            if fastpathlinkrev:
                llr = filerevlog.linkrev
                def genfilenodes():
                    for r in filerevlog:
                        linkrev = llr(r)
                        if linkrev in needed:
                            yield filerevlog.node(r), cl.node(linkrev)
                fnodes[fname] = dict(genfilenodes())
            return fnodes.get(fname, {})
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk
        yield self.close()
        progress(msgbundling, None)
        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        """Yield one filelog group per changed file (sorted by name)."""
        repo = self._repo
        progress = self._progress
        reorder = self._reorder
        msgbundling = _('bundling')
        total = len(changedfiles)
        # for progress output
        msgfiles = _('files')
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes, we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]
            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs, source)
            if filenodes:
                progress(msgbundling, i + 1, item=fname, unit=msgfiles,
                         total=total)
                yield self.fileheader(fname)
                for chunk in self.group(filenodes, filerevlog, lookupfilelog,
                                        reorder=reorder):
                    yield chunk
    def revchunk(self, revlog, rev, prev, linknode):
        """Yield header + delta chunks for one revision against prev."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev
        prefix = ''
        if base == nullrev:
            # no base to delta against: send a full revision disguised as
            # a trivial diff
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
def _changegroupinfo(repo, nodes, source):
if repo.ui.verbose or source == 'bundle':
repo.ui.status(_("%d changesets found\n") % len(nodes))
if repo.ui.debugflag:
repo.ui.debug("list of changesets:\n")
for node in nodes:
repo.ui.debug("%s\n" % hex(node))
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Build the changegroup for outgoing and wrap it as an unbundle10.

    Runs the 'preoutgoing' hook, reports the changeset count, then returns
    an uncompressed ('UN') unbundle10 reader over the generated chunks.
    """
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    # NOTE: sort() mutates outgoing.missingheads in place.
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))
    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return unbundle10(util.chunkbuffer(gengroup), 'UN')
def changegroupsubset(repo, roots, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the roots and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.
    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.
    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = repo.changelog
    if not roots:
        # no roots means "from the very beginning of history"
        roots = [nullid]
    # TODO: remove call to nodesbetween.
    csets, roots, heads = cl.nodesbetween(roots, heads)
    # the discovery bases are the parents of the roots that sit outside
    # the requested range
    discbases = []
    for n in roots:
        discbases.extend([p for p in cl.parents(n) if p != nullid])
    outgoing = discovery.outgoing(cl, discbases, heads)
    bundler = bundle10(repo)
    return getsubset(repo, outgoing, bundler, source)
def getlocalbundle(repo, source, outgoing, bundlecaps=None):
    """Like getbundle, but taking a discovery.outgoing as an argument.
    This is only implemented for local repos and reuses potentially
    precomputed sets in outgoing."""
    if not outgoing.missing:
        # nothing to transfer
        return None
    return getsubset(repo, outgoing, bundle10(repo, bundlecaps), source)
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.
    This is a separate function so extensions can have access to
    the logic.
    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        # drop nodes the local repo has never heard of
        common = [node for node in common if cl.hasnode(node)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(cl, common, heads)
def getbundle(repo, source, heads=None, common=None, bundlecaps=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.
    If heads is None, use the local heads. If common is None, use [nullid].
    The nodes in common might not all be known locally due to the way the
    current discovery protocol works.
    """
    # delegate the set arithmetic and the bundling to the two helpers
    outgoing = _computeoutgoing(repo, heads, common)
    return getlocalbundle(repo, source, outgoing, bundlecaps=bundlecaps)
def changegroup(repo, basenodes, source):
    """Return a changegroup from basenodes up to the current heads."""
    # to avoid a race we use changegroupsubset() (issue1320)
    return changegroupsubset(repo, basenodes, repo.heads(), source)
def addchangegroupfiles(repo, source, revmap, trp, pr, needfiles):
    """Apply the filelog groups from an incoming changegroup stream.

    source: unbundle10-style reader positioned at the file section.
    revmap: node -> linkrev mapping callback.
    trp: transaction proxy the revlogs write through.
    pr: progress callback invoked once per filelog group.
    needfiles: {filename: set(nodes)} that MUST appear in the stream
        (server-side validation); verified and emptied as we go.
    Returns (revisions_added, files_touched).
    """
    revisions = 0
    files = 0
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            # empty header chunk terminates the file section
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()
        fl = repo.file(f)
        o = len(fl)
        if not fl.addgroup(source, revmap, trp):
            raise util.Abort(_("received file revlog group is empty"))
        revisions += len(fl) - o
        files += 1
        if f in needfiles:
            needs = needfiles[f]
            # every newly-added node must have been expected
            for new in xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise util.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    repo.ui.progress(_('files'), None)
    # anything left in needfiles must already exist locally, otherwise
    # the incoming changesets reference data we never received
    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise util.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))
    return revisions, files
def addchangegroup(repo, source, srctype, url, emptyok=False):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.
    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    repo = repo.unfiltered()
    def csmap(x):
        repo.ui.debug("add changeset %s\n" % short(x))
        return len(cl)
    def revmap(x):
        return cl.rev(x)
    if not source:
        return 0
    repo.hook('prechangegroup', throw=True, source=srctype, url=url)
    changesets = files = revisions = 0
    efiles = set()
    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = repo.changelog
    cl.delayupdate()
    oldheads = cl.heads()
    tr = repo.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        repo.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        # Small stateful progress callback; note the upstream quirk that
        # the __call__ "self" parameter is named `repo`, shadowing the
        # outer repository variable inside this class only.
        class prog(object):
            step = _('changesets')
            count = 1
            ui = repo.ui
            total = None
            def __call__(repo):
                repo.ui.progress(repo.step, repo.count, unit=_('chunks'),
                                 total=repo.total)
                repo.count += 1
        pr = prog()
        source.callback = pr
        source.changelogheader()
        srccontent = cl.addgroup(source, csmap, trp)
        if not (srccontent or emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        # estimate the number of touched files for progress totals
        for c in xrange(clstart, clend):
            efiles.update(repo[c].files())
        efiles = len(efiles)
        repo.ui.progress(_('changesets'), None)
        # pull off the manifest group
        repo.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        repo.manifest.addgroup(source, revmap, trp)
        repo.ui.progress(_('manifests'), None)
        needfiles = {}
        if repo.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = repo.changelog.read(repo.changelog.node(cset))[0]
                mfest = repo.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)
        # process the files
        repo.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None
        newrevs, newfiles = addchangegroupfiles(repo, source, revmap, trp, pr,
                                                needfiles)
        revisions += newrevs
        files += newfiles
        # dh = head-count delta, discounting heads that close a branch
        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                if h not in oldheads and repo[h].closesbranch():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh
        repo.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))
        repo.invalidatevolatilesets()
        if changesets > 0:
            p = lambda: cl.writepending() and repo.root or ""
            if 'node' not in tr.hookargs:
                tr.hookargs['node'] = hex(cl.node(clstart))
            repo.hook('pretxnchangegroup', throw=True, source=srctype,
                      url=url, pending=p, **tr.hookargs)
        added = [cl.node(r) for r in xrange(clstart, clend)]
        publishing = repo.ui.configbool('phases', 'publish', True)
        if srctype in ('push', 'serve'):
            # Old servers can not push the boundary themselves.
            # New servers won't push the boundary if changeset already
            # exists locally as secret
            #
            # We should not use added here but the list of all change in
            # the bundle
            if publishing:
                phases.advanceboundary(repo, phases.public, srccontent)
            else:
                phases.advanceboundary(repo, phases.draft, srccontent)
                phases.retractboundary(repo, phases.draft, added)
        elif srctype != 'strip':
            # publishing only alter behavior during push
            #
            # strip should not touch boundary at all
            phases.retractboundary(repo, phases.draft, added)
        # make changelog see real files again
        cl.finalize(trp)
        tr.close()
        if changesets > 0:
            if srctype != 'strip':
                # During strip, branchcache is invalid but coming call to
                # `destroyed` will repair it.
                # In other case we can safely update cache on disk.
                branchmap.updatecache(repo.filtered('served'))
            def runhooks():
                # These hooks run when the lock releases, not when the
                # transaction closes. So it's possible for the changelog
                # to have changed since we last saw it.
                if clstart >= len(repo):
                    return
                # forcefully update the on-disk branch cache
                repo.ui.debug("updating the branch cache\n")
                repo.hook("changegroup", source=srctype, url=url,
                          **tr.hookargs)
                for n in added:
                    repo.hook("incoming", node=hex(n), source=srctype,
                              url=url)
                newheads = [h for h in repo.heads() if h not in oldheads]
                repo.ui.log("incoming",
                            "%s incoming changes - new heads: %s\n",
                            len(added),
                            ', '.join([hex(c[:6]) for c in newheads]))
            repo._afterlock(runhooks)
    finally:
        tr.release()
    # never return 0 here:
    # NOTE(review): dh is only assigned inside the try block after the
    # file section is applied; it is always set on the non-exception path.
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
| ya790206/temp_hg | mercurial/changegroup.py | Python | gpl-2.0 | 26,986 |
import mock
from twisted.trial import unittest
from tests.mocks import mock_conf_settings
from lbrynet.daemon.auth import server
class AuthJSONRPCServerTest(unittest.TestCase):
    """Unit tests for AuthJSONRPCServer's Origin/Referer header checks."""
    # TODO: move to using a base class for tests
    # and add useful general utilities like this
    # onto it.
    def setUp(self):
        # authentication is disabled so only the header-source checks run
        self.server = server.AuthJSONRPCServer(use_authentication=False)
    def test_get_server_port(self):
        # default port (80) when the URL carries none, explicit port otherwise
        self.assertSequenceEqual(
            ('example.com', 80), self.server.get_server_port('http://example.com'))
        self.assertSequenceEqual(
            ('example.com', 1234), self.server.get_server_port('http://example.com:1234'))
    def test_foreign_origin_is_rejected(self):
        mock_conf_settings(self)  # have to call this to generate Config mock
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://example.com')
        self.assertFalse(self.server._check_header_source(request, 'Origin'))
    def test_wrong_port_is_rejected(self):
        # host matches the default but the port differs from api_port
        mock_conf_settings(self, {'api_port': 1234})
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://localhost:9999')
        self.assertFalse(self.server._check_header_source(request, 'Origin'))
    def test_matching_origin_is_allowed(self):
        mock_conf_settings(self, {'api_host': 'example.com', 'api_port': 1234})
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://example.com:1234')
        self.assertTrue(self.server._check_header_source(request, 'Origin'))
    def test_any_origin_is_allowed(self):
        # binding to 0.0.0.0 disables origin filtering entirely
        mock_conf_settings(self, {'api_host': '0.0.0.0', 'api_port': 80})
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://example.com')
        self.assertTrue(self.server._check_header_source(request, 'Origin'))
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://another-example.com')
        self.assertTrue(self.server._check_header_source(request, 'Origin'))
    def test_matching_referer_is_allowed(self):
        # the check also applies to the Referer header, query string included
        mock_conf_settings(self, {'api_host': 'the_api', 'api_port': 1111})
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://the_api:1111?settings')
        self.assertTrue(self.server._check_header_source(request, 'Referer'))
        request.getHeader.assert_called_with('Referer')
    def test_request_is_allowed_when_matching_allowed_origin_setting(self):
        mock_conf_settings(self, {'allowed_origin': 'http://example.com:1234'})
        request = mock.Mock(['getHeader'])
        request.getHeader = mock.Mock(return_value='http://example.com:1234')
        self.assertTrue(self.server._check_header_source(request, 'Origin'))
    def test_request_is_rejected_when_not_matching_allowed_origin_setting(self):
        mock_conf_settings(self, {'allowed_origin': 'http://example.com:1234'})
        request = mock.Mock(['getHeader'])
        # note the ports don't match
        request.getHeader = mock.Mock(return_value='http://example.com:1235')
        self.assertFalse(self.server._check_header_source(request, 'Origin'))
| zestyr/lbry | tests/unit/lbrynet_daemon/auth/test_server.py | Python | mit | 3,256 |
# Script save model renders for selected cameras (or all aligned cameras if no aligned cameras selected)
# to the same folder where the source photos are present with the "_render" suffix.
#
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
import Metashape
import os
# Checking compatibility: compare only the major.minor part of the running
# Metashape version and refuse to run against an untested release.
compatible_major_version = "1.5"
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
    raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
def get_cameras(chunk):
    """Return the aligned regular cameras to render.

    Prefers the user's selection; falls back to every aligned regular
    camera when nothing (aligned) is selected.
    """
    aligned = [camera for camera in chunk.cameras
               if camera.transform and camera.type == Metashape.Camera.Type.Regular]
    chosen = [camera for camera in aligned if camera.selected]
    return chosen if chosen else aligned
def render_cameras():
    """Render the model from each camera and save next to the source photo.

    Output files are written to the photo's folder with a "_render.jpg"
    suffix replacing the original extension.
    """
    print("Script started...")
    chunk = Metashape.app.document.chunk
    if not chunk.model:
        raise Exception("No model!")
    for camera in get_cameras(chunk):
        # render with the camera's own pose and calibration
        render = chunk.model.renderImage(camera.transform, camera.sensor.calibration)
        photo_dir = os.path.dirname(camera.photo.path)
        photo_filename = os.path.basename(camera.photo.path)
        render_filename = os.path.splitext(photo_filename)[0] + "_render.jpg"
        render.save(os.path.join(photo_dir, render_filename))
    print("Script finished!")
# Register the command in Metashape's menu bar and tell the user where it is.
label = "Custom menu/Render photos for cameras"
Metashape.app.addMenuItem(label, render_cameras)
print("To execute this script press {}".format(label))
| agisoft-llc/photoscan-scripts | src/render_photos_for_cameras.py | Python | mit | 1,728 |
import re
from django.contrib.gis.db import models
class BaseSpatialFeatures:
    """
    Feature flags shared by the spatial database backends.

    Concrete backends mix this into their ``DatabaseFeatures`` class and
    override the attributes below; the ``has_<Func>_function`` attributes
    are answered dynamically from the backend's ``unsupported_functions``
    set (see ``__getattr__``).
    """
    gis_enabled = True
    # Does the database contain a SpatialRefSys model to store SRID information?
    has_spatialrefsys_table = True
    # Does the backend support the django.contrib.gis.utils.add_srs_entry() utility?
    supports_add_srs_entry = True
    # Does the backend introspect GeometryField to its subtypes?
    supports_geometry_field_introspection = True
    # Does the backend support storing 3D geometries?
    supports_3d_storage = False
    # Reference implementation of 3D functions is:
    # https://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
    supports_3d_functions = False
    # Does the database support SRID transform operations?
    supports_transform = True
    # Can geometry fields be null?
    supports_null_geometries = True
    # Are empty geometries supported?
    supports_empty_geometries = False
    # Can the function be applied on geodetic coordinate systems?
    supports_distance_geodetic = True
    supports_length_geodetic = True
    supports_perimeter_geodetic = False
    supports_area_geodetic = True
    # Is the database able to count vertices on polygons (with `num_points`)?
    supports_num_points_poly = True
    # The following properties indicate if the database backend support
    # certain lookups (dwithin, left and right, relate, ...)
    supports_left_right_lookups = False
    # Does the backend support expressions for specifying distance in the
    # dwithin lookup?
    supports_dwithin_distance_expr = True
    # Does the database have raster support?
    supports_raster = False
    # Does the database support a unique index on geometry fields?
    supports_geometry_field_unique_index = True
    @property
    def supports_bbcontains_lookup(self):
        return 'bbcontains' in self.connection.ops.gis_operators
    @property
    def supports_contained_lookup(self):
        return 'contained' in self.connection.ops.gis_operators
    @property
    def supports_crosses_lookup(self):
        return 'crosses' in self.connection.ops.gis_operators
    @property
    def supports_distances_lookups(self):
        return self.has_Distance_function
    @property
    def supports_dwithin_lookup(self):
        return 'dwithin' in self.connection.ops.gis_operators
    @property
    def supports_relate_lookup(self):
        return 'relate' in self.connection.ops.gis_operators
    @property
    def supports_isvalid_lookup(self):
        return self.has_IsValid_function
    # Is the aggregate supported by the database?
    @property
    def supports_collect_aggr(self):
        return models.Collect not in self.connection.ops.disallowed_aggregates
    @property
    def supports_extent_aggr(self):
        return models.Extent not in self.connection.ops.disallowed_aggregates
    @property
    def supports_make_line_aggr(self):
        return models.MakeLine not in self.connection.ops.disallowed_aggregates
    @property
    def supports_union_aggr(self):
        return models.Union not in self.connection.ops.disallowed_aggregates
    def __getattr__(self, name):
        """
        Answer ``has_<FuncName>_function`` attributes dynamically by
        consulting the backend's set of unsupported functions.
        """
        m = re.match(r'has_(\w*)_function$', name)
        if m:
            func_name = m[1]
            return func_name not in self.connection.ops.unsupported_functions
        # Include the attribute name so the resulting error message is not
        # empty (a bare `raise AttributeError` made failures hard to debug).
        raise AttributeError(name)
| theo-l/django | django/contrib/gis/db/backends/base/features.py | Python | bsd-3-clause | 3,370 |
# NOTE(review): this entire module body is dead code, disabled by wrapping it
# in two module-level triple-quoted strings. First experiment: inspect term
# counts / tf-idf nnz for several preprocessing variants (Python 2 print
# statements, sklearn/pickle dependencies).
'''import json, pickle
from sklearn.feature_extraction.text import TfidfVectorizer
# load terms
with open('../Dataset/Collection/Stopwords/collection_terms.json', 'r') as f:
    collection_terms = json.load(f)
f.close()
# load tf-idf matrix
with open('../Dataset/Collection/Stopwords/collection_tfidf.pkl', 'r' ) as f:
    collection_tfidf = pickle.load(f)
f.close()
print 'Stopwords'
print len(collection_terms), collection_tfidf.getnnz()
# load terms
with open('../Dataset/Collection/NonNon/collection_terms.json', 'r') as f:
    collection_terms = json.load(f)
f.close()
# load tf-idf matrix
with open('../Dataset/Collection/NonNon/collection_tfidf.pkl', 'r' ) as f:
    collection_tfidf = pickle.load(f)
f.close()
print 'NonNon'
print len(collection_terms), collection_tfidf.getnnz()
# load terms
with open('../Dataset/Collection/Stemming/collection_terms.json', 'r') as f:
    collection_terms = json.load(f)
f.close()
# load tf-idf matrix
with open('../Dataset/Collection/Stemming/collection_tfidf.pkl', 'r' ) as f:
    collection_tfidf = pickle.load(f)
f.close()
print 'Stemming'
print len(collection_terms), collection_tfidf.getnnz()
# load terms
with open('../Dataset/Collection/collection_terms.json', 'r') as f:
    collection_terms = json.load(f)
f.close()
# load tf-idf matrix
with open('../Dataset/Collection/collection_tfidf.pkl', 'r' ) as f:
    collection_tfidf = pickle.load(f)
f.close()
print 'StemmingStopwords'
print len(collection_terms), collection_tfidf.getnnz()
'''
# NOTE(review): second disabled experiment: strip \$...\$ and $$...$$ LaTeX
# equations from CVPR article full texts and re-save the collection.
'''
import json, string
def remove_equations(text):
    result_text = ''
    non_equation_start_index = 0
    while True:
        equation_start_index = string.find(text, '\\$', non_equation_start_index)
        if equation_start_index != -1:
            # extract the non-equation content
            result_text += text[non_equation_start_index: equation_start_index]
            # deal with next equation mark
            equation_end_index = string.find(text, '\\$', equation_start_index + 2)
            if equation_end_index != -1:
                non_equation_start_index = equation_end_index + 3
                if non_equation_start_index == len(text):
                    break
            else:
                break
        else:
            result_text += text[non_equation_start_index:]
            break
    text = result_text
    result_text = ''
    non_equation_start_index = 0
    while True:
        equation_start_index = string.find(text, '$$', non_equation_start_index)
        if equation_start_index != -1:
            # extract the non-equation content
            result_text += text[non_equation_start_index: equation_start_index]
            # deal with next equation mark
            equation_end_index = string.find(text, '$$', equation_start_index + 2)
            if equation_end_index != -1:
                equation = text[equation_start_index + 2: equation_end_index]
                text = string.replace(text, equation, '')
                non_equation_start_index = non_equation_start_index + 4
                if non_equation_start_index == len(text):
                    break
            else:
                break
        else:
            result_text += text[non_equation_start_index:]
            break
    return result_text
with open('../Dataset/CVPR_sample.json') as f:
    all_articles = json.load(f)
f.close()
for year in all_articles:
    one_year_articles = all_articles[year]
    for article_ID in one_year_articles:
        article = one_year_articles[article_ID]
        if article['FullText'] is not None:
            for section in article['FullText']:
                section_content = article['FullText'][section]
                section_content = remove_equations(section_content)
                all_articles[year][article_ID]['FullText'][section] = section_content
with open('CVPR_sample_new.json', 'w') as f:
    json.dump(all_articles, f)
f.close()
''' | lidalei/IR-Project | src/UnitTest.py | Python | gpl-2.0 | 4,092 |
# -*- coding: UTF-8 -*-
'''
Authorized by vlon Jang
Created on May 16, 2016
Email:zhangzhiwei@ict.ac.cn
From Institute of Computing Technology
All Rights Reserved.
'''
import pandas as pd
import pymysql
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import LinearSVR
import numpy as np
import time, os, copy, re, time, sys, datetime
from sklearn.linear_model import Ridge
import xgboost as xgb
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
class LinearRegression(object):
    """
    Minimal linear regression used to fine-tune predictions.

    solver:
        "ls" for the least-squares (normal-equation) solution,
        "sgd" for batch gradient descent.
    lr, max_iter:
        learning rate and iteration count, used only by the "sgd" solver.
    bias:
        when True a constant column of ones is appended to X so an
        intercept is learned as the last coefficient.

    Note: this deliberately shadows sklearn's LinearRegression imported above.
    """
    def __init__(self, solver="ls", lr=0.2, max_iter=200, bias=False):
        self.solver = solver
        self.coef_ = None
        self.bias = bias
        if self.solver == 'sgd':
            self.lr = lr
            self.max_iter = max_iter

    def gradient_descent(self, X, y):
        """Run batch gradient descent and return the fitted coefficients."""
        # ``range`` instead of the Python-2-only ``xrange`` (NameError on
        # Python 3; identical iteration semantics). The unused ``m = len(y)``
        # local was dropped.
        for _ in range(self.max_iter):
            # Predictions are fixed for the whole pass: standard batch update.
            pred = X.dot(self.coef_)
            for j in range(X.shape[1]):
                errors = (pred - y) * X[:, j]  # element-wise gradient component
                self.coef_[j] = self.coef_[j] - self.lr * np.mean(errors)
        return self.coef_

    def fit(self, X, y):
        """Fit coefficients; X is (n_samples, n_features), y is 1-D."""
        if self.bias:
            X = np.hstack([X, np.ones((X.shape[0], 1))])
        if self.solver == "ls":
            self.coef_ = np.linalg.lstsq(X, y)[0]
        else:
            self.coef_ = np.zeros(X.shape[1])
            self.coef_ = self.gradient_descent(X, y)

    def predict(self, X):
        """Return X @ coef_, appending the bias column when configured."""
        if self.bias:
            X = np.hstack([X, np.ones((X.shape[0], 1))])
        return X.dot(self.coef_)
def get_train_predict(model = None, use_cache = True,
                      use_artist_features=False, use_user_artist_features = False):
    """
    Fit ``model`` on the training split and return predictions for the test
    split, keyed by (user, artist, date).

    model: any regressor exposing fit()/predict().
    use_cache: reuse the ./data/*.csv feature caches instead of querying MySQL.
    use_artist_features / use_user_artist_features: join the optional
        artist-level and user+artist-level feature tables.

    Returns a DataFrame of test key rows concatenated with the predictions.
    """
    mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
    # Cache file names encode which optional feature sets were joined in.
    append_ua = lambda x : '_ua' if x else ''
    append_uua = lambda x : '_uua' if x else ''
    X_train_filename = './data/X_train_data%s%s.csv' %(append_ua(use_artist_features),
                                                       append_uua(use_user_artist_features))
    X_test_filename = './data/X_test_data%s%s.csv' %(append_ua(use_artist_features),
                                                     append_uua(use_user_artist_features))
    # Pretty-print the model class name and its hyper-parameters for the log.
    model_type = re.split('\.|\'', "%s" %type(model))
    params = re.split("(|)", "%s" %model.get_params())
    print "Training model %s Got!\nModel params are: %s" %(model_type[-2], params[-1])
    print "Getting X train data"
    X_train = None
    if os.path.exists(X_train_filename) and use_cache:
        X_train = pd.read_csv(X_train_filename)
        X_train = X_train.fillna(value=0).values
    else:
        X_train = pd.read_sql('select * from user_X_train;', con=mysql_cn)
        if use_artist_features:
            print '\tGetting artist X train data...'
            artist_train = pd.read_sql('select * from user_artist_features_train;',
                                       con=mysql_cn).replace('NULL', value = 0)
            X_train = pd.concat([X_train, artist_train], axis = 1, ignore_index=True)
        if use_user_artist_features:
            print '\tGetting user_and_artist X train data...'
            artist_train = pd.read_sql('select * from user_and_artist_features_train;',
                                       con=mysql_cn).replace('NULL', value = 0)
            X_train = pd.concat([X_train, artist_train], axis = 1, ignore_index=True)
        X_train = X_train.fillna(value=0)
        # Persist the assembled matrix so later runs can skip the SQL joins.
        X_train.to_csv(X_train_filename, header =False, index=False)
        X_train = X_train.astype(float).values
    print "Getting y train data"
    y_train = None
    if os.path.exists('./data/y_train_data.csv') and use_cache:
        y_train = pd.read_csv('./data/y_train_data.csv')
        y_train = y_train.fillna(value=0).values
    else:
        y_train = pd.read_sql('select * from user_y_train;', con=mysql_cn)
        y_train = y_train.fillna(value=0)
        y_train.to_csv('./data/y_train_data.csv', header =False, index=False)
        y_train = y_train.values
    # Flatten the (n, 1) target column to the 1-D shape regressors expect.
    y_train = y_train.reshape((y_train.shape[0]))
    print "Getting X test data"
    X_test = None
    if os.path.exists(X_test_filename) and use_cache:
        X_test = pd.read_csv(X_test_filename)
        X_test= X_test.fillna(value=0).values
    else:
        X_test= pd.read_sql('select * from user_X_test;', con=mysql_cn)
        if use_artist_features:
            print '\tGetting artist X test data...'
            artist_test = pd.read_sql('select * from user_artist_features_test;',
                                      con=mysql_cn).replace('NULL', value = 0)
            X_test = pd.concat([X_test, artist_test], axis = 1, ignore_index=True)
        if use_user_artist_features:
            print '\tGetting user_and_artist X test data...'
            artist_test = pd.read_sql('select * from user_and_artist_features_test;',
                                      con=mysql_cn).replace('NULL', value = 0)
            X_test = pd.concat([X_test, artist_test], axis = 1, ignore_index=True)
        X_test= X_test.fillna(value=0)
        X_test.to_csv(X_test_filename, header =False, index=False)
        X_test= X_test.astype(float).values
    print 'Fitting data...'
    model.fit(X_train, y_train)
    print 'Predicting data...'
    y_test = model.predict(X_test)
    y_test = pd.DataFrame(y_test)
    print "Getting test keys"
    keys_test = None
    if os.path.exists('./data/keys_test_data.csv') and use_cache:
        keys_test = pd.read_csv('./data/keys_test_data.csv')
    else:
        keys_test = pd.read_sql('select * from user_keys_test;', con=mysql_cn)
        keys_test.to_csv('./data/keys_test_data.csv', header =False, index=False)
    # Pair each (user, artist, ds) key row with its predicted value.
    res = pd.concat([keys_test, y_test], axis = 1, ignore_index=True)
    mysql_cn.close()
    return res
def get_test_predict(model = None, use_cache = True,
                     use_artist_features = False, use_user_artist_features = False):
    """
    Fit ``model`` on the test-split features (the cached csv written earlier
    by get_train_predict) and predict on the submit split.

    Returns a DataFrame of submit key rows concatenated with the predictions.
    """
    mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
    append_ua = lambda x : '_ua' if x else ''
    append_uua = lambda x : '_uua' if x else ''
    # Here the "train" matrix is the previous stage's *test* matrix and the
    # prediction target is the submit split.
    X_train_filename = './data/X_test_data%s%s.csv' %(append_ua(use_artist_features),
                                                      append_uua(use_user_artist_features))
    X_test_filename = './data/X_submit_data%s%s.csv' %(append_ua(use_artist_features),
                                                       append_uua(use_user_artist_features))
    model_type = re.split('\.|\'', "%s" %type(model))
    params = re.split("(|)", "%s" %model.get_params())
    print "Testing model %s Got!\nModel params are: %s" %(model_type[-2], params[-1])
    print "Getting X test data"
    # NOTE(review): the cache/SQL branch below was deliberately disabled; the
    # cached csv written by get_train_predict is assumed to already exist.
#     X_train = None
#     if os.path.exists(X_train_filename) and use_cache:
    X_train = pd.read_csv(X_train_filename)
    X_train = X_train.fillna(value=0).values
#     else:
#         X_train = pd.read_sql('select * from user_X_test;', con=mysql_cn)
#         if use_artist_features:
#             print '\tGetting artist X test data...'
#             artist_train = pd.read_sql('select * from user_artist_features_test;',
#                                        con=mysql_cn).replace('NULL', value = 0)
#             X_train = pd.concat([X_train, artist_train], axis = 1, ignore_index=True)
#         if use_user_artist_features:
#             print '\tGetting user_and_artist X test data...'
#             artist_train = pd.read_sql('select * from user_and_artist_features_test;',
#                                        con=mysql_cn).replace('NULL', value = 0)
#             X_train = pd.concat([X_train, artist_train], axis = 1, ignore_index=True)
#         X_train = X_train.fillna(value=0)
#         X_train.to_csv(X_train_filename, header =False, index=False)
#         X_train = X_train.astype(float).values
    print "Getting y test data"
    y_train = None
    if os.path.exists('./data/y_test_data.csv') and use_cache:
        y_train = pd.read_csv('./data/y_test_data.csv')
        y_train = y_train.fillna(value=0).values
    else:
        y_train = pd.read_sql('select * from user_y_test;', con=mysql_cn)
        y_train = y_train.fillna(value=0)
        y_train.to_csv('./data/y_test_data.csv', header =False, index=False)
        y_train = y_train.values
    # Flatten the (n, 1) target column to the 1-D shape regressors expect.
    y_train = y_train.reshape((y_train.shape[0]))
    print "Getting X submit data"
    X_test = None
    if os.path.exists(X_test_filename) and use_cache:
        X_test = pd.read_csv(X_test_filename)
        X_test= X_test.fillna(value=0).values
    else:
        X_test= pd.read_sql('select * from user_X_submit;', con=mysql_cn)
        if use_artist_features:
            print '\tGetting artist X submit data...'
            artist_test = pd.read_sql('select * from user_artist_features_submit;',
                                      con=mysql_cn).replace('NULL', value = 0)
            X_test = pd.concat([X_test, artist_test], axis = 1, ignore_index=True)
        if use_user_artist_features:
            print '\tGetting user_and_artist X submit data...'
            artist_test = pd.read_sql('select * from user_and_artist_features_submit;',
                                      con=mysql_cn).replace('NULL', value = 0)
            X_test = pd.concat([X_test, artist_test], axis = 1, ignore_index=True)
        X_test= X_test.fillna(value=0)
        X_test.to_csv(X_test_filename, header =False, index=False)
        X_test= X_test.astype(float).values
    print 'Fitting data...'
    model.fit(X_train, y_train)
    print 'Predicting data...'
    y_test = model.predict(X_test)
    y_test = pd.DataFrame(y_test)
    print "Getting submit keys"
    keys_test = None
    if os.path.exists('./data/keys_submit_data.csv') and use_cache:
        keys_test = pd.read_csv('./data/keys_submit_data.csv')
    else:
        keys_test = pd.read_sql('select * from user_keys_submit;', con=mysql_cn)
        keys_test.to_csv('./data/keys_submit_data.csv', header =False, index=False)
    res = pd.concat([keys_test, y_test], axis = 1, ignore_index=True)
    mysql_cn.close()
    return res
def gen_predic_csv(dateNow = time.strftime('%Y%m%d'),
                   timeNow = time.strftime('%H%M%S'),
                   use_cache = True,
                   use_artist_features = False,
                   use_user_artist_features = False):
    """
    Build the regressor, log its parameters, and write the raw per-user
    predictions for the test and submit splits to ./<date>/<time>_train.csv
    and ./<date>/<time>_test.csv respectively.
    """
    # Earlier model candidates kept for reference (RandomForest / LinearSVR):
#     model = RandomForestRegressor(n_jobs=-1,
#                                   n_estimators=100,
#                                   max_features=5,#5
#                                   max_depth=8,#8
#                                   min_samples_leaf=2,
#                                   random_state=219)
#     model = LinearSVR(C=18,random_state=219)#17
#     model=RandomForestRegressor(
#             n_estimators=100,
#             random_state=219,
#             n_jobs=-1,
#             min_samples_split=4)#438*
    model=xgb.XGBRegressor(
            max_depth=8,
            learning_rate=0.03,
            n_estimators=1000,
#             silent=True,
#             objective='count:poisson',#reg:linear,count:poisson
            nthread=-1,
#             gamma=0.,
#             min_child_weight=2,
#             max_delta_step=2,
#             subsample=0.8,
#             colsample_bytree=0.3,
#             colsample_bylevel=1,
#             reg_alpha=0,
#             reg_lambda=10,
            seed=219,
            missing=None)
    # Append the hyper-parameters to this run's log file.
    log_file = open('./%s/%s.log' %(dateNow, timeNow), 'a')
    log_file.write(re.split("(|)", "%s" %model.get_params())[-1] + '\n')
    log_file.close()
    # Independent copies so the test and submit fits do not share state.
    test_model = copy.deepcopy(model)
    submit_model = copy.deepcopy(model)
    get_train_predict(test_model, use_cache, use_artist_features, use_user_artist_features).to_csv(
        './%s/%s_train.csv' %(dateNow, timeNow), header=False, index = False)
    get_test_predict(submit_model, use_cache, use_artist_features, use_user_artist_features).to_csv(
        './%s/%s_test.csv' %(dateNow, timeNow), header=False, index = False)
# def get_songDic(ifile = None):
# assert ifile
# songDic = {}
# f = open(ifile).readlines()
# for item in f:
# items = item.split(',')
# if not songDic.has_key(items[0]):
# songDic[items[0]] = items[1]
# return songDic
#
# def get_artist(ifile = None, songdic= None):
# assert ifile and songdic
# f = open(ifile).readlines()
# res = []
# for item in f:
# items = item.split(',')
# if songdic.has_key(items[1]):
# if items[2] == '20151031' or items[2] == '20150831':
# continue
# res.append([songdic[items[1]], float(items[3].split('\n')[0]), items[2]])
# return res
'''
This function is used to fine tuning the results using lr.
'''
def gen_finally_results(dateNow = None, timeNow = None):
    """
    Fine-tune the raw predictions: fit a linear regression mapping the
    averaged test predictions to the true per-artist average plays, apply it
    to the submit predictions, and write the final submit csv plus its
    averaged and min-error variants.
    """
    print '--------------------------Fine tuning model ------------------------------'
    mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
    train_X_data = pd.read_csv('./%s/%s_test_results_avg.csv' %(dateNow, timeNow),
                               names=['artist_id', 'plays', 'ds'])
    train_X_data = train_X_data.sort_values(by=['artist_id', 'ds'])
    train_X_data['plays'] = train_X_data['plays'].astype(int)
#     print train_X_data
    # Ground truth: per-artist average daily plays over the evaluation window,
    # broadcast to every (artist, ds) row of the fixed test key table.
    train_y_data = pd.read_sql("""
    SELECT test_3000_lines.artist_id, plays, test_3000_lines.ds FROM
    test_3000_lines
    LEFT JOIN(
    SELECT artist_id, avg(plays) as plays, ds from(
    SELECT test_3000_lines.artist_id, plays, test_3000_lines.ds from
    test_3000_lines
    LEFT JOIN(
    SELECT artist_id, count(*) as plays, ds from
    user_actions left JOIN songs
    on user_actions.song_id = songs.song_id
    WHERE ds >= '20150702' and ds <= '20150830' and action_type = '1'
    GROUP BY ds, artist_id
    ORDER BY artist_id, ds)a
    on test_3000_lines.artist_id = a.artist_id and test_3000_lines.ds = a.ds
    ORDER BY ds
    LIMIT 50, 3000)c
    GROUP BY artist_id
    )avgtmp
    on test_3000_lines.artist_id = avgtmp.artist_id
    ORDER BY ds
    LIMIT 50, 3000
    """, mysql_cn)
    train_y_data = train_y_data.fillna(value=0)
    train_y_data = train_y_data.sort_values(by=['artist_id', 'ds'])
    train_y_data['plays'] = train_y_data['plays'].astype(int)
#     print train_y_data
#     print train_y_data['plays'].values.shape
    # Fit plays_pred -> plays_true (uses the local LinearRegression class).
    model = LinearRegression()
    model.fit(train_X_data['plays'].values.reshape((train_X_data['plays'].values.shape[0],1)),
              train_y_data['plays'].values.reshape(train_y_data['plays'].values.shape[0]))
    submit_X_data = pd.read_csv('./%s/%s_submit_results.csv' %(dateNow, timeNow),
                                names=['artist_id', 'plays', 'ds'])
    submit_X_data = submit_X_data.sort_values(by=['artist_id', 'ds'])
    submit_X_data['plays'] = submit_X_data['plays'].astype(int)
#     print submit_X_data
    plays = pd.DataFrame(model.predict(
        submit_X_data['plays'].values.reshape((submit_X_data['plays'].values.shape[0],1))))
    submit_X_data['plays'] = plays.astype(int)
    # NOTE(review): sort_values returns a new frame that is discarded here; the
    # rows are written in the order produced above. Confirm this is intended.
    submit_X_data.sort_values(by=['artist_id', 'ds'])
    print 'Saving submit results...'
    submit_X_data.to_csv('./submit/submit_results.csv', header =False, index=False)
    get_avg_results('./submit/submit_results.csv')
    get_min_error_mean_results('./submit/submit_results.csv')
#     print 'LR params is ', model.get_params
"""
The following two functions generate the min error results.
"""
def get_min_error_res(play_res):
    """
    Return sum(x^2) / sum(x) over the play counts, or 0 when the total is
    zero (including the empty input).
    """
    total = sum(play_res)
    if total == 0:
        return 0
    return sum(r * r for r in play_res) / total
def get_min_error_mean_results(in_filename):
    """
    Read per-day predictions from ``in_filename`` (headerless csv:
    artist_id, times, ds) and write ``*_me.csv`` where every day of an
    artist gets the artist's single error-minimizing constant
    (see get_min_error_res).
    """
    keys = ['artist_id', 'times', 'ds']
    artist = {}
    data = pd.read_csv(in_filename, header = None, names = keys)
    days = set()
    # Group the play counts per artist and collect the set of days seen.
    for _, row in data.iterrows():
        artist_id = row[keys[0]]
        if artist_id not in artist:
            artist[artist_id] = []
        artist[artist_id].append(row[keys[1]])
        days.add(row[keys[2]])
    # Bug fix: the original called sorted(days) and discarded the returned
    # list, leaving the day order arbitrary. Sort for real here.
    days = sorted(days)
    out_filename= in_filename.replace('.csv', '_me.csv')
    results = []
    # dict.items() replaces the Python-2-only iteritems().
    for artist_id, times in artist.items():
        min_error_res = int(get_min_error_res(times))
        for day in days:
            results.append([artist_id, min_error_res, day])
    df = pd.DataFrame(results)
    df.columns = ['artist_id', 'plays', 'ds']
    df = df.sort_values(by = ['artist_id', 'ds'])
    df.to_csv(out_filename, header =False, index=False)
"""
The following two functions generate the average results.
"""
def get_ID_average(ifile_name = None):
    """
    Average the play counts per id from a headerless csv of
    (id, play_times, date) rows.

    Returns a dict mapping id -> mean play count (float).
    """
    assert ifile_name
    output = {}
    # ``with`` closes the handle; the original readlines() call leaked it.
    with open(ifile_name) as f:
        for item in f:
            user_id, play_times, _ = tuple(item.split(','))
            # setdefault replaces the Python-2-only has_key() branch.
            output.setdefault(user_id, []).append(float(play_times))
    for key in output.keys():
        output[key] = sum(output[key]) / len(output[key])
    return output
def get_avg_results(ifile_name = None, ratio = 1):
    """
    Rewrite every (id, plays, date) row of ``ifile_name`` with the id's
    average play count scaled by ``ratio``, writing to ``*_avg.csv``.
    """
    assert ifile_name
    ofile_name= ifile_name.replace('.csv', '_avg.csv')
    avg = get_ID_average(ifile_name)
    # ``with`` closes both handles; the original leaked the input handle.
    with open(ifile_name) as fi, open(ofile_name, 'w') as fo:
        for item in fi:
            user_id, play_times, pdate = tuple(item.split(','))
            play_times = avg[user_id] * ratio
            # pdate still carries its trailing newline, so no '\n' is added.
            fo.write('%s,%s,%s' %(user_id, int(play_times), pdate))
def get_f_score(y_true, y_pred):
    """
    Contest F-score: sqrt(sum of true plays) weighted by one minus the
    root-mean-square relative error. Entries where y_true is 0 are skipped
    to avoid division by zero.

    Both y_true and y_pred should be 1D array-likes.
    """
    sig = np.sqrt(np.sum(y_true))
    sq_rel_errors = [
        ((pred * 1.0 - true) / true) ** 2
        for true, pred in zip(y_true, y_pred)
        if true != 0
    ]
    delta = np.sqrt(np.mean(np.array(sq_rel_errors)))
    return (1 - delta) * sig
def evaluate(dateNow = None, timeNow = None):
    """
    Score the raw, averaged (_avg) and min-error (_me) test result files:
    the contest F-score, r2 and mse against the true play counts, printed
    (colored when termcolor is available) and appended to the run log.
    """
    print '------------------------Evaluating results---------------------------------'
    mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
    ifile_name = './%s/%s_test_results.csv' %(dateNow, timeNow)
    for ifile in [ifile_name,
                  ifile_name.replace('.csv', '_avg.csv'),
                  ifile_name.replace('.csv', '_me.csv')]:
        Id_date=pd.read_csv("./data/test.csv")[["artist_id","gmt_date","artist_target"]]
        Id_pred=pd.read_csv(ifile,
                            names=['artist_id', 'ypred', 'gmt_date'])
        fscore=0.0
        artists=Id_date.artist_id.unique()
        # Per-artist F-score, summed over all artists.
        for _,artist in enumerate(artists):
            df=Id_date[Id_date.artist_id==artist]
            df=df.sort_values(by="gmt_date")
            df2=Id_pred[Id_pred.artist_id==artist]
            df2=df2.sort_values(by="gmt_date")
            f=get_f_score(df["artist_target"],df2["ypred"])
            #print artist,f
            fscore+=f
        # True daily plays aligned with the fixed 3000-line test key table.
        y_true = pd.read_sql("""
        SELECT test_3000_lines.artist_id, plays, test_3000_lines.ds from
        test_3000_lines LEFT JOIN(
        SELECT artist_id, count(*) as plays, ds from
        user_actions left JOIN songs
        on user_actions.song_id = songs.song_id
        WHERE ds >= '20150702' and ds <= '20150830' and action_type = '1'
        GROUP BY ds, artist_id
        ORDER BY artist_id, ds)a
        on test_3000_lines.artist_id = a.artist_id and test_3000_lines.ds = a.ds
        ORDER BY ds
        LIMIT 50, 3000
        """, mysql_cn)
        y_true = y_true.fillna(value=0)
        y_true = y_true.sort_values(by=['artist_id', 'ds'])
        y_true = y_true['plays'].values
        Id_pred = Id_pred.sort_values(by=['artist_id', 'gmt_date'])
        y_pred = Id_pred['ypred'].values
        r2 = r2_score(y_true, y_pred)
        mse = mean_squared_error(y_true, y_pred)
        print '#####%s#####' %ifile
        try:
            from termcolor import colored
            print colored('final fscore', 'red'),":",colored(fscore, 'green')
            print colored('r2 score', 'red'),":",colored(r2, 'green')
            print colored('mse', 'red'),":",colored(mse, 'green')
        except:
            # termcolor is optional; fall back to plain output.
            print 'final fscore:',fscore
            print 'r2 score:', r2
            print 'mse:', mse
        log_file = open('./%s/%s.log' %(dateNow, timeNow), 'a')
        log_file.write('#####%s#####\n' %ifile)
        log_file.write('Final fscore:%f\n' %fscore)
        log_file.write('r2 score:%f\n' %r2)
        log_file.write('mse:%f\n' %mse)
        log_file.close()
def gen_alter_result_csv(dateNow = None, timeNow = None):
assert dateNow and timeNow
print '-----------------------Generating alter results-----------------------------'
file_list = ['./%s/%s_test_results.csv' %(dateNow, timeNow),
'./%s/%s_submit_results.csv' %(dateNow, timeNow)]
for ifile_name in file_list:
get_avg_results(ifile_name)
get_min_error_mean_results(ifile_name)
print ifile_name, ' altered!'
def gen_result_csv(dateNow = time.strftime('%Y%m%d'),
                   timeNow = time.strftime('%H%M%S'),
                   use_cache = True,
                   del_temp_result = True,
                   use_artist_features = False,
                   use_user_artist_features = False):
    """
    End-to-end pipeline: train/predict per user, aggregate to per-artist
    daily totals, align with the fixed 3000-line key tables, write the test
    and submit csvs, derive the averaged / min-error / fine-tuned variants,
    then score everything.
    """
    mysql_cn= pymysql.connect(host='localhost', port=3306,user='root', passwd='111111', db='music')
    print '-------------------------Getting model...---------------------------------'
    gen_predic_csv(dateNow, timeNow, use_cache,
                   use_artist_features= use_artist_features,
                   use_user_artist_features = use_user_artist_features)
    print '-----------------------Getting train results------------------------------'
    print 'Getting dataframe of train data...'
    df = pd.read_csv('./%s/%s_train.csv' %(dateNow, timeNow),
                     names=['user_id', 'artist_id', 'ds', 'plays'])
    df.pop('user_id')
    df['plays'] = df['plays'].astype(float)
    # Sum the per-user predictions into per-artist daily totals.
    df = df.groupby(['artist_id', 'ds']).sum()
    df['plays'] = df['plays'].astype(int)
    df = df.reset_index()
    ds = df.pop('ds')
    df.insert(2, 'ds', ds)
    df.columns = ['artist_id', 'plays', 'ds']
    # Stage into MySQL so the join with the fixed key table happens in SQL.
    df.to_sql('tmp_test_result', mysql_cn, flavor='mysql', if_exists='replace',
              index = False)
    df = pd.read_sql('''
    SELECT test_3000_lines.artist_id, tmp_test_result.plays, test_3000_lines.ds FROM
    test_3000_lines left join tmp_test_result
    on test_3000_lines.artist_id = tmp_test_result.artist_id and test_3000_lines.ds = tmp_test_result.ds;''',
                     con=mysql_cn)
    df = df.fillna(value=0)
    df['plays'] = df['plays'].astype(int)
    # NOTE(review): this sort_values result is discarded (not assigned); the
    # iloc[50:] below therefore slices in the join's original order. Confirm.
    df.sort_values(by='ds')
    df = df.iloc[50:, :]
    df = df.sort_values(by=['artist_id', 'ds'])
    print 'Saving test results...'
    df.to_csv('./%s/%s_test_results.csv' %(dateNow, timeNow), header =False, index=False)
    print '-----------------------Getting test results--------------------------------'
    print 'Getting dataframe of test data...'
    df = pd.read_csv('./%s/%s_test.csv' %(dateNow, timeNow),
                     names=['user_id', 'artist_id', 'ds', 'plays'])
    df.pop('user_id')
    df['plays'] = df['plays'].astype(float)
    df = df.groupby(['artist_id', 'ds']).sum()
    df['plays'] = df['plays'].astype(int)
    df = df.reset_index()
    ds = df.pop('ds')
    df.insert(2, 'ds', ds)
    df.columns = ['artist_id', 'plays', 'ds']
    df.to_sql('tmp_submit_result', mysql_cn, flavor='mysql', if_exists='replace',
              index = False)
    df = pd.read_sql('''
    SELECT submit_3000_lines.artist_id, tmp_submit_result.plays, submit_3000_lines.ds FROM
    submit_3000_lines left join tmp_submit_result
    on submit_3000_lines.artist_id = tmp_submit_result.artist_id and submit_3000_lines.ds = tmp_submit_result.ds;''',
                     con=mysql_cn)
    df = df.fillna(value=0)
    df['plays'] = df['plays'].astype(int)
    df = df.sort_values(by=['artist_id', 'ds'])
    print 'Saving submit results...'
    df.to_csv('./%s/%s_submit_results.csv' %(dateNow, timeNow), header =False, index=False)
    gen_alter_result_csv(dateNow, timeNow)
    gen_finally_results(dateNow, timeNow)
    if del_temp_result:
        print '-----------------------Deleting temp results--------------------------------'
        os.system('rm ./%s/%s_train.csv' %(dateNow, timeNow))
        print './%s/%s_train.csv deleted!' %(dateNow, timeNow)
        os.system('rm ./%s/%s_test.csv' %(dateNow, timeNow))
        print './%s/%s_test.csv deleted!' %(dateNow, timeNow)
    evaluate(dateNow, timeNow)
if __name__ == '__main__':
    # Command-line driver. Flags 'uc'/'dt'/'ua'/'uua' enable an option and the
    # '-'-prefixed forms disable it: data cache, temp-file cleanup, artist
    # features, user+artist features. All default to True.
    print '-------------------------System info--------------------------------------'
    dateNow = time.strftime('%Y%m%d')
    timeNow = time.strftime('%H%M%S')
    dataDirs = ['data', 'submit', 'ago', dateNow]
    for dataDir in dataDirs:
        if not os.path.exists(dataDir):
            os.system('mkdir %s' %dataDir)
    # Archive yesterday's result directory under ./ago when present.
    gapday = datetime.timedelta(days=1)
    tomorrow = (datetime.datetime.now() - gapday).strftime('%Y%m%d')
    if os.path.exists('./%s'%tomorrow):
        print 'Moving tomorrow %s files' %tomorrow
        os.system('mv ./%s ./ago' %tomorrow)
    args = {'uc':True, 'dt':True, 'ua':True, 'uua':True,
            '-uc':False, '-dt':False, '-ua':False, '-uua':False}
    use_cache = True
    del_temp_result = True
    use_artist_features = True
    use_user_artist_features = True
    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            assert args.has_key(arg)
            if arg =='uc' or arg == '-uc':
                use_cache = args[arg]
            elif arg =='dt' or arg == '-dt':
                del_temp_result = args[arg]
            elif arg =='ua' or arg == '-ua':
                use_artist_features = args[arg]
            elif arg =='uua' or arg == '-uua':
                use_user_artist_features = args[arg]
    get_color = lambda x: 'green' if x else 'red'
    try:
        from termcolor import colored
        print colored('Use data cache', 'blue'),":",colored(use_cache, get_color(use_cache))
        print colored('Delete temp files', 'blue'),":",colored(del_temp_result, get_color(del_temp_result))
        print colored('Use artist features', 'blue'),":",colored(use_artist_features, get_color(use_artist_features))
        print colored('Use user and artist features', 'blue'),":",colored(use_user_artist_features, get_color(use_user_artist_features))
    except:
        # termcolor is optional; fall back to plain output.
        print 'Use data cache:', use_cache
        print 'Delete temp files:', del_temp_result
        print 'Use artist features:',use_artist_features
        print 'Use user_artist features:',use_user_artist_features
    # Record the effective settings in this run's log file.
    log_file = open('./%s/%s.log' %(dateNow, timeNow), 'a')
    log_file.write('Use data cache:%s\n'%use_cache +
                   'Delete temp files:%s\n'%del_temp_result +
                   'Use artist features:%s\n'%use_artist_features +
                   'Use user_artist features:%s\n'%use_user_artist_features)
    log_file.close()
    timeStart = time.time()
    gen_result_csv(dateNow = dateNow,
                   timeNow = timeNow,
                   use_cache= use_cache,
                   del_temp_result=del_temp_result,
                   use_artist_features=use_artist_features,
                   use_user_artist_features=use_user_artist_features)
    timeUsed = int(time.time() - timeStart)
    info = '\nTotal use %d min(s) %d sec(s).' %(timeUsed / 60, timeUsed % 60)
    try:
        from termcolor import colored
        print colored(info, 'yellow')
    except:
        print info
#     gen_finally_results('20160528', '144748')
| wangqingbaidu/aliMusic | models/run_u_a_model.py | Python | gpl-3.0 | 28,923 |
import json
from django.test import LiveServerTestCase
from data_api.models import Command, LocalComputer, COMMAND_NOOP, Signal, System, Blob, Event, Setting
from vm.base import Configurator
from vm.data_connection import DataConnection
import datetime
import pytz
class TestDataConnection(LiveServerTestCase):
    """
    Data Connection API Tests

    Each test exercises a DataConnection configured against a fresh
    LocalComputer and the live test server.
    """
    def setUp(self):
        # With a configuration pointed to localhost
        self.local_computer = LocalComputer.objects.create(name="a_computer")
        self.configurator = Configurator()
        config = self.configurator.get_config()
        config['id'] = self.local_computer.id
        config['server'] = self.live_server_url
        self.data_connection = DataConnection(self.configurator)

    def tearDown(self):
        pass

    def test_get_new_commands(self):
        """Commands created server side are returned by get_new_commands."""
        # with a new command
        json_command = json.dumps({'a': 1, 'b': 'c'})
        Command.objects.create(type=COMMAND_NOOP, local_computer=self.local_computer, json_command=json_command)
        # should get command
        commands = self.data_connection.get_new_commands()
        self.assertEqual(len(commands), 1)
        self.assertEqual(commands[0]['type'], COMMAND_NOOP)
        self.assertEqual(commands[0]['json_command'], json_command)

    def test_add_signal_points(self):
        """Points uploaded by signal id round-trip through the API."""
        # with a signal and persisted data
        signal = Signal.objects.create(name='a_signal')
        n1 = Signal.utc_to_millisec(datetime.datetime.now(tz=pytz.UTC))
        n2 = Signal.utc_to_millisec(datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(seconds=1))
        json_data = [[1, n1], [2, n2]]
        self.data_connection.add_signal_points(signal.id, json_data)
        # should find persisted data
        points = signal.get_data()
        self.assertEqual(2, len(points))
        self.assertEqual(1, points[0][0])
        self.assertEqual(2, points[1][0])
        self.assertAlmostEqual(n1, points[0][1], 2)
        self.assertAlmostEqual(n2, points[1][1], 2)
        # should download points by id
        downloaded_points = self.data_connection.get_signal_points(signal.id)
        self.assertEqual(2, len(downloaded_points))
        self.assertEqual(1, downloaded_points[0][0])
        self.assertEqual(2, downloaded_points[1][0])
        self.assertAlmostEqual(n1, downloaded_points[0][1], 2)
        self.assertAlmostEqual(n2, downloaded_points[1][1], 2)

    def test_add_signal_points_by_name(self):
        """Points uploaded by signal name round-trip through the API."""
        # with a signal
        signal = Signal.objects.create(name='a_signal', local_computer=self.local_computer)
        # (a duplicated assignment of n1 was removed here)
        n1 = Signal.utc_to_millisec(datetime.datetime.now(tz=pytz.UTC))
        n2 = Signal.utc_to_millisec(datetime.datetime.now(tz=pytz.UTC) + datetime.timedelta(seconds=1))
        json_data = [[1, n1], [2, n2]]
        self.data_connection.add_signal_points_by_name(signal.name, json_data)
        # should find persisted data
        points = signal.get_data()
        self.assertEqual(2, len(points))
        self.assertEqual(1, points[0][0])
        self.assertEqual(2, points[1][0])
        self.assertAlmostEqual(n1, points[0][1], 2)
        self.assertAlmostEqual(n2, points[1][1], 2)
        # should download points by name
        downloaded_points = self.data_connection.get_signal_points_by_name(signal.name)
        self.assertEqual(2, len(downloaded_points))
        self.assertEqual(1, downloaded_points[0][0])
        self.assertEqual(2, downloaded_points[1][0])
        self.assertAlmostEqual(n1, downloaded_points[0][1], 2)
        self.assertAlmostEqual(n2, downloaded_points[1][1], 2)

    def test_add_blob(self):
        """Blob data set by id is persisted and downloadable."""
        # with a blob and uploaded data
        blob = Blob.objects.create(name='another blob', local_computer=self.local_computer)
        # list(range(...)) keeps this json-serializable on Python 3 as well.
        data = list(range(10, 20))
        blob_data = json.dumps(data)
        self.data_connection.set_blob_data(blob.id, blob_data)
        # server should persist the data
        self.assertIsNotNone(blob.get_data())
        self.assertSequenceEqual(json.loads(blob.get_data()), data)
        # should download data
        download_data = json.loads(self.data_connection.get_blob_data(blob.id))
        self.assertSequenceEqual(download_data, data)

    def test_add_blob_by_name(self):
        """Blob data set by name is persisted and downloadable."""
        # with a blob and uploaded data
        blob = Blob.objects.create(name='another blob', local_computer=self.local_computer)
        # list(range(...)) keeps this json-serializable on Python 3 as well.
        data = list(range(10, 20))
        blob_data = json.dumps(data)
        self.data_connection.set_blob_data_by_name(blob.name, blob_data)
        # server should persist the data
        self.assertIsNotNone(blob.get_data())
        self.assertSequenceEqual(json.loads(blob.get_data()), data)
        # should download data
        download_data = json.loads(self.data_connection.get_blob_data_by_name(blob.name))
        self.assertSequenceEqual(download_data, data)

    def test_get_or_create_signal(self):
        """get_or_create_signal creates the signal and returns its id."""
        # when creating a signal
        json_signal = self.data_connection.get_or_create_signal("a signal")
        # and looking it up in the db.
        created_signal = Signal.objects.get(name="a signal")
        # should create a signal
        self.assertIsNotNone(created_signal)
        # with proper id
        self.assertEqual(json_signal['id'], created_signal.id)

    def test_get_or_create_blob(self):
        self.data_connection.get_or_create_blob("a blob")
        self.assertIsNotNone(Blob.objects.get(name="a blob"))

    def test_get_or_create_setting(self):
        gain = self.data_connection.get_or_create_setting("gain")
        self.assertIsNotNone(Setting.objects.get(key="gain", local_computer_id=self.local_computer.id))
        self.assertEqual(gain['value'], "")

    def test_set_setting(self):
        Setting.objects.create(key="gain", local_computer_id=self.local_computer.id, value=1.0)
        gain = self.data_connection.get_or_create_setting("gain")
        self.assertEqual(float(gain['value']), 1.0)
        self.data_connection.write_setting("gain", 2.0)
        self.assertEqual(Setting.objects.get(key="gain", local_computer_id=self.local_computer.id).value, "2.0")
        gain = self.data_connection.get_or_create_setting("gain")
        self.assertEqual(gain['value'], "2.0")

    def test_create_event(self):
        self.data_connection.create_event("a type", "some text")
        self.assertIsNotNone(Event.objects.get(type="a type", info="some text", local_computer=self.local_computer))

    def test_create_multiple_signals(self):
        signal1 = self.data_connection.get_or_create_signal("signal1")
        self.assertEqual(signal1['id'], Signal.objects.get(name="signal1", local_computer=self.local_computer).id)
        signal2 = self.data_connection.get_or_create_signal("signal2")
        self.assertEqual(signal2['id'], Signal.objects.get(name="signal2", local_computer=self.local_computer).id)
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron import context
from neutron.db.quota import driver as quota_driver
from neutron import manager
from neutron.pecan_wsgi.controllers import resource
from neutron.pecan_wsgi.hooks import policy_enforcement as pe
from neutron import policy
from neutron.tests.functional.pecan_wsgi import test_functional
class TestOwnershipHook(test_functional.PecanFunctionalTest):
    """Check that a tenant may attach ports to its own network."""

    def test_network_ownership_check(self):
        headers = {'X-Project-Id': 'tenid'}
        # Create a network owned by 'tenid'.
        created_net = self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers=headers)
        net_id = jsonutils.loads(created_net.body)['network']['id']
        # A port on that network from the same tenant must be accepted.
        created_port = self.app.post_json(
            '/v2.0/ports.json',
            params={'port': {'network_id': net_id,
                             'admin_state_up': True}},
            headers=headers)
        self.assertEqual(201, created_port.status_int)
class TestQuotaEnforcementHook(test_functional.PecanFunctionalTest):
    """Check that network quotas are enforced for single and bulk POSTs."""

    def _post_networks(self, body, expect_errors=False):
        # Helper: POST the given payload to the networks endpoint as 'tenid'.
        return self.app.post_json('/v2.0/networks.json',
                                  params=body,
                                  headers={'X-Project-Id': 'tenid'},
                                  expect_errors=expect_errors)

    def test_quota_enforcement_single(self):
        quota_driver.DbQuotaDriver.update_quota_limit(
            context.get_admin_context(), 'tenid', 'network', 1)
        # The first creation fits within the quota of 1...
        ok = self._post_networks({'network': {'name': 'meh'}})
        self.assertEqual(ok.status_int, 201)
        # ...but the second must be rejected with a conflict.
        conflict = self._post_networks({'network': {'name': 'meh-2'}},
                                       expect_errors=True)
        self.assertEqual(conflict.status_int, 409)

    def test_quota_enforcement_bulk_request(self):
        quota_driver.DbQuotaDriver.update_quota_limit(
            context.get_admin_context(), 'tenid', 'network', 3)
        # Two networks fit within the quota of 3...
        ok = self._post_networks(
            {'networks': [{'name': 'meh1'}, {'name': 'meh2'}]})
        self.assertEqual(ok.status_int, 201)
        # ...but two more would exceed it and must be rejected.
        conflict = self._post_networks(
            {'networks': [{'name': 'meh3'}, {'name': 'meh4'}]},
            expect_errors=True)
        self.assertEqual(conflict.status_int, 409)
class TestPolicyEnforcementHook(test_functional.PecanFunctionalTest):
    """Exercise the pecan policy-enforcement hook against a fake resource.

    A synthetic 'meh' resource is registered so these tests stay
    independent of the real API resources and their default policies.
    """

    # Attribute map for the fake resource; 'restricted_attr' is meant to be
    # admin-only via the policies injected in setUp below.
    FAKE_RESOURCE = {
        'mehs': {
            'id': {'allow_post': False, 'allow_put': False,
                   'is_visible': True, 'primary_key': True},
            'attr': {'allow_post': True, 'allow_put': True,
                     'is_visible': True, 'default': ''},
            'restricted_attr': {'allow_post': True, 'allow_put': True,
                                'is_visible': True, 'default': ''},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'required_by_policy': True,
                          'validate': {'type:string':
                                       attributes.TENANT_ID_MAX_LEN},
                          'is_visible': True}
        }
    }

    def setUp(self):
        # Create a controller for a fake resource. This will make the tests
        # independent from the evolution of the API (so if one changes the API
        # or the default policies there won't be any risk of breaking these
        # tests, or at least I hope so)
        super(TestPolicyEnforcementHook, self).setUp()
        self.mock_plugin = mock.Mock()
        attributes.RESOURCE_ATTRIBUTE_MAP.update(self.FAKE_RESOURCE)
        attributes.PLURALS['mehs'] = 'meh'
        manager.NeutronManager.set_plugin_for_resource('meh', self.mock_plugin)
        fake_controller = resource.CollectionsController('mehs', 'meh')
        manager.NeutronManager.set_controller_for_resource(
            'mehs', fake_controller)
        # Inject policies for the fake resource
        policy.init()
        policy._ENFORCER.set_rules(
            oslo_policy.Rules.from_dict(
                {'create_meh': '',
                 'update_meh': 'rule:admin_only',
                 'delete_meh': 'rule:admin_only',
                 'get_meh': 'rule:admin_only or field:mehs:id=xxx',
                 'get_meh:restricted_attr': 'rule:admin_only'}),
            overwrite=False)

    def test_before_on_create_authorized(self):
        # Mock a return value for an hypothetical create operation
        self.mock_plugin.create_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.post_json('/v2.0/mehs.json',
                                      params={'meh': {'attr': 'meh'}},
                                      headers={'X-Project-Id': 'tenid'})
        # We expect this operation to succeed
        self.assertEqual(201, response.status_int)
        # create policy is open (''), so no resource fetch is needed.
        self.assertEqual(0, self.mock_plugin.get_meh.call_count)
        self.assertEqual(1, self.mock_plugin.create_meh.call_count)

    def test_before_on_put_not_authorized(self):
        # The policy hook here should load the resource, and therefore we must
        # mock a get response
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and the
        # plugin method should not be called at all
        response = self.app.put_json('/v2.0/mehs/xxx.json',
                                     params={'meh': {'attr': 'meh'}},
                                     headers={'X-Project-Id': 'tenid'},
                                     expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.update_meh.call_count)

    def test_before_on_delete_not_authorized(self):
        # The policy hook here should load the resource, and therefore we must
        # mock a get response
        self.mock_plugin.delete_meh.return_value = None
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and the
        # plugin method should not be called
        response = self.app.delete_json('/v2.0/mehs/xxx.json',
                                        headers={'X-Project-Id': 'tenid'},
                                        expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.delete_meh.call_count)

    def test_after_on_get_not_authorized(self):
        # The GET test policy will deny access to anything whose id is not
        # 'xxx', so the following request should be forbidden
        self.mock_plugin.get_meh.return_value = {
            'id': 'yyy',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'after', and the
        # plugin method should be called
        response = self.app.get('/v2.0/mehs/yyy.json',
                                headers={'X-Project-Id': 'tenid'},
                                expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)

    def test_after_on_get_excludes_admin_attribute(self):
        # Non-admin GET succeeds, but the admin-only attribute is stripped
        # from the response by the 'after' hook.
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.get('/v2.0/mehs/xxx.json',
                                headers={'X-Project-Id': 'tenid'})
        self.assertEqual(200, response.status_int)
        json_response = jsonutils.loads(response.body)
        self.assertNotIn('restricted_attr', json_response['meh'])

    def test_after_on_list_excludes_admin_attribute(self):
        # The same attribute stripping applies to list responses, item by item.
        self.mock_plugin.get_mehs.return_value = [{
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}]
        response = self.app.get('/v2.0/mehs',
                                headers={'X-Project-Id': 'tenid'})
        self.assertEqual(200, response.status_int)
        json_response = jsonutils.loads(response.body)
        self.assertNotIn('restricted_attr', json_response['mehs'][0])
class TestDHCPNotifierHook(test_functional.PecanFunctionalTest):
    """Verify the notifier hook emits DHCP agent notifications correctly."""

    def setUp(self):
        # the DHCP notifier needs to be mocked so that correct operations can
        # be easily validated. For the purpose of this test it is indeed not
        # necessary that the notification is actually received and processed by
        # the agent
        patcher = mock.patch('neutron.api.rpc.agentnotifiers.'
                             'dhcp_rpc_agent_api.DhcpAgentNotifyAPI.notify')
        self.mock_notifier = patcher.start()
        super(TestDHCPNotifierHook, self).setUp()

    def test_dhcp_notifications_disabled(self):
        # With the config knob off, a create must emit no notification.
        cfg.CONF.set_override('dhcp_agent_notification', False)
        self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers={'X-Project-Id': 'tenid'})
        self.assertEqual(0, self.mock_notifier.call_count)

    def test_get_does_not_trigger_notification(self):
        # Read-only requests must never notify the DHCP agents.
        self.do_request('/v2.0/networks', tenant_id='tenid')
        self.assertEqual(0, self.mock_notifier.call_count)

    def test_post_put_delete_triggers_notification(self):
        # Each mutating operation must emit exactly one notification with the
        # corresponding <resource>.<action>.end event type.
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}}, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(1, self.mock_notifier.call_count)
        self.assertEqual(mock.call(mock.ANY, json_body, 'network.create.end'),
                         self.mock_notifier.mock_calls[-1])
        network_id = json_body['network']['id']
        response = self.app.put_json(
            '/v2.0/networks/%s.json' % network_id,
            params={'network': {'name': 'meh-2'}},
            headers=req_headers)
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(2, self.mock_notifier.call_count)
        self.assertEqual(mock.call(mock.ANY, json_body, 'network.update.end'),
                         self.mock_notifier.mock_calls[-1])
        response = self.app.delete(
            '/v2.0/networks/%s.json' % network_id, headers=req_headers)
        self.assertEqual(204, response.status_int)
        self.assertEqual(3, self.mock_notifier.call_count)
        # No need to validate data content sent to the notifier as it's just
        # going to load the object from the database
        self.assertEqual(mock.call(mock.ANY, mock.ANY, 'network.delete.end'),
                         self.mock_notifier.mock_calls[-1])

    def test_bulk_create_triggers_notifications(self):
        # A bulk POST must emit one create.end notification per created item.
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params={'networks': [{'name': 'meh_1'},
                                 {'name': 'meh_2'}]},
            headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        item_1 = json_body['networks'][0]
        item_2 = json_body['networks'][1]
        self.assertEqual(2, self.mock_notifier.call_count)
        self.mock_notifier.assert_has_calls(
            [mock.call(mock.ANY, {'network': item_1}, 'network.create.end'),
             mock.call(mock.ANY, {'network': item_2}, 'network.create.end')])
class TestNovaNotifierHook(test_functional.PecanFunctionalTest):
    """Verify the notifier hook forwards resource changes to Nova."""

    def setUp(self):
        # Mock the outbound Nova call so tests can assert on invocations
        # without a running Nova service.
        patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.'
                             '_nova_notify')
        self.mock_notifier = patcher.start()
        super(TestNovaNotifierHook, self).setUp()

    def test_nova_notification_skips_on_failure(self):
        # Updating a non-existent resource fails with 404 and must not
        # produce any notification.
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.put_json(
            '/v2.0/networks/%s.json' % uuidutils.generate_uuid(),
            params={'network': {'name': 'meh-2'}},
            headers=req_headers,
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertFalse(self.mock_notifier.called)

    def test_nova_notifications_disabled(self):
        # Notifications are suppressed entirely when the config knob is off.
        cfg.CONF.set_override('notify_nova_on_port_data_changes', False)
        self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers={'X-Project-Id': 'tenid'})
        self.assertFalse(self.mock_notifier.called)

    def test_post_put_delete_triggers_notification(self):
        # Each mutating operation notifies Nova once, passing the original
        # resource state (empty dict for create/delete payload slots).
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}}, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.assert_called_once_with('create', 'network', {},
                                                   json_body)
        self.mock_notifier.reset_mock()
        network_id = json_body['network']['id']
        # NOTE(kevinbenton): the original passed into the notifier does
        # not contain all of the fields of the object. Only those required
        # by the policy engine are included.
        controller = manager.NeutronManager.get_controller_for_resource(
            'networks')
        orig = pe.fetch_resource(context.get_admin_context(), controller,
                                 'network', network_id)
        response = self.app.put_json(
            '/v2.0/networks/%s.json' % network_id,
            params={'network': {'name': 'meh-2'}},
            headers=req_headers)
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.assert_called_once_with('update', 'network',
                                                   orig, json_body)
        self.mock_notifier.reset_mock()
        # Re-fetch the current state to compare against the delete payload.
        orig = pe.fetch_resource(context.get_admin_context(), controller,
                                 'network', network_id)
        response = self.app.delete(
            '/v2.0/networks/%s.json' % network_id, headers=req_headers)
        self.assertEqual(204, response.status_int)
        # No need to validate data content sent to the notifier as it's just
        # going to load the object from the database
        self.mock_notifier.assert_called_once_with('delete', 'network', {},
                                                   {'network': orig})

    def test_bulk_create_triggers_notifications(self):
        # One notification per item in a bulk create.
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params={'networks': [{'name': 'meh_1'},
                                 {'name': 'meh_2'}]},
            headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        item_1 = json_body['networks'][0]
        item_2 = json_body['networks'][1]
        self.assertEqual(
            [mock.call('create', 'network', {}, {'network': item_1}),
             mock.call('create', 'network', {}, {'network': item_2})],
            self.mock_notifier.mock_calls)
class TestMetricsNotifierHook(test_functional.PecanFunctionalTest):
    """Verify <resource>.<action>.start/.end metric notifications.

    On plugin failures only the .start event must be emitted; the .end
    event is suppressed.
    """

    def setUp(self):
        # Mock the oslo.messaging notifier; assertions are made against
        # its 'info' method.
        patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.'
                             '_notifier')
        self.mock_notifier = patcher.start().info
        super(TestMetricsNotifierHook, self).setUp()

    def test_post_put_delete_triggers_notification(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.create.start', payload),
             mock.call(mock.ANY, 'network.create.end', json_body)],
            self.mock_notifier.mock_calls)
        self.mock_notifier.reset_mock()
        network_id = json_body['network']['id']
        payload = {'network': {'name': 'meh-2'}}
        response = self.app.put_json(
            '/v2.0/networks/%s.json' % network_id,
            params=payload, headers=req_headers)
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        # id should be in payload sent to notifier
        payload['id'] = network_id
        self.assertEqual(
            [mock.call(mock.ANY, 'network.update.start', payload),
             mock.call(mock.ANY, 'network.update.end', json_body)],
            self.mock_notifier.mock_calls)
        self.mock_notifier.reset_mock()
        response = self.app.delete(
            '/v2.0/networks/%s.json' % network_id, headers=req_headers)
        self.assertEqual(204, response.status_int)
        # Delete notifications carry only the resource id.
        payload = {'network_id': network_id}
        self.assertEqual(
            [mock.call(mock.ANY, 'network.delete.start', payload),
             mock.call(mock.ANY, 'network.delete.end', payload)],
            self.mock_notifier.mock_calls)

    def test_bulk_create_triggers_notification(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload,
            headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        # One start event for the bulk request, one end with the full body.
        self.assertEqual(2, self.mock_notifier.call_count)
        self.mock_notifier.assert_has_calls(
            [mock.call(mock.ANY, 'network.create.start', payload),
             mock.call(mock.ANY, 'network.create.end', json_body)])

    def test_bad_create_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(plugin, 'create_network',
                               side_effect=ValueError):
            response = self.app.post_json(
                '/v2.0/networks.json',
                params=payload, headers=req_headers,
                expect_errors=True)
        self.assertEqual(500, response.status_int)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.create.start', mock.ANY)],
            self.mock_notifier.mock_calls)

    def test_bad_update_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        # Fix: the setup POST is expected to succeed, so it must not pass
        # expect_errors=True; doing so would mask an unexpected failure
        # until the confusing 201 assertion below.
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.reset_mock()
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(plugin, 'update_network',
                               side_effect=ValueError):
            response = self.app.put_json(
                '/v2.0/networks/%s.json' % json_body['network']['id'],
                params=payload, headers=req_headers,
                expect_errors=True)
        self.assertEqual(500, response.status_int)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.update.start', mock.ANY)],
            self.mock_notifier.mock_calls)

    def test_bad_delete_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        # Fix: same as above — the setup POST must surface real failures.
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.reset_mock()
        plugin = manager.NeutronManager.get_plugin()
        with mock.patch.object(plugin, 'delete_network',
                               side_effect=ValueError):
            response = self.app.delete(
                '/v2.0/networks/%s.json' % json_body['network']['id'],
                headers=req_headers, expect_errors=True)
        self.assertEqual(500, response.status_int)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.delete.start', mock.ANY)],
            self.mock_notifier.mock_calls)
import re
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.sites.models import Site
from django.db.models import Q
from django.db.models.query import EmptyQuerySet
from django.template import RequestContext
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode
from django.utils.translation import get_language, activate
def _strip_tags(value):
    """
    Return *value* with every HTML tag removed.

    This mirrors django.utils.html.strip_tags, except that each tag is
    replaced by a space so that words from adjacent elements do not get
    erroneously concatenated.
    """
    text = force_unicode(value)
    return re.sub(r'<[^>]*?>', ' ', text)
# importlib is in the standard library from Python 2.7 onwards; fall back
# to Django's bundled copy on older interpreters.
try:
    import importlib
except ImportError:
    from django.utils import importlib
from haystack import indexes, site
from cms.models.pluginmodel import CMSPlugin
from cms_search import models as proxy_models
from cms_search import settings as search_settings
def _get_index_base():
    """
    Resolve and validate the configured search-index base class.

    Imports the dotted path from CMS_SEARCH_INDEX_BASE_CLASS and checks
    it is a haystack SearchIndex subclass, raising ImproperlyConfigured
    otherwise.
    """
    dotted_path = search_settings.INDEX_BASE_CLASS
    module_name, class_name = dotted_path.rsplit('.', 1)
    imported_module = importlib.import_module(module_name)
    base_class = getattr(imported_module, class_name, None)
    if not base_class:
        raise ImproperlyConfigured('CMS_SEARCH_INDEX_BASE_CLASS: module %s has no class %s' % (module_name, class_name))
    if not issubclass(base_class, indexes.SearchIndex):
        raise ImproperlyConfigured('CMS_SEARCH_INDEX_BASE_CLASS: %s is not a subclass of haystack.indexes.SearchIndex' % search_settings.INDEX_BASE_CLASS)
    return base_class
# Shared RequestFactory used to fabricate a fake request when rendering
# plugins outside of a real request/response cycle.
rf = RequestFactory()
def page_index_factory(language_code):
    """Build a haystack SearchIndex class bound to one language.

    The returned class indexes CMS pages by rendering their plugins in
    ``language_code`` and collecting the resulting text into the document
    field.
    """
    class _PageIndex(_get_index_base()):
        _language = language_code

        language = indexes.CharField()
        text = indexes.CharField(document=True, use_template=False)
        pub_date = indexes.DateTimeField(model_attr='publication_date', null=True)
        login_required = indexes.BooleanField(model_attr='login_required')
        url = indexes.CharField(stored=True, indexed=False, model_attr='get_absolute_url')
        title = indexes.CharField(stored=True, indexed=False, model_attr='get_title')
        site_id = indexes.IntegerField(stored=True, indexed=True, model_attr='site_id')

        def prepare(self, obj):
            # Temporarily activate this index's language so titles/URLs
            # and plugin rendering happen in the right translation; the
            # caller's language is restored in the finally block below.
            current_languge = get_language()
            try:
                if current_languge != self._language:
                    activate(self._language)
                # Fake request used for plugin rendering context.
                request = rf.get("/")
                request.session = {}
                request.LANGUAGE_CODE = self._language
                self.prepared_data = super(_PageIndex, self).prepare(obj)
                plugins = CMSPlugin.objects.filter(language=language_code, placeholder__in=obj.placeholders.all())
                text = u''
                for base_plugin in plugins:
                    instance, plugin_type = base_plugin.get_plugin_instance()
                    if instance is None:
                        # this is an empty plugin
                        continue
                    # Index declared search_fields, and optionally the full
                    # rendered plugin output when search_fulltext is set on
                    # either the instance or the plugin class.
                    if hasattr(instance, 'search_fields'):
                        text += u' '.join(force_unicode(_strip_tags(getattr(instance, field, ''))) for field in instance.search_fields)
                    if getattr(instance, 'search_fulltext', False) or getattr(plugin_type, 'search_fulltext', False):
                        text += _strip_tags(instance.render_plugin(context=RequestContext(request))) + u' '
                text += obj.get_meta_description() or u''
                text += u' '
                text += obj.get_meta_keywords() or u''
                self.prepared_data['text'] = text
                self.prepared_data['language'] = self._language
                return self.prepared_data
            finally:
                # Always restore the language that was active on entry.
                if get_language() != current_languge:
                    activate(current_languge)

        def index_queryset(self):
            # get the correct language and exclude pages that have a redirect
            base_qs = super(_PageIndex, self).index_queryset()
            result_qs = EmptyQuerySet()
            for site_obj in Site.objects.all():
                qs = base_qs.published(site=site_obj.id).filter(
                    Q(title_set__language=language_code) & (Q(title_set__redirect__exact='') | Q(title_set__redirect__isnull=True)))
                if 'publisher' in settings.INSTALLED_APPS:
                    # With django-cms publisher enabled, index drafts only.
                    qs = qs.filter(publisher_is_draft=True)
                qs = qs.distinct()
                result_qs |= qs
            return result_qs

    return _PageIndex
# Register one proxy model / index pair per configured language so each
# language gets its own search index.
for language_code, language_name in settings.LANGUAGES:
    proxy_model = getattr(proxy_models, proxy_models.proxy_name(language_code))
    index = page_index_factory(language_code)
    if proxy_model:
        site.register(proxy_model, index)
    else:
        print "no page proxy model found for language %s" % language_code
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
#
# Convert locations to and from short codes.
#
# Open Location Codes are short, 10-11 character codes that can be used instead
# of street addresses. The codes can be generated and decoded offline, and use
# a reduced character set that minimises the chance of codes including words.
#
# Codes are able to be shortened relative to a nearby location. This means that
# in many cases, only four to seven characters of the code are needed.
# To recover the original code, the same location is not required, as long as
# a nearby location is provided.
#
# Codes represent rectangular areas rather than points, and the longer the
# code, the smaller the area. A 10 character code represents a 13.5x13.5
# meter area (at the equator. An 11 character code represents approximately
# a 2.8x3.5 meter area.
#
# Two encoding algorithms are used. The first 10 characters are pairs of
# characters, one for latitude and one for longitude, using base 20. Each pair
# reduces the area of the code by a factor of 400. Only even code lengths are
# sensible, since an odd-numbered length would have sides in a ratio of 20:1.
#
# At position 11, the algorithm changes so that each character selects one
# position from a 4x5 grid. This allows single-character refinements.
#
# Examples:
#
# Encode a location, default accuracy:
# encode(47.365590, 8.524997)
#
# Encode a location using one stage of additional refinement:
# encode(47.365590, 8.524997, 11)
#
# Decode a full code:
# coord = decode(code)
# msg = "Center is {lat}, {lon}".format(lat=coord.latitudeCenter, lon=coord.longitudeCenter)
#
# Attempt to trim the first characters from a code:
# shorten('8FVC9G8F+6X', 47.5, 8.5)
#
# Recover the full code from a short code:
# recoverNearest('9G8F+6X', 47.4, 8.6)
# recoverNearest('8F+6X', 47.4, 8.6)
import re
import math

# A separator used to break the code into two parts to aid memorability.
SEPARATOR_ = '+'

# The number of characters to place before the separator.
SEPARATOR_POSITION_ = 8

# The character used to pad codes.
PADDING_CHARACTER_ = '0'

# The character set used to encode the values.
CODE_ALPHABET_ = '23456789CFGHJMPQRVWX'

# The base to use to convert numbers to/from.
ENCODING_BASE_ = len(CODE_ALPHABET_)

# The maximum value for latitude in degrees.
LATITUDE_MAX_ = 90

# The maximum value for longitude in degrees.
LONGITUDE_MAX_ = 180

# The max number of digits to process in a plus code.
MAX_DIGIT_COUNT_ = 15

# Maximum code length using lat/lng pair encoding. The area of such a
# code is approximately 13x13 meters (at the equator), and should be suitable
# for identifying buildings. This excludes prefix and separator characters.
PAIR_CODE_LENGTH_ = 10

# First place value of the pairs (if the last pair value is 1).
# Fix: use floor division in the exponent. With true division the exponent
# is a float under Python 3, which makes this constant a float and silently
# floats all downstream place-value arithmetic in decode().
PAIR_FIRST_PLACE_VALUE_ = ENCODING_BASE_**(PAIR_CODE_LENGTH_ // 2 - 1)

# Inverse of the precision of the pair section of the code.
PAIR_PRECISION_ = ENCODING_BASE_**3

# The resolution values in degrees for each position in the lat/lng pair
# encoding. These give the place value of each position, and therefore the
# dimensions of the resulting area.
PAIR_RESOLUTIONS_ = [20.0, 1.0, .05, .0025, .000125]

# Number of digits in the grid precision part of the code.
GRID_CODE_LENGTH_ = MAX_DIGIT_COUNT_ - PAIR_CODE_LENGTH_

# Number of columns in the grid refinement method.
GRID_COLUMNS_ = 4

# Number of rows in the grid refinement method.
GRID_ROWS_ = 5

# First place value of the latitude grid (if the last place is 1).
GRID_LAT_FIRST_PLACE_VALUE_ = GRID_ROWS_**(GRID_CODE_LENGTH_ - 1)

# First place value of the longitude grid (if the last place is 1).
GRID_LNG_FIRST_PLACE_VALUE_ = GRID_COLUMNS_**(GRID_CODE_LENGTH_ - 1)

# Multiply latitude by this much to make it a multiple of the finest
# precision.
FINAL_LAT_PRECISION_ = PAIR_PRECISION_ * GRID_ROWS_**(MAX_DIGIT_COUNT_ -
                                                      PAIR_CODE_LENGTH_)

# Multiply longitude by this much to make it a multiple of the finest
# precision.
FINAL_LNG_PRECISION_ = PAIR_PRECISION_ * GRID_COLUMNS_**(MAX_DIGIT_COUNT_ -
                                                         PAIR_CODE_LENGTH_)

# Minimum length of a code that can be shortened.
MIN_TRIMMABLE_CODE_LEN_ = 6

GRID_SIZE_DEGREES_ = 0.000125
"""
Determines if a code is valid.
To be valid, all characters must be from the Open Location Code character
set with at most one separator. The separator can be in any even-numbered
position up to the eighth digit.
"""
def isValid(code):
# The separator is required.
sep = code.find(SEPARATOR_)
if code.count(SEPARATOR_) > 1:
return False
# Is it the only character?
if len(code) == 1:
return False
# Is it in an illegal position?
if sep == -1 or sep > SEPARATOR_POSITION_ or sep % 2 == 1:
return False
# We can have an even number of padding characters before the separator,
# but then it must be the final character.
pad = code.find(PADDING_CHARACTER_)
if pad != -1:
# Short codes cannot have padding
if sep < SEPARATOR_POSITION_:
return False
# Not allowed to start with them!
if pad == 0:
return False
# There can only be one group and it must have even length.
rpad = code.rfind(PADDING_CHARACTER_) + 1
pads = code[pad:rpad]
if len(pads) % 2 == 1 or pads.count(PADDING_CHARACTER_) != len(pads):
return False
# If the code is long enough to end with a separator, make sure it does.
if not code.endswith(SEPARATOR_):
return False
# If there are characters after the separator, make sure there isn't just
# one of them (not legal).
if len(code) - sep - 1 == 1:
return False
# Check the code contains only valid characters.
sepPad = SEPARATOR_ + PADDING_CHARACTER_
for ch in code:
if ch.upper() not in CODE_ALPHABET_ and ch not in sepPad:
return False
return True
"""
Determines if a code is a valid short code.
A short Open Location Code is a sequence created by removing four or more
digits from an Open Location Code. It must include a separator
character.
"""
def isShort(code):
# Check it's valid.
if not isValid(code):
return False
# If there are less characters than expected before the SEPARATOR.
sep = code.find(SEPARATOR_)
if sep >= 0 and sep < SEPARATOR_POSITION_:
return True
return False
"""
Determines if a code is a valid full Open Location Code.
Not all possible combinations of Open Location Code characters decode to
valid latitude and longitude values. This checks that a code is valid
and also that the latitude and longitude values are legal. If the prefix
character is present, it must be the first character. If the separator
character is present, it must be after four characters.
"""
def isFull(code):
if not isValid(code):
return False
# If it's short, it's not full
if isShort(code):
return False
# Work out what the first latitude character indicates for latitude.
firstLatValue = CODE_ALPHABET_.find(code[0].upper()) * ENCODING_BASE_
if firstLatValue >= LATITUDE_MAX_ * 2:
# The code would decode to a latitude of >= 90 degrees.
return False
if len(code) > 1:
# Work out what the first longitude character indicates for longitude.
firstLngValue = CODE_ALPHABET_.find(code[1].upper()) * ENCODING_BASE_
if firstLngValue >= LONGITUDE_MAX_ * 2:
# The code would decode to a longitude of >= 180 degrees.
return False
return True
"""
Encode a location into an Open Location Code.
Produces a code of the specified length, or the default length if no length
is provided.
The length determines the accuracy of the code. The default length is
10 characters, returning a code of approximately 13.5x13.5 meters. Longer
codes represent smaller areas, but lengths > 14 are sub-centimetre and so
11 or 12 are probably the limit of useful codes.
Args:
latitude: A latitude in signed decimal degrees. Will be clipped to the
range -90 to 90.
longitude: A longitude in signed decimal degrees. Will be normalised to
the range -180 to 180.
codeLength: The number of significant digits in the output code, not
including any separator characters.
"""
def encode(latitude, longitude, codeLength=PAIR_CODE_LENGTH_):
if codeLength < 2 or (codeLength < PAIR_CODE_LENGTH_ and
codeLength % 2 == 1):
raise ValueError('Invalid Open Location Code length - ' +
str(codeLength))
codeLength = min(codeLength, MAX_DIGIT_COUNT_)
# Ensure that latitude and longitude are valid.
latitude = clipLatitude(latitude)
longitude = normalizeLongitude(longitude)
# Latitude 90 needs to be adjusted to be just less, so the returned code
# can also be decoded.
if latitude == 90:
latitude = latitude - computeLatitudePrecision(codeLength)
code = ''
# Compute the code.
# This approach converts each value to an integer after multiplying it by
# the final precision. This allows us to use only integer operations, so
# avoiding any accumulation of floating point representation errors.
# Multiply values by their precision and convert to positive.
# Force to integers so the division operations will have integer results.
# Note: Python requires rounding before truncating to ensure precision!
latVal = int(round((latitude + LATITUDE_MAX_) * FINAL_LAT_PRECISION_, 6))
lngVal = int(round((longitude + LONGITUDE_MAX_) * FINAL_LNG_PRECISION_, 6))
# Compute the grid part of the code if necessary.
if codeLength > PAIR_CODE_LENGTH_:
for i in range(0, MAX_DIGIT_COUNT_ - PAIR_CODE_LENGTH_):
latDigit = latVal % GRID_ROWS_
lngDigit = lngVal % GRID_COLUMNS_
ndx = latDigit * GRID_COLUMNS_ + lngDigit
code = CODE_ALPHABET_[ndx] + code
latVal //= GRID_ROWS_
lngVal //= GRID_COLUMNS_
else:
latVal //= pow(GRID_ROWS_, GRID_CODE_LENGTH_)
lngVal //= pow(GRID_COLUMNS_, GRID_CODE_LENGTH_)
# Compute the pair section of the code.
for i in range(0, PAIR_CODE_LENGTH_ // 2):
code = CODE_ALPHABET_[lngVal % ENCODING_BASE_] + code
code = CODE_ALPHABET_[latVal % ENCODING_BASE_] + code
latVal //= ENCODING_BASE_
lngVal //= ENCODING_BASE_
# Add the separator character.
code = code[:SEPARATOR_POSITION_] + SEPARATOR_ + code[SEPARATOR_POSITION_:]
# If we don't need to pad the code, return the requested section.
if codeLength >= SEPARATOR_POSITION_:
return code[0:codeLength + 1]
# Pad and return the code.
return code[0:codeLength] + ''.zfill(SEPARATOR_POSITION_ -
codeLength) + SEPARATOR_
"""
Decodes an Open Location Code into the location coordinates.
Returns a CodeArea object that includes the coordinates of the bounding
box - the lower left, center and upper right.
Args:
code: The Open Location Code to decode.
Returns:
A CodeArea object that provides the latitude and longitude of two of the
corners of the area, the center, and the length of the original code.
"""
def decode(code):
    # Only full codes carry an absolute position; short codes must first be
    # recovered via recoverNearest().
    if not isFull(code):
        raise ValueError(
            'Passed Open Location Code is not a valid full code - ' + str(code))
    # Strip out separator character (we've already established the code is
    # valid so the maximum is one), and padding characters. Convert to upper
    # case and constrain to the maximum number of digits.
    code = re.sub('[+0]', '', code)
    code = code.upper()
    code = code[:MAX_DIGIT_COUNT_]
    # Initialise the values for each section. We work them out as integers and
    # convert them to floats at the end.
    normalLat = -LATITUDE_MAX_ * PAIR_PRECISION_
    normalLng = -LONGITUDE_MAX_ * PAIR_PRECISION_
    gridLat = 0
    gridLng = 0
    # How many digits do we have to process?
    digits = min(len(code), PAIR_CODE_LENGTH_)
    # Define the place value for the most significant pair.
    pv = PAIR_FIRST_PLACE_VALUE_
    # Decode the paired digits. Even indices encode latitude, odd longitude.
    for i in range(0, digits, 2):
        normalLat += CODE_ALPHABET_.find(code[i]) * pv
        normalLng += CODE_ALPHABET_.find(code[i + 1]) * pv
        if i < digits - 2:
            pv //= ENCODING_BASE_
    # Convert the place value to a float in degrees.
    latPrecision = float(pv) / PAIR_PRECISION_
    lngPrecision = float(pv) / PAIR_PRECISION_
    # Process any extra precision digits.
    if len(code) > PAIR_CODE_LENGTH_:
        # Initialise the place values for the grid.
        rowpv = GRID_LAT_FIRST_PLACE_VALUE_
        colpv = GRID_LNG_FIRST_PLACE_VALUE_
        # How many digits do we have to process?
        digits = min(len(code), MAX_DIGIT_COUNT_)
        for i in range(PAIR_CODE_LENGTH_, digits):
            # Each grid digit encodes one (row, col) cell of a
            # GRID_ROWS_ x GRID_COLUMNS_ subdivision.
            digitVal = CODE_ALPHABET_.find(code[i])
            row = digitVal // GRID_COLUMNS_
            col = digitVal % GRID_COLUMNS_
            gridLat += row * rowpv
            gridLng += col * colpv
            if i < digits - 1:
                rowpv //= GRID_ROWS_
                colpv //= GRID_COLUMNS_
        # Adjust the precisions from the integer values to degrees.
        latPrecision = float(rowpv) / FINAL_LAT_PRECISION_
        lngPrecision = float(colpv) / FINAL_LNG_PRECISION_
    # Merge the values from the normal and extra precision parts of the code.
    lat = float(normalLat) / PAIR_PRECISION_ + float(
        gridLat) / FINAL_LAT_PRECISION_
    lng = float(normalLng) / PAIR_PRECISION_ + float(
        gridLng) / FINAL_LNG_PRECISION_
    # Multiple values by 1e14, round and then divide. This reduces errors due
    # to floating point precision.
    return CodeArea(round(lat, 14), round(lng,
                                          14), round(lat + latPrecision, 14),
                    round(lng + lngPrecision, 14),
                    min(len(code), MAX_DIGIT_COUNT_))
"""
Recover the nearest matching code to a specified location.
Given a short Open Location Code of between four and seven characters,
this recovers the nearest matching full code to the specified location.
The number of characters that will be prepended to the short code, depends
on the length of the short code and whether it starts with the separator.
If it starts with the separator, four characters will be prepended. If it
does not, the characters that will be prepended to the short code, where S
is the supplied short code and R are the computed characters, are as
follows:
SSSS -> RRRR.RRSSSS
SSSSS -> RRRR.RRSSSSS
SSSSSS -> RRRR.SSSSSS
SSSSSSS -> RRRR.SSSSSSS
Note that short codes with an odd number of characters will have their
last character decoded using the grid refinement algorithm.
Args:
code: A valid OLC character sequence.
referenceLatitude: The latitude (in signed decimal degrees) to use to
find the nearest matching full code.
referenceLongitude: The longitude (in signed decimal degrees) to use
to find the nearest matching full code.
Returns:
The nearest full Open Location Code to the reference location that matches
the short code. If the passed code was not a valid short code, but was a
valid full code, it is returned with proper capitalization but otherwise
unchanged.
"""
def recoverNearest(code, referenceLatitude, referenceLongitude):
    # if code is a valid full code, return it properly capitalized
    if isFull(code):
        return code.upper()
    if not isShort(code):
        raise ValueError('Passed short code is not valid - ' + str(code))
    # Ensure that latitude and longitude are valid.
    referenceLatitude = clipLatitude(referenceLatitude)
    referenceLongitude = normalizeLongitude(referenceLongitude)
    # Clean up the passed code.
    code = code.upper()
    # Compute the number of digits we need to recover.
    paddingLength = SEPARATOR_POSITION_ - code.find(SEPARATOR_)
    # The resolution (height and width) of the padded area in degrees.
    resolution = pow(20, 2 - (paddingLength / 2))
    # Distance from the center to an edge (in degrees).
    halfResolution = resolution / 2.0
    # Use the reference location to pad the supplied short code and decode it.
    codeArea = decode(
        encode(referenceLatitude, referenceLongitude)[0:paddingLength] + code)
    # How many degrees latitude is the code from the reference? If it is more
    # than half the resolution, we need to move it north or south but keep it
    # within -90 to 90 degrees.
    if (referenceLatitude + halfResolution < codeArea.latitudeCenter and
            codeArea.latitudeCenter - resolution >= -LATITUDE_MAX_):
        # If the proposed code is more than half a cell north of the reference location,
        # it's too far, and the best match will be one cell south.
        codeArea.latitudeCenter -= resolution
    elif (referenceLatitude - halfResolution > codeArea.latitudeCenter and
            codeArea.latitudeCenter + resolution <= LATITUDE_MAX_):
        # If the proposed code is more than half a cell south of the reference location,
        # it's too far, and the best match will be one cell north.
        codeArea.latitudeCenter += resolution
    # Adjust longitude if necessary.
    if referenceLongitude + halfResolution < codeArea.longitudeCenter:
        codeArea.longitudeCenter -= resolution
    elif referenceLongitude - halfResolution > codeArea.longitudeCenter:
        codeArea.longitudeCenter += resolution
    # Re-encode the adjusted center at the original code's precision.
    return encode(codeArea.latitudeCenter, codeArea.longitudeCenter,
                  codeArea.codeLength)
"""
Remove characters from the start of an OLC code.
This uses a reference location to determine how many initial characters
can be removed from the OLC code. The number of characters that can be
removed depends on the distance between the code center and the reference
location.
The minimum number of characters that will be removed is four. If more than
four characters can be removed, the additional characters will be replaced
with the padding character. At most eight characters will be removed.
The reference location must be within 50% of the maximum range. This ensures
that the shortened code will be able to be recovered using slightly different
locations.
Args:
code: A full, valid code to shorten.
latitude: A latitude, in signed decimal degrees, to use as the reference
point.
longitude: A longitude, in signed decimal degrees, to use as the reference
point.
Returns:
    Either the original code, if the reference location was not close enough,
    or the code with its leading characters removed, so that it can later be
    recovered near the reference location.
"""
def shorten(code, latitude, longitude):
    # Only full, unpadded codes can be shortened.
    if not isFull(code):
        raise ValueError('Passed code is not valid and full: ' + str(code))
    if code.find(PADDING_CHARACTER_) != -1:
        raise ValueError('Cannot shorten padded codes: ' + str(code))
    code = code.upper()
    codeArea = decode(code)
    if codeArea.codeLength < MIN_TRIMMABLE_CODE_LEN_:
        # str() is required: concatenating the int constant directly raised a
        # TypeError instead of the intended ValueError.
        raise ValueError('Code length must be at least ' +
                         str(MIN_TRIMMABLE_CODE_LEN_))
    # Ensure that latitude and longitude are valid.
    latitude = clipLatitude(latitude)
    longitude = normalizeLongitude(longitude)
    # How close are the latitude and longitude to the code center.
    coderange = max(abs(codeArea.latitudeCenter - latitude),
                    abs(codeArea.longitudeCenter - longitude))
    for i in range(len(PAIR_RESOLUTIONS_) - 2, 0, -1):
        # Check if we're close enough to shorten. The range must be less than 1/2
        # the resolution to shorten at all, and we want to allow some safety, so
        # use 0.3 instead of 0.5 as a multiplier.
        if coderange < (PAIR_RESOLUTIONS_[i] * 0.3):
            # Trim it.
            return code[(i + 1) * 2:]
    return code
"""
Clip a latitude into the range -90 to 90.
Args:
latitude: A latitude in signed decimal degrees.
"""
def clipLatitude(latitude):
    # Clamp the latitude to the closed interval [-90, 90].
    lower_bounded = max(-90, latitude)
    return min(90, lower_bounded)
"""
Compute the latitude precision value for a given code length. Lengths <=
10 have the same precision for latitude and longitude, but lengths > 10
have different precisions due to the grid method having fewer columns than
rows.
"""
def computeLatitudePrecision(codeLength):
    # Paired digits (length <= 10): the precision is a power of 20 degrees.
    if codeLength <= 10:
        exponent = math.floor((codeLength / -2) + 2)
        return pow(20, exponent)
    # Grid digits: every digit past ten divides latitude by GRID_ROWS_.
    return pow(20, -3) / pow(GRID_ROWS_, codeLength - 10)
"""
Normalize a longitude into the range -180 to 180, not including 180.
Args:
longitude: A longitude in signed decimal degrees.
"""
def normalizeLongitude(longitude):
    # Shift by whole turns of 360 degrees until within [-180, 180).
    while not -180 <= longitude < 180:
        if longitude < -180:
            longitude = longitude + 360
        else:
            longitude = longitude - 360
    return longitude
"""
Coordinates of a decoded Open Location Code.
The coordinates include the latitude and longitude of the lower left and
upper right corners and the center of the bounding box for the area the
code represents.
Attributes:
latitude_lo: The latitude of the SW corner in degrees.
longitude_lo: The longitude of the SW corner in degrees.
latitude_hi: The latitude of the NE corner in degrees.
longitude_hi: The longitude of the NE corner in degrees.
latitude_center: The latitude of the center in degrees.
longitude_center: The longitude of the center in degrees.
code_length: The number of significant characters that were in the code.
This excludes the separator.
"""
class CodeArea(object):
    """Bounding box (SW/NE corners, center) of a decoded Open Location Code."""

    def __init__(self, latitudeLo, longitudeLo, latitudeHi, longitudeHi,
                 codeLength):
        self.latitudeLo = latitudeLo
        self.longitudeLo = longitudeLo
        self.latitudeHi = latitudeHi
        self.longitudeHi = longitudeHi
        self.codeLength = codeLength
        # Centers are midpoints of the box, capped at the maximum legal
        # coordinate so a cell touching the pole never overshoots it.
        halfLat = (latitudeHi - latitudeLo) / 2
        halfLng = (longitudeHi - longitudeLo) / 2
        self.latitudeCenter = min(latitudeLo + halfLat, LATITUDE_MAX_)
        self.longitudeCenter = min(longitudeLo + halfLng, LONGITUDE_MAX_)

    def __repr__(self):
        fields = [
            self.latitudeLo, self.longitudeLo, self.latitudeHi,
            self.longitudeHi, self.latitudeCenter, self.longitudeCenter,
            self.codeLength
        ]
        return str(fields)

    def latlng(self):
        # Center as a [lat, lng] pair.
        return [self.latitudeCenter, self.longitudeCenter]
| zongweil/open-location-code | python/openlocationcode/openlocationcode.py | Python | apache-2.0 | 23,113 |
import time
import dataLoader
from itertools import combinations
positions = dataLoader.loadData("CrowdsourcingResults.csv")
dataLoader.printPositions(positions)
print ""
print ""
def bold(val):
    # Wrap a value in asterisks for emphasis in the table output.
    # (PEP 8 E731: a def is preferred over a lambda bound to a name.)
    return "*" + str(val) + "*"
def getHighestKey(positions, pos, key, usedPlayers=None):
    """Return the unused player dict at `pos` with the highest float `key`.

    positions   -- dict mapping position name to a list of player dicts
    pos         -- position to fill; "DH" considers every non-pitcher
    key         -- column name whose value is compared as a float
    usedPlayers -- players already drafted; defaults to an empty list
                   (None sentinel avoids the shared mutable-default pitfall)
    Returns None when no eligible player remains.
    """
    if usedPlayers is None:
        usedPlayers = []
    bestPlayer = None
    def doBest(pos, bestPlayer=None):
        # Scan one position list, carrying forward the best candidate so far.
        for player in positions[pos]:
            if player in usedPlayers:
                continue
            elif bestPlayer == None:
                bestPlayer = player
            else:
                if float(player[key]) > float(bestPlayer[key]):
                    bestPlayer = player
        return bestPlayer
    if pos == "DH":
        # Any player can be a DH, except pitchers.
        for position in positions:
            if position != "P":
                bestPlayer = doBest(position, bestPlayer)
    else:
        bestPlayer = doBest(pos)
    return bestPlayer
# Roster build #1: greedily pick the highest expected fWAR at each position.
dataLoader.printFields(["Positions", "Player", "Exp. 2016 fWAR", "Exp. Salary"])
dataLoader.printSeparator(4)
totalWar = 0
totalSalary = 0
usedPlayers = []
positionOrder = ["CF", "LF", "RF", "1B", "2B", "3B", "SS", "C", "P", "P", "P", "P", "P"]
for pos in positionOrder:
    bestPlayer = getHighestKey(positions, pos, "Exp. 2016 fWAR", usedPlayers)
    # NOTE(review): bestPlayer is dereferenced here BEFORE the None check
    # below -- if a position runs out of players this raises a TypeError.
    dataLoader.printFields([pos, bestPlayer["Player"], bestPlayer["Exp. 2016 fWAR"], bestPlayer["Expected 2016 AAV"]])
    if bestPlayer != None:
        usedPlayers += [ bestPlayer ]
        totalWar += float(bestPlayer["Exp. 2016 fWAR"])
        totalSalary += float(bestPlayer["Expected 2016 AAV"])
dataLoader.printFields([bold("Total"), "", bold(totalWar), bold(totalSalary)])
print ""
print ""
# Roster build #2: greedily pick the best expected wins per dollar instead.
dataLoader.printFields(["Positions", "Player", "Exp. 2016 fWAR", "Exp. Salary", "Exp. Wins/$"])
dataLoader.printSeparator(5)
totalWar = 0
totalSalary = 0
usedPlayers = []
for pos in positionOrder:
    bestPlayer = getHighestKey(positions, pos, "Expected Wins/$", usedPlayers)
    dataLoader.printFields([pos, bestPlayer["Player"], bestPlayer["Exp. 2016 fWAR"],
                            bestPlayer["Expected 2016 AAV"], bestPlayer["Expected Wins/$"]])
    if bestPlayer != None:
        usedPlayers += [ bestPlayer ]
        totalWar += float(bestPlayer["Exp. 2016 fWAR"])
        totalSalary += float(bestPlayer["Expected 2016 AAV"])
dataLoader.printFields([bold("Total"), "", bold(totalWar), bold(totalSalary), bold(totalWar / totalSalary)])
print ""
print "" | ktarrant/freeAgents | freeAgents.py | Python | mit | 2,258 |
import unittest
from app import smartdb, model
from model import MachineCurrentState, MachineInterface, Machine, MachineStates
from repositories import machine_state_repo
class TestMachineStateRepo(unittest.TestCase):
    """Exercises MachineStateRepository.update()/fetch() against an
    in-memory SQLite database that is rebuilt for every test."""
    @classmethod
    def setUpClass(cls):
        # One shared client for the whole class; the schema itself is reset
        # per-test in setUp().
        db_uri = 'sqlite:///:memory:'
        cls.smart = smartdb.SmartDatabaseClient(db_uri)
    def setUp(self):
        # Drop and recreate all tables so each test starts from a clean slate.
        model.BASE.metadata.drop_all(self.smart.get_engine_connection())
        model.BASE.metadata.create_all(self.smart.get_engine_connection())
    def test_no_machine_no_state(self):
        # update() for an unknown MAC creates a state row with no machine_id.
        mac = "00:00:00:00:00:00"
        state = MachineStates.booting
        msr = machine_state_repo.MachineStateRepository(self.smart)
        msr.update(mac, state)
        with self.smart.new_session() as session:
            res = session.query(MachineCurrentState).filter(MachineCurrentState.machine_mac == mac).first()
            self.assertEqual(mac, res.machine_mac)
            self.assertEqual(state, res.state_name)
            self.assertEqual(None, res.machine_id)
    def test_machine_exists_no_state(self):
        # When the MAC belongs to a known interface, update() links the new
        # state row to that machine's id.
        mac = "00:00:00:00:00:00"
        state = MachineStates.booting
        with self.smart.new_session() as session:
            uuid = "b7f5f93a-b029-475f-b3a4-479ba198cb8a"
            machine = Machine(uuid=uuid)
            session.add(machine)
            machine_id = session.query(Machine).filter(Machine.uuid == uuid).first().id
            session.add(
                MachineInterface(machine_id=machine_id, mac=mac, netmask=1, ipv4="10.10.10.10", cidrv4="127.0.0.1/8",
                                 as_boot=True, gateway="1.1.1.1", name="lol"))
            session.commit()
        msr = machine_state_repo.MachineStateRepository(self.smart)
        msr.update(mac, state)
        with self.smart.new_session() as session:
            res = session.query(MachineCurrentState).filter(MachineCurrentState.machine_mac == mac).first()
            self.assertEqual(mac, res.machine_mac)
            self.assertEqual(state, res.state_name)
            self.assertEqual(machine_id, res.machine_id)
    def test_machine_exists_state_exists(self):
        # A second update() for the same MAC overwrites the stored state, and
        # fetch() reports the latest state for the machine.
        mac = "00:00:00:00:00:00"
        state = MachineStates.booting
        msr = machine_state_repo.MachineStateRepository(self.smart)
        with self.smart.new_session() as session:
            uuid = "b7f5f93a-b029-475f-b3a4-479ba198cb8a"
            machine = Machine(uuid=uuid)
            session.add(machine)
            machine_id = session.query(Machine).filter(Machine.uuid == uuid).first().id
            session.add(
                MachineInterface(machine_id=machine_id, mac=mac, netmask=1, ipv4="10.10.10.10", cidrv4="127.0.0.1/8",
                                 as_boot=True, gateway="1.1.1.1", name="lol"))
            session.commit()
        msr.update(mac, state)
        new_state = MachineStates.discovery
        msr.update(mac, new_state)
        with self.smart.new_session() as session:
            res = session.query(MachineCurrentState).filter(MachineCurrentState.machine_mac == mac).first()
            self.assertEqual(mac, res.machine_mac)
            self.assertEqual(new_state, res.state_name)
            self.assertEqual(machine_id, res.machine_id)
        ret = msr.fetch(10)
        self.assertEqual([{
            "fqdn": None,
            "mac": mac,
            "state": new_state,
            "date": res.updated_date
        }], ret)
| kirek007/enjoliver | app/tests/unit/test_machine_state_repo.py | Python | mit | 3,449 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 05 09:16:37 2016
Code adapted from http://stackoverflow.com/a/26695514
@author: Benben
"""
import time
# Generator that returns time differences
def TicTocGenerator():
    """Yield the wall-clock seconds elapsed between successive next() calls."""
    previous = time.time()
    while True:
        current = time.time()
        yield current - previous  # time since the previous next()
        previous = current
TicToc = TicTocGenerator() # create an instance of the TicTocGen generator
def toc(start=True):
    """Advance the module-level TicToc generator; print the elapsed time
    unless `start` is falsy (tic() uses start=False to silence the output)."""
    timeDiff = next(TicToc)
    if start:
        print("Elapsed time: %f seconds.\n" % timeDiff)
def tic():
    """Mark the beginning of a timed interval: advance the generator
    silently so the next toc() reports time elapsed since this call."""
    toc(start=False)
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-05-09 21:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (1.9.1). Avoid hand-editing the
    # operations once this migration has been applied to any database.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=1000)),
                ('subject', models.CharField(max_length=100)),
                # NOTE(review): no default/auto_now_add, so callers must set
                # `date` explicitly -- confirm that is intended.
                ('date', models.DateTimeField()),
                ('viewed', models.BooleanField(default=False)),
                ('recipient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='recipient', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| jimga150/HealthNet | HealthNet/messaging/migrations/0001_initial.py | Python | mit | 1,156 |
# -*- coding: utf-8 -*-
import time
import wda
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from PIL import Image
# 截图距离 * time_coefficient = 按键时长
# time_coefficient:
# iphonex: 0.00125
# iphone6: 0.00196
# iphone6s plus: 0.00120
time_coefficient = 0.00120
c = wda.Client()
s = c.session()
def pull_screenshot():
    # Grab the current phone screen via WebDriverAgent and save it locally.
    c.screenshot('autojump.png')
def jump(distance):
    # Convert the on-screen distance (pixels) into a touch duration in
    # seconds, then press anywhere on the screen for that long.
    # (Removed the redundant no-op `press_time = press_time` self-assignment.)
    press_time = distance * time_coefficient
    print('press_time = ',press_time)
    s.tap_hold(200, 200, press_time)
fig = plt.figure()
pull_screenshot()
img = np.array(Image.open('autojump.png'))
im = plt.imshow(img, animated=True)
update = True
click_count = 0
cor = []
def update_data():
    # Re-read the latest screenshot from disk as an image ndarray.
    return np.array(Image.open('autojump.png'))
def updatefig(*args):
    # Animation callback: after a jump has been triggered, wait for the piece
    # to land, refresh the screenshot, and re-arm for the next pair of clicks.
    global update
    if update:
        time.sleep(1)
        pull_screenshot()
        im.set_array(update_data())
        update = False
    return im,
def on_click(event):
    # Collect two clicks (piece position, then target position); on the
    # second click compute the pixel distance between them and jump.
    global update
    global ix, iy
    global click_count
    global cor
    ix, iy = event.xdata, event.ydata
    coords = [(ix, iy)]
    print('now = ', coords)
    cor.append(coords)
    click_count += 1
    if click_count > 1:
        click_count = 0
        # Pop the two most recent clicks (order is irrelevant: the distance
        # below is symmetric).
        cor1 = cor.pop()
        cor2 = cor.pop()
        # Euclidean distance between the two click points, in pixels.
        distance = (cor1[0][0] - cor2[0][0])**2 + (cor1[0][1] - cor2[0][1])**2
        distance = distance ** 0.5
        print('distance = ', distance)
        jump(distance)
        update = True
fig.canvas.mpl_connect('button_press_event', on_click)
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
| JianmingXia/StudyTest | JumpTool/wechat_jump_iOS_py3.py | Python | mit | 1,667 |
'''
Created on May 15, 2010
@author: ebakan
'''
import sys
if __name__ == '__main__':
    # Read three integers from one line of stdin.
    # NOTE(review): the exact game rules are not stated in this file --
    # presumably ints[0]/ints[1]/ints[2] are score counters for an
    # interactive speed-round game; confirm against the problem statement.
    ints = sys.stdin.readline().split()
    for i in range(len(ints)):
        ints[i]=int(ints[i])
    # Keep playing "R" while every counter is still at most 6.
    while ints[0]<=6 and ints[1]<=6 and ints[2]<=6:
        sys.stdout.write("R")
        ints[2]+=ints[0]
        if ints[2]>6:
            sys.stdout.write(" Win")
            break
        #sys.stdout.write('\r')
        #sys.stdout.flush()
        # Python 2 raw_input(); the prompt string "test" is written to stdout.
        string = raw_input("test")
        if string=="R":
            ints[0]+=ints[2]
        else:
            ints[0]+=ints[1]
        if ints[0]>6:
            sys.stdout.write("Lose")
            break
    sys.stdout.flush()
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner multiple phones',
'version': '0.008',
'category': 'Customizations',
'sequence': 16,
'complexity': 'normal',
'description': '''== Partner multiple phones module ==\n\n
This modules add a tab to manage multiple phones for a partner.\n
The phones are searchable from tree list view like in standard module.\n
This module don't break phone functionality because it keeps the phone char field in partner form.\n
''',
'author': 'ThinkOpen Solutions Brasil',
'website': 'http://www.tkobr.com',
'images': ['images/oerp61.jpeg',
],
'depends': [
'tko_contacts',
],
'data': [
'security/ir.model.access.csv',
'views/tko_partner_phones_view.xml',
'views/res_partner_view.xml',
],
'init': [],
'demo': [],
'update': [],
'test': [], # YAML files with tests
'installable': True,
'application': False,
# If it's True, the modules will be auto-installed when all dependencies
# are installed
'auto_install': False,
'certificate': '',
}
| thinkopensolutions/tkobr-addons | tko_partner_multiple_phones/__manifest__.py | Python | agpl-3.0 | 2,154 |
#!/usr/bin/env python
from distutils.core import setup
# Warn installers that third-party dependencies live in requirements.txt and
# are NOT installed by this script (install_requires is intentionally empty).
# Single-argument print(...) behaves identically under Python 2 and Python 3;
# the original bare `print "..."` statements were Python-2-only syntax.
print("\n\n\n\n************************************************************")
print("      If you're installing MyUW, you must also install")
print("      each dependency in requirements.txt")
print("************************************************************\n\n\n\n")
setup(name='MyUW',
      version='4.0',
      description='',
      install_requires=[]  # uh... move requirements.txt into here
      )
| fanglinfang/myuw | setup.py | Python | apache-2.0 | 473 |
# -*- coding: utf-8 -*-
'''
IPython notebook compatability module for highcharts-python
Adapted from python-nvd3: https://github.com/areski/python-nvd3/blob/develop/nvd3/ipynb.py
'''
try:
    _ip = get_ipython()
except NameError:
    # get_ipython() only exists inside an IPython/Jupyter session; elsewhere
    # the name is undefined, so treat notebook integration as unavailable.
    # (A bare `except:` also swallowed SystemExit/KeyboardInterrupt.)
    _ip = None
# Only hook into the display machinery when actually running under
# IPython/Jupyter (classic IPython or an ipykernel-based frontend).
if _ip and (_ip.__module__.startswith('IPython') or _ip.__module__.startswith('ipykernel')):
    def _print_html(chart):
        '''Function to return the HTML code for the div container plus the javascript
        to generate the chart. This function is bound to the ipython formatter so that
        charts are displayed inline.'''
        return chart.iframe
    def _setup_ipython_formatter(ip):
        ''' Set up the ipython formatter to display HTML formatted output inline'''
        from IPython import __version__ as IPython_version
        from .highcharts.highcharts import Highchart
        from .highmaps.highmaps import Highmap
        from .highstock.highstock import Highstock
        # NOTE(review): this is a lexicographic string comparison, not a real
        # version comparison -- it happens to work for '0.11' vs modern
        # versions, but verify before relying on it.
        if IPython_version >= '0.11':
            html_formatter = ip.display_formatter.formatters['text/html']
            for chart_type in [Highchart, Highmap, Highstock]:
                html_formatter.for_type(chart_type, _print_html)
    _setup_ipython_formatter(_ip)
| kyper-data/python-highcharts | highcharts/ipynb.py | Python | mit | 1,226 |
#!/usr/bin/python3
# TF-IDF library downloaded from: https://github.com/hrs/python-tf-idf
# Slightly modified to be compatible with Python 3.
"""
The simplest TF-IDF library imaginable.
Add your documents as two-element lists `[docname, [list_of_words_in_the_document]]` with `addDocument(docname, list_of_words)`. Get a list of all the `[docname, similarity_score]` pairs relative to a document by calling `similarities([list_of_words])`.
See the README for a usage example.
"""
import sys
import os
class tfidf:
    """Minimal TF-IDF index: add documents, then query for similarity scores."""
    def __init__(self):
        self.weighted = False  # kept for API compatibility; never read
        self.documents = []    # list of [doc_name, {word: normalized frequency}]
        self.corpus_dict = {}  # word -> total occurrence count across all docs
    def addDocument(self, doc_name, list_of_words):
        """Index a document given its name and list of words."""
        # building a dictionary
        doc_dict = {}
        for w in list_of_words:
            doc_dict[w] = doc_dict.get(w, 0.) + 1.0
            self.corpus_dict[w] = self.corpus_dict.get(w, 0.0) + 1.0
        # normalizing the dictionary; guard against an empty document, which
        # previously raised ZeroDivisionError.
        length = float(len(list_of_words))
        if length > 0:
            for k in doc_dict:
                doc_dict[k] = doc_dict[k] / length
        # add the normalized document to the corpus
        self.documents.append([doc_name, doc_dict])
    def similarities(self, list_of_words):
        """Returns a list of all the [docname, similarity_score] pairs relative to a list of words."""
        # building the query dictionary
        query_dict = {}
        for w in list_of_words:
            query_dict[w] = query_dict.get(w, 0.0) + 1.0
        # normalizing the query; an empty query now scores 0.0 against every
        # document instead of raising ZeroDivisionError.
        length = float(len(list_of_words))
        if length > 0:
            for k in query_dict:
                query_dict[k] = query_dict[k] / length
        # computing the list of similarities
        sims = []
        for doc in self.documents:
            score = 0.0
            doc_dict = doc[1]
            for k in query_dict:
                if k in doc_dict:
                    # Both the query and document frequencies are discounted
                    # by the corpus frequency of the term (IDF-like weight).
                    score += (query_dict[k] / self.corpus_dict[k]) + (doc_dict[k] / self.corpus_dict[k])
            sims.append([doc[0], score])
        return sims
| pwalch/joke-scraper | tfidf.py | Python | gpl-3.0 | 1,830 |
from test_base import TestCase
class Test_PageCheck(TestCase):
    """Smoke test: open every known pixiv page type, then open the popup."""
    # Relative pixiv URLs covering each page layout exercised by the suite.
    pages = (
        '/mypage.php',
        '/new_illust.php',
        '/bookmark_new_illust.php',
        '/mypixiv_new_illust.php',
        '/ranking.php?mode=daily',
        '/ranking.php?mode=daily&content=ugoira',
        '/ranking_area.php',
        '/stacc/p/activity',
        '/stacc/p/activity?mode=unify',
        '/user_event.php',
        '/bookmark.php?rest=hide',
        '/member_illust.php',
        '/response.php?illust_id=11437736',
        '/member_illust.php?mode=medium&illust_id=11437736',
        '/member.php?id=11',
        '/member_illust.php?id=11',
        '/tags.php?tag=pixiv',
        '/search.php?s_mode=s_tag&word=pixiv',
        '/cate_r18.php',
        '/new_illust_r18.php',
        '/user_event.php?type=r18',
        '/search_user.php'
    )
    def test_pages(self):
        # Visiting each page and opening the popup must complete without error.
        for url in self.pages:
            self.open(url)
            self.open_popup()
| crckyl/pixplus | test/test01_pagecheck.py | Python | mit | 852 |
# Windows specific tests
from ctypes import *
import unittest, sys
from test import support
import _ctypes_test
# Only windows 32-bit has different calling conventions.
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
@unittest.skipUnless(sizeof(c_void_p) == sizeof(c_int),
                     "sizeof c_void_p and c_int differ")
class WindowsTestCase(unittest.TestCase):
    """Argument-size checking for stdcall vs cdecl calls; only meaningful on
    32-bit Windows, where the two calling conventions actually differ."""
    def test_callconv_1(self):
        # Testing stdcall function
        IsWindow = windll.user32.IsWindow
        # ValueError: Procedure probably called with not enough arguments
        # (4 bytes missing)
        self.assertRaises(ValueError, IsWindow)
        # This one should succeed...
        self.assertEqual(0, IsWindow(0))
        # ValueError: Procedure probably called with too many arguments
        # (8 bytes in excess)
        self.assertRaises(ValueError, IsWindow, 0, 0, 0)
    def test_callconv_2(self):
        # Calling stdcall function as cdecl
        IsWindow = cdll.user32.IsWindow
        # ValueError: Procedure called with not enough arguments
        # (4 bytes missing) or wrong calling convention
        self.assertRaises(ValueError, IsWindow, None)
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class FunctionCallTestCase(unittest.TestCase):
    """Foreign-function call behavior that is specific to Windows."""
    @unittest.skipUnless('MSC' in sys.version, "SEH only supported by MSC")
    @unittest.skipIf(sys.executable.endswith('_d.exe'),
                     "SEH not enabled in debug builds")
    def test_SEH(self):
        # Call functions with invalid arguments, and make sure
        # that access violations are trapped and raise an
        # exception.
        self.assertRaises(OSError, windll.kernel32.GetModuleHandleA, 32)
    def test_noargs(self):
        # This is a special case on win32 x64
        windll.user32.GetDesktopWindow()
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class TestWintypes(unittest.TestCase):
    """Sanity checks for ctypes.wintypes aliases and COMError metadata."""
    def test_HWND(self):
        from ctypes import wintypes
        # HWND is a handle, i.e. pointer-sized.
        self.assertEqual(sizeof(wintypes.HWND), sizeof(c_void_p))
    def test_PARAM(self):
        from ctypes import wintypes
        # WPARAM/LPARAM are pointer-sized on both 32- and 64-bit Windows.
        self.assertEqual(sizeof(wintypes.WPARAM),
                         sizeof(c_void_p))
        self.assertEqual(sizeof(wintypes.LPARAM),
                         sizeof(c_void_p))
    def test_COMError(self):
        from _ctypes import COMError
        if support.HAVE_DOCSTRINGS:
            self.assertEqual(COMError.__doc__,
                             "Raised when a COM method call failed.")
        ex = COMError(-1, "text", ("details",))
        self.assertEqual(ex.hresult, -1)
        self.assertEqual(ex.text, "text")
        self.assertEqual(ex.details, ("details",))
@unittest.skipUnless(sys.platform == "win32", 'Windows-specific test')
class TestWinError(unittest.TestCase):
    """WinError() must map a Windows error code to an OSError with the
    matching errno/winerror/message attributes (issue 16169)."""
    def test_winerror(self):
        # see Issue 16169
        import errno
        ERROR_INVALID_PARAMETER = 87
        msg = FormatError(ERROR_INVALID_PARAMETER).strip()
        args = (errno.EINVAL, msg, None, ERROR_INVALID_PARAMETER)
        e = WinError(ERROR_INVALID_PARAMETER)
        self.assertEqual(e.args, args)
        self.assertEqual(e.errno, errno.EINVAL)
        self.assertEqual(e.winerror, ERROR_INVALID_PARAMETER)
        # Calling WinError() with no argument must pick up GetLastError().
        windll.kernel32.SetLastError(ERROR_INVALID_PARAMETER)
        try:
            raise WinError()
        except OSError as exc:
            e = exc
        self.assertEqual(e.args, args)
        self.assertEqual(e.errno, errno.EINVAL)
        self.assertEqual(e.winerror, ERROR_INVALID_PARAMETER)
class Structures(unittest.TestCase):
    """Passing a structure by value across the FFI boundary."""
    def test_struct_by_value(self):
        class POINT(Structure):
            _fields_ = [("x", c_long),
                        ("y", c_long)]
        class RECT(Structure):
            _fields_ = [("left", c_long),
                        ("top", c_long),
                        ("right", c_long),
                        ("bottom", c_long)]
        dll = CDLL(_ctypes_test.__file__)
        # RECT goes by reference, POINT by value; (10, 10) lies inside the
        # 20x20 rectangle, so the helper must return 1.
        pt = POINT(10, 10)
        rect = RECT(0, 0, 20, 20)
        self.assertEqual(1, dll.PointInRect(byref(rect), pt))
if __name__ == '__main__':
unittest.main()
| zhjunlang/kbengine | kbe/src/lib/python/Lib/ctypes/test/test_win32.py | Python | lgpl-3.0 | 4,199 |
"""equinox_spring16_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import routers
from equinox_api.views import ApplicationViewSet, OperationViewSet, InstancesViewSet, UserViewSet, ItemViewSet
from equinox_spring16_api import settings
# DRF router: generates list/detail routes for each registered viewset
# (/applications/, /operations/, /instances/, /items/, /users/).
router = routers.DefaultRouter()
router.register(r'applications', ApplicationViewSet)
router.register(r'operations', OperationViewSet)
router.register(r'instances', InstancesViewSet)
router.register(r'items', ItemViewSet)
router.register(r'users', UserViewSet)
# Admin and Swagger docs sit beside the API root; static files are served
# directly (development-style) from STATIC_ROOT.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include(router.urls)),
    url(r'^docs/', include('rest_framework_swagger.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| ivanprjcts/equinox-spring16-API | equinox_spring16_api/equinox_spring16_api/urls.py | Python | lgpl-3.0 | 1,431 |
# -*- encoding: utf-8 -*-
"""
staticdhcpdlib.databases.generic
================================
Provides a uniform datasource API, to be implemented by technology-specific
backends.
Legal
-----
This file is part of staticDHCPd.
staticDHCPd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
(C) Neil Tallim, 2014 <flan@uguu.ca>
(C) Anthony Woods, 2013 <awoods@internap.com>
"""
try:
from types import StringTypes
except ImportError: #py3k
StringTypes = (str,)
try:
IntTypes = (int, long,)
except NameError: #py3k
IntTypes = (int,)
import collections
import logging
import threading
import traceback
import libpydhcpserver.dhcp_types.conversion
from libpydhcpserver.dhcp_types.ipv4 import IPv4
_logger = logging.getLogger('databases.generic')
class Definition(object):
    """
    A definition of a "lease" from a database.
    """
    ip = None #: The :class:`IPv4 <IPv4>` to be assigned
    hostname = None #: The hostname to assign (may be None)
    gateways = None #: The :class:`IPv4 <IPv4>` gateways to advertise (may be None)
    subnet_mask = None #: The :class:`IPv4 <IPv4>` netmask to advertise (may be None)
    broadcast_address = None #: The :class:`IPv4 <IPv4>` broadcast address to advertise (may be None)
    domain_name = None #: The domain name to advertise (may be None)
    domain_name_servers = None #: A list of DNS IPv4s to advertise (may be None)
    ntp_servers = None #: A list of NTP IPv4s to advertise (may be None)
    lease_time = None #: The number of seconds for which the lease is valid
    subnet = None #: The "subnet" identifier of the record in the database
    serial = None #: The "serial" identifier of the record in the database
    extra = None #: An object containing any metadata from the database
    
    #The abstract-base-class home of Sequence moved to collections.abc in py3
    #(the bare alias was removed in py3.10); py2 lacks the submodule entirely.
    try:
        _SEQUENCE_TYPE = collections.abc.Sequence
    except AttributeError: #py2
        _SEQUENCE_TYPE = collections.Sequence
    
    def __init__(self,
        ip, lease_time, subnet, serial,
        hostname=None,
        gateways=None, subnet_mask=None, broadcast_address=None,
        domain_name=None, domain_name_servers=None, ntp_servers=None,
        extra=None
    ):
        """
        Initialises a Definition.
        
        :param ip: The IP address to assign, in any main format.
        :param int lease_time: The number of seconds for which the lease is
                               valid.
        :param basestring subnet: The "subnet" identifier of the record in the
                                  database.
        :param int serial: The "serial" identifier of the record in the
                           database.
        :param basestring hostname: The hostname to assign.
        :param gateways: The IP addresses to advertise, in any main format,
                         including comma-delimited string.
        :param subnet_mask: The IP address to advertise, in any main format.
        :param broadcast_address: The IP address to advertise, in any main
                                  format.
        :param basestring domain_name: The domain name to advertise.
        :param domain_name_servers: The IP addresses to advertise, in any main
                                    format, including comma-delimited string.
        :param ntp_servers: The IP addresses to advertise, in any main format,
                            including comma-delimited string.
        :param extra: An object containing any metadata from the
                      database.
        :raise ValueError: No usable IP address was supplied.
        """
        #Required values
        self.ip = self._parse_address(ip)
        if not self.ip:
            raise ValueError("An IP address is required for assignment; received: %(ip)r" % {'ip': ip,})
        self.lease_time = int(lease_time)
        self.subnet = str(subnet)
        self.serial = int(serial)
        
        #Optional values
        self.hostname = hostname and str(hostname)
        self.gateways = self._parse_addresses(gateways)
        self.subnet_mask = self._parse_address(subnet_mask)
        self.broadcast_address = self._parse_address(broadcast_address)
        self.domain_name = domain_name and str(domain_name)
        self.domain_name_servers = self._parse_addresses(domain_name_servers, limit=3)
        self.ntp_servers = self._parse_addresses(ntp_servers, limit=3)
        self.extra = extra
        
    def _parse_address(self, address):
        """
        Takes an input-value and produces an IPv4 address.
        
        :param address: The IP address to process, in any main format.
        :return: The parsed IPv4 address, or ``None`` if nothing was
                 provided.
        """
        if isinstance(address, IPv4):
            return address
        if address:
            return IPv4(address)
        return None
        
    def _parse_addresses(self, addresses, limit=None):
        """
        Takes variable-type input and produces IPv4 addresses.
        
        :param addresses: The IP addresses to process, in any main format,
                          including comma-delimited string.
        :param int limit: The maximum number of addresses to return.
        :return list: Any parsed IPv4 addresses, or ``None`` if nothing was
                      provided.
        """
        if isinstance(addresses, IPv4):
            return [addresses]
        if addresses:
            if isinstance(addresses, StringTypes):
                addresses = addresses.split(',')
            elif isinstance(addresses, self._SEQUENCE_TYPE):
                if all(type(i) in IntTypes for i in addresses):
                    #The module was imported by its full dotted path, so it
                    #must be referenced the same way: the bare name
                    #`conversion` was previously undefined (NameError).
                    return libpydhcpserver.dhcp_types.conversion.listToIPs(addresses)[:limit]
            else: #Might be a set or something non-sliceable
                addresses = tuple(addresses)
            return [self._parse_address(i) for i in addresses[:limit]] or None
        return None
class Database(object):
    """
    Abstract interface that every concrete database backend must satisfy.
    """
    def lookupMAC(self, mac):
        """
        Resolves the given MAC address to its lease information, if any.
        
        :param mac: The MAC address to lookup.
        :return: The :class:`Definition` on a match; ``None`` otherwise.
        :raise Exception: A problem occured while accessing the database.
        """
        raise NotImplementedError("lookupMAC() must be implemented by subclasses")
        
    def reinitialise(self):
        """
        Backend-specific refresh hook: typically flushes a cache or reconnects
        to the data source so that subsequent lookups see fresh data.
        
        The default implementation does nothing.
        """
class CachingDatabase(Database):
    """
    A partial implementation of the Database engine, adding efficient generic
    caching logic and concurrency-throttling.
    
    Subclasses must provide ``_lookupMAC(mac)``, which performs the actual
    database query.
    """
    _resource_lock = None #: A lock used to prevent the database from being overwhelmed.
    _cache = None #: The caching structure to use, if caching is desired.
    
    def __init__(self, concurrency_limit=2147483647):
        """
        A fully implemented caching layer for any real database.
        
        :param int concurrency_limit: The number of concurrent database hits to
                                      permit, defaulting to a ridiculously large
                                      number.
        :raise Exception: Cache-initialisation failed.
        """
        _logger.debug("Initialising database with a maximum of %(count)i concurrent connections" % {'count': concurrency_limit,})
        self._resource_lock = threading.BoundedSemaphore(concurrency_limit)
        try:
            self._setupCache()
        except Exception:
            #Caching is best-effort: on failure the instance degrades to
            #uncached lookups rather than refusing to start.
            _logger.error("Cache initialisation failed:\n" + traceback.format_exc())
            
    def _setupCache(self):
        """
        Sets up the database caching environment.
        
        :except Exception: Cache-initialisation failed.
        """
        from .. import config
        if config.USE_CACHE:
            #Explicit relative import: the implicit form "import _caching"
            #is py2-only syntax and fails under py3.
            from . import _caching
            if config.CACHING_MODEL == 'in-process':
                if config.PERSISTENT_CACHE or config.CACHE_ON_DISK:
                    try:
                        disk_cache = _caching.DiskCache(config.PERSISTENT_CACHE and 'persistent' or 'disk', config.PERSISTENT_CACHE)
                        if config.CACHE_ON_DISK:
                            _logger.debug("Combining local caching database and persistent caching database")
                            self._cache = disk_cache
                        else:
                            _logger.debug("Setting up memory-cache on top of persistent caching database")
                            self._cache = _caching.MemoryCache('memory', chained_cache=disk_cache)
                    except Exception:
                        _logger.error("Unable to initialise disk-based caching:\n" + traceback.format_exc())
                        if config.PERSISTENT_CACHE and not config.CACHE_ON_DISK:
                            _logger.warning("Persistent caching is not available")
                            self._cache = _caching.MemoryCache('memory-nonpersist')
                        elif config.CACHE_ON_DISK:
                            _logger.warning("Caching is disabled: memory-caching was not requested, so no fallback exists")
                else:
                    _logger.debug("Setting up memory-cache")
                    self._cache = _caching.MemoryCache('memory')
            elif config.CACHING_MODEL == 'memcached':
                _logger.debug("Setting up memcached-cache")
                self._cache = _caching.MemcachedCache('memcached',
                    (config.MEMCACHED_SERVER, config.MEMCACHED_PORT),
                    config.MEMCACHED_AGE_TIME)
            if self._cache:
                _logger.info("Database caching enabled; top-level cache: %(cache)s" % {
                    'cache': self._cache,
                })
            else:
                _logger.warning("'%(model)s' database caching could not be enabled" % {
                    'model': config.CACHING_MODEL,
                })
        else:
            if config.PERSISTENT_CACHE:
                _logger.warning("PERSISTENT_CACHE was set, but USE_CACHE was not")
            if config.CACHE_ON_DISK:
                _logger.warning("CACHE_ON_DISK was set, but USE_CACHE was not")
                
    def reinitialise(self):
        """
        Reinitialises the cache, if any, so subsequent lookups see fresh data.
        """
        if self._cache:
            try:
                self._cache.reinitialise()
            except Exception:
                _logger.error("Cache reinitialisation failed:\n" + traceback.format_exc())
                
    def lookupMAC(self, mac):
        """
        Queries the cache, then the real database, for the given MAC address,
        populating the cache on a database hit.
        
        :param mac: The MAC address to lookup.
        :return: The :class:`Definition` or, if no match was found, ``None``.
        :raise Exception: A problem occured while accessing the database.
        """
        if self._cache:
            try:
                definition = self._cache.lookupMAC(mac)
            except Exception:
                _logger.error("Cache lookup failed:\n" + traceback.format_exc())
            else:
                if definition:
                    return definition
        #Throttle concurrent hits against the real database.
        with self._resource_lock:
            definition = self._lookupMAC(mac)
            if definition and self._cache:
                try:
                    self._cache.cacheMAC(mac, definition)
                except Exception:
                    _logger.error("Cache update failed:\n" + traceback.format_exc())
            return definition
class Null(Database):
    """
    A backend that never yields a match.
    
    Primarily useful for testing, or when custom modules perform all work
    through the handleUnknownMAC() workflow.
    """
    def lookupMAC(self, mac):
        #Every lookup misses by design.
        return None
| flan/staticdhcpd | staticDHCPd/staticdhcpdlib/databases/generic.py | Python | gpl-3.0 | 11,915 |
from theano import tensor
from theano.scan_module import until
from blocks.bricks import Initializable
from blocks.bricks.base import lazy, application
from blocks.roles import add_role, WEIGHT
from blocks.utils import shared_floatx_nans
from blocks.bricks.recurrent import BaseRecurrent, recurrent
class ConditionedRecurrent(BaseRecurrent):
    """ConditionedRecurrent network
    Wraps a recurrent brick, conditioning every time step on a context
    vector: the ``attended`` context is broadcast over the time axis and
    added to (or substituted for) the wrapped brick's ``inputs``.
    Parameters
    ----------
    wrapped : instance of :class:`BaseRecurrent`
        The recurrent brick that receives the conditioned inputs.
    Notes
    -----
    See :class:`.BaseRecurrent` for initialization parameters.
    """
    def __init__(self, wrapped, **kwargs):
        super(ConditionedRecurrent, self).__init__(**kwargs)
        self.wrapped = wrapped
        self.children = [wrapped, ]
    def get_dim(self, name):
        #The context must match the wrapped brick's input dimensionality.
        if name == 'attended':
            return self.wrapped.get_dim('inputs')
        if name == 'attended_mask':
            return 0
        return self.wrapped.get_dim(name)
    @application(contexts=['attended', 'attended_mask'])
    def apply(self, **kwargs):
        context = kwargs['attended']
        #Broadcast the context over time and merge it with the inputs; if no
        #inputs were supplied the context alone drives the wrapped brick.
        #Only KeyError is caught: the previous bare ``except`` also silently
        #swallowed genuine errors (e.g. shape mismatches) and discarded the
        #caller's inputs.
        try:
            kwargs['inputs'] += context.dimshuffle('x', 0, 1)
        except KeyError:
            kwargs['inputs'] = context.dimshuffle('x', 0, 1)
        #The wrapped brick does not know about the context arguments.
        for context_name in ConditionedRecurrent.apply.contexts:
            kwargs.pop(context_name)
        return self.wrapped.apply(**kwargs)
    @apply.delegate
    def apply_delegate(self):
        return self.wrapped.apply
class Unfolder(Initializable, BaseRecurrent):
    """Unfolder network
    A recurrent network that unfolds an input vector to a sequence,
    stopping when a learned flag fires.
    Parameters
    ----------
    prototype : instance of :class:`BaseRecurrent`
        A brick that will get the input vector.
    flagger : instance of :class:`Brick`
        A brick that will flag when to stop the loop
    Notes
    -----
    See :class:`.Initializable` for initialization parameters.
    """
    @lazy
    def __init__(self, prototype, flagger, **kwargs):
        super(Unfolder, self).__init__(**kwargs)
        #children[0] generates the next state; children[1] decides termination
        self.children = [prototype, flagger]
    def get_dim(self, name):
        #Both the context vector and the state share the prototype's dimension
        if name in ('inputs', 'states'):
            return self.children[0].dim
        else:
            return super(Unfolder, self).get_dim(name)
    #def initial_state(self, state_name, batch_size, *args, **kwargs):
    @recurrent(sequences=[], states=['states'],
               outputs=['states', 'flags'],
               contexts=['inputs'])
    def apply(self, inputs=None, states=None, **kwargs):
        #Single (non-iterating) step of the wrapped recurrent brick
        outputs = self.children[0].apply(inputs=inputs,
                                         states=states,
                                         iterate=False,
                                         **kwargs)
        #Scalar stop signal produced by the flagger from the new state
        flags = self.children[1].apply(outputs).sum()
        #NOTE(review): the loop stops once the flag exceeds 0.5 — presumably
        #the flagger emits a probability-like value; confirm.
        stop_condition = flags >= .5
        outputs = [outputs, flags]
        #theano's `until` terminates the scan loop once the condition holds
        return outputs, until(stop_condition)
    #TODO define outputs_info, define RecurrentFlag class
class UnfolderLSTM(Initializable, BaseRecurrent):
    """UnfolderLSTM network
    A recurrent network that unfolds an input vector to a sequence,
    using an LSTM transition and a learned stop flag.
    Parameters
    ----------
    prototype : instance of :class:`LSTM`
        A brick that will get the input vector.
    flagger : instance of :class:`Brick`
        A brick that will flag when to stop the loop
    Notes
    -----
    See :class:`.Initializable` for initialization parameters.
    """
    @lazy
    def __init__(self, prototype, flagger, **kwargs):
        super(UnfolderLSTM, self).__init__(**kwargs)
        #children[0]: the LSTM transition; children[1]: the stop-flag predictor
        self.children = [prototype, flagger]
    def get_dim(self, name):
        #Delegate the recurrent dimensions to the wrapped LSTM
        if name in ('inputs', 'states', 'cells'):
            return self.children[0].get_dim(name)
        else:
            return super(UnfolderLSTM, self).get_dim(name)
    #def initial_state(self, state_name, batch_size, *args, **kwargs):
    @recurrent(sequences=[], states=['states', 'cells'],
               outputs=['states','cells', 'flags'], contexts=['inputs'])
    def apply(self, inputs=None, states=None, cells=None, **kwargs):
        #One LSTM step (iterate=False); the wrapped LSTM returns both the
        #hidden state and the cell state
        outputs = self.children[0].apply(inputs=inputs,
                                         cells=cells,
                                         states=states,
                                         iterate=False,
                                         **kwargs)
        #Stop flag is computed from the hidden state (outputs[0])
        flags = self.children[1].apply(outputs[0]).sum()
        #NOTE(review): the loop stops once the flag exceeds 0.5 — presumably
        #the flagger emits a probability-like value; confirm.
        stop_condition = flags >= .5
        outputs.append(flags)
        #theano's `until` terminates the scan loop once the condition holds
        return outputs, until(stop_condition)
class DelayLine(BaseRecurrent, Initializable):
    """Store and (optionally transform) previous inputs in a delay line.
    Parameters
    ----------
    input_dim : int
        Dimension of a single input frame.
    memory_size : int
        Number of frames held in the line; the state/output dimension
        is ``input_dim * memory_size``.
    fixed : bool
        If ``True``, the mixing coefficients ``mu`` are not registered
        as trainable parameters.
    """
    @lazy
    def __init__(self, input_dim, memory_size, fixed=False,
                 **kwargs):
        super(DelayLine, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.memory_size = memory_size
        self.fixed = fixed
        self.output_dim = input_dim * memory_size
    @property
    def W(self):
        return self.mu
    def _allocate(self):
        #One mixing coefficient per delayed component (everything except the
        #newest frame).
        self.mu = shared_floatx_nans((self.output_dim-self.input_dim),
                                     name='mu')
        add_role(self.mu, WEIGHT)
        if not self.fixed:
            self.params.append(self.mu)
    def _initialize(self):
        #NOTE(review): when fixed=True nothing is appended to self.params in
        #_allocate, so this unpacking would fail — confirm _initialize is
        #never reached in that mode.
        mu, = self.params
        self.weights_init.initialize(mu, self.rng)
    @application
    def initial_state(self, state_name, batch_size, *args, **kwargs):
        return tensor.zeros((batch_size, self.output_dim))
    @recurrent(sequences=['inputs', 'mask'], states=['states'],
               outputs=['states'], contexts=[])
    def apply(self, inputs=None, states=None, mask=None):
        '''TODO
        Delay line should support mask to handle differently
        sized sequences
        '''
        mu, = self.params
        #Keep the leak coefficients in a stable range.
        mu = tensor.clip(mu, -1., 1.)
        #Shift the memory: leak-blend the two offset views of the state and
        #prepend the newest input frame.
        m_new = states[:,:-self.input_dim]
        m_prev = states[:,self.input_dim:]
        m_act = (1.-mu)*m_prev + mu*m_new
        next_states = tensor.concatenate((inputs, m_act), axis=-1)
        return next_states
    def get_dim(self, name):
        if name == 'input_':
            return self.input_dim
        if name == 'output':
            return self.output_dim
        if name in (DelayLine.apply.sequences +
                    DelayLine.apply.states):
            return self.output_dim
        #Bug fix: the superclass result was computed but never returned,
        #so unknown names silently yielded None.
        return super(DelayLine, self).get_dim(name)
| EderSantana/blocks_contrib | bricks/recurrent.py | Python | mit | 6,518 |
"""
Extract term candidates from a text file using a kenlm language model and
write them, one tab-separated (text, candidates) pair per line, to
'food.candidates'.
"""
import io
from collections import Counter

import kenlm

from extract import extract_candidates_only

model_file = '/home/alvas/test/food.arpa'
textfile = '/home/alvas/test/food.txt'

model = kenlm.LanguageModel(model_file)

# The context manager guarantees the output file is flushed and closed,
# even if extraction fails part-way (previously the handle was leaked).
with io.open('food.candidates', 'w', encoding='utf8') as fout:
    for text, candidates in extract_candidates_only(textfile, model):
        fout.write(text + '\t' + candidates + '\n')
class ContactHelper:
    """Page-object helper encapsulating contact CRUD actions in the address book."""
    def __init__(self, app):
        self.app = app

    def add_new_contact(self, contact):
        """Create a new contact from the given contact model object."""
        self.open_add_contact_page()
        self.fill_contact_form(contact)
        self.submit_contact()

    def modify_first_contact(self, contact):
        """Edit the first contact in the list, overwriting all of its fields."""
        wd = self.app.wd
        # start modify contact
        wd.find_element_by_xpath("//*[@title='Edit'][1]").click()
        self.fill_contact_form(contact)
        # JavaScript click works around the button being obscured/overlapped.
        bug_element = wd.find_element_by_name('update')
        wd.execute_script("arguments[0].click();", bug_element)

    def del_first_contact(self):
        """Delete the first contact in the list and confirm the alert."""
        wd = self.app.wd
        # select first element from list
        wd.find_element_by_xpath("//table[@id='maintable']//input[@type='checkbox']").click()
        # scroll down page so the Delete button is in view
        wd.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        # submit deletion
        wd.find_element_by_xpath("//div[@class='left']/*[@value='Delete']").click()
        # confirm alert
        # NOTE(review): switch_to_alert() is deprecated in modern Selenium
        # (use switch_to.alert) — confirm the pinned Selenium version.
        wd.switch_to_alert().accept()

    def open_add_contact_page(self):
        wd = self.app.wd
        wd.find_element_by_link_text("add new").click()

    def _fill_field(self, name, value):
        """Clear the named text input and type the given value into it."""
        field = self.app.wd.find_element_by_name(name)
        field.clear()
        field.send_keys(value)

    def fill_contact_form(self, contact):
        """Populate every field of the contact form from the contact model."""
        wd = self.app.wd
        # name section
        self._fill_field("firstname", contact.firstName)
        self._fill_field("middlename", contact.middleName)
        self._fill_field("lastname", contact.lastName)
        self._fill_field("nickname", contact.nickName)
        # address section
        self._fill_field("title", contact.title)
        self._fill_field("company", contact.company)
        self._fill_field("address", contact.address)
        # phone numbers
        self._fill_field("home", contact.homeNumber)
        self._fill_field("mobile", contact.mobileNumber)
        self._fill_field("work", contact.workNumber)
        self._fill_field("fax", contact.faxNumber)
        # email
        self._fill_field("email", contact.email)
        # birthday: day from dropdown list (1-31)
        day_option = wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]")
        if not day_option.is_selected():
            day_option.click()
        # birthday: month from dropdown list (1-12)
        month_option = wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]")
        if not month_option.is_selected():
            month_option.click()
        # year
        self._fill_field("byear", contact.byear)
        # secondary contact details
        self._fill_field("address2", contact.address2)
        self._fill_field("phone2", contact.phoneNumber2)
        # notes
        self._fill_field("notes", contact.notes)

    def submit_contact(self):
        wd = self.app.wd
        # JavaScript click works around the submit button being overlapped.
        bug_element = wd.find_element_by_name('submit')
        wd.execute_script("arguments[0].click();", bug_element)
__author__ = 'GiSDeCain'
| GiSDeCain/Python_Kurs_Ex1 | fixture/contact.py | Python | gpl-3.0 | 4,106 |
"""
user interface for viewing radiation field
"""
import sys
import os
import csv
import time
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors # for wigner log scale
import numpy as np
import logging
from ocelot.gui.settings_plot import *
from ocelot.adaptors.genesis import *
from ocelot.common.globals import * # import of constants like "h_eV_s" and
from ocelot.common.math_op import * # import of mathematical functions like gauss_fit
from ocelot.utils.xfel_utils import *
from ocelot.optics.utils import calc_ph_sp_dens
from ocelot.optics.wave import *
from ocelot.gui.colormaps2d.colormap2d import *
# in order to run decorators properly
import functools
_logger = logging.getLogger(__name__)
__author__ = "Svitozar Serkez, Andrei Trebushinin, Mykola Veremchuk"
@if_plottable
def plot_dfl_all(dfl, **kwargs):
    """
    Plots the given RadiationField() object in all 4 combinations of the
    longitudinal (time/frequency) and transverse (space/inverse-space)
    domains, restoring the original domains afterwards.
    """
    # Current domain pair first, then cycle through the remaining three.
    plot_dfl(dfl, **kwargs)
    for switch_domain in (dfl.fft_z, dfl.fft_xy, dfl.fft_z):
        switch_domain()
        plot_dfl(dfl, **kwargs)
    # The final transform returns the field to its original domains.
    dfl.fft_xy()
@if_plottable
def plot_dfl(dfl, domains=None, z_lim=[], xy_lim=[], figsize=4, cmap=def_cmap, legend=True, phase=False, fig_name=None,
             auto_zoom=False, column_3d=True, savefig=False, showfig=True, return_proj=False, line_off_xy=True,
             slice_xy=False, log_scale=0, cmap_cutoff=0, vartype_dfl=None, **kwargs):
    """
    Plots dfl radiation object in 3d using matplotlib.

    :param dfl: RadiationField() object
    :param domains: longitudinal domain + transverse domain ('t' or 'f' + 's' or 'k') (example: 'tk' - time/inversespace domain)
    :param z_lim: sets the boundaries to CUT the dfl object in z to ranges of e.g. [2,5] um or nm depending on freq_domain=False of True
    :param xy_lim: sets the boundaries to SCALE the dfl object in x and y to ranges of e.g. [2,5] um or urad depending on far_field=False of True
    :param figsize: rescales the size of the figure
    :param cmap: color map which will be used for plotting (http://matplotlib.org/users/colormaps.html)
    :param legend: not used yet
    :param phase: bool type variable, can replace Z projection or spectrum with phase front distribution z dimensions correspondingly
    :param fig_name: the desired name of the output figure, would be used as suffix to the image filename if savefig==True
    :param auto_zoom: bool type variable, automatically scales xyz the images to the (1%?) of the intensity limits
    :param column_3d: bool type variable, plots top and side views of the radiation distribution
    :param savefig: bool type variable, allow to save figure to image (savefig='png' (default) or savefig='eps', etc...)
    :param showfig: bool type variable, allow to display figure (slower)
    :param return_proj: bool type variable, returns [xy_proj,yz_proj,xz_proj,x,y,z] array.
    :param line_off_xy: bool type variable, if True, the transverse size of radiation are calculated at x=0 and y=0 position, otherwise marginal distribution are used
    :param slice_xy: bool type variable, if True, slices will be plotted; if False, projections will be plotted
    :param log_scale: bool type variable, if True, log scale will be used for potting
    :param cmap_cutoff: 0 <= cmap_cutoff <= 1; all pixels that have intensity lower than cmap_cutoff will be seted to white color
    :param vartype_dfl: the data type to store dfl in memory [either complex128 (two 64-bit floats) or complex64 (two 32-bit floats)], may save memory
    :param kwargs:
    :return: None, or [xy_proj, yz_proj, xz_proj, x, y, z] if return_proj
    """
    import matplotlib.colors as colors

    if showfig == False and savefig == False:
        return

    # Defensive copies: the defaults are shared mutable lists and the limits
    # are modified in place below, which previously also mutated the caller's
    # own list object.
    z_lim = list(z_lim)
    xy_lim = list(xy_lim)

    filePath = dfl.filePath

    _logger.info('plotting radiation field (dfl)')
    start_time = time.time()

    if dfl.Nx() == 1 or dfl.Ny() == 1:
        _logger.warning(ind_str + 'plot_dfl() works only with RadiationFields, with dfl.Nx(), dfl.Ny() > 1')

    # Work on a copy so the caller's field is never transformed in place.
    if vartype_dfl is not None:
        dfl_copy = RadiationField()
        dfl_copy.copy_param(dfl, version=2)
        dfl_copy.fld = dfl.fld.astype(vartype_dfl)
    else:
        dfl_copy = deepcopy(dfl)

    if domains is None:
        domains = dfl_copy.domains()
    else:
        dfldomain_check(domains)

    far_field = 'k' in domains
    freq_domain = 'f' in domains

    suffix = ''

    if dfl_copy.Nz() != 1:
        # Time-dependent field: build the longitudinal scale.
        ncar_z = dfl_copy.Nz()
        leng_z = dfl_copy.Lz()
        z = np.linspace(0, leng_z, ncar_z)
    else:
        # Single-slice field: no longitudinal information to plot.
        column_3d = False
        phase = True
        freq_domain = False
        z_lim = []

    # number of transverse mesh points
    ncar_x = dfl_copy.Nx()
    ncar_y = dfl_copy.Ny()
    E_pulse = dfl_copy.E()

    if dfl_copy.Nz() != 1:
        if freq_domain:
            if dfl_copy.domain_z == 't':
                dfl_copy.fft_z()
            z = h_eV_s * speed_of_light / dfl_copy.scale_z()
            unit_z = r'eV'
            z_label = r'$E_{{ph}}$ [{}]'.format(unit_z)
            z_labelv = r'[arb. units]'
            z_title = 'Spectrum'
            z_color = 'red'
            suffix += '_fd'
        else:
            if dfl_copy.domain_z == 'f':
                dfl_copy.fft_z()
            z = dfl_copy.scale_z() * 1e6
            unit_z = r'$\mu$m'
            z_label = '$s$ [' + unit_z + ']'
            z_labelv = r'Power [W]'
            z_title = 'Z projection'
            z_color = 'blue'
    else:
        z = 0

    if z_lim != []:
        if len(z_lim) == 1:
            # Bug fix: was [z_lim, z_lim], which nested the list instead of
            # duplicating the single boundary value.
            z_lim = [z_lim[0], z_lim[0]]
        if z_lim[0] > z_lim[1]:
            z_lim[0] = -np.inf
            z_lim[1] = np.inf
        if z_lim[1] < np.amin(z) or z_lim[1] > np.amax(z):
            z_lim[1] = np.amax(z)
        if z_lim[0] > np.amax(z) or z_lim[0] < np.amin(z):
            z_lim[0] = np.amin(z)
        _logger.debug(ind_str + 'setting z-axis limits to ' + str(np.amin(z)) + ':' + str(z_lim[0]) + '-' + str(
            z_lim[1]) + ':' + str(np.amax(z)))
        z_lim_1 = np.where(z <= z_lim[0])[0][-1]
        z_lim_2 = np.where(z >= z_lim[1])[0][0]
        if z_lim_1 == z_lim_2 and z_lim_1 == 0:
            z_lim_2 = z_lim_1 + 1
        elif z_lim_1 == z_lim_2 and z_lim_1 != 0:
            z_lim_1 = z_lim_2 - 1
        dfl_copy.fld = dfl_copy.fld[z_lim_1:z_lim_2, :, :]
        z = z[z_lim_1:z_lim_2]
        suffix += '_zoom_%.2f-%.2f' % (np.amin(z), np.amax(z))

    if far_field:
        if dfl_copy.domain_xy == 's':
            dfl_copy.fft_xy()
        x = dfl_copy.scale_x() * 1e6
        y = dfl_copy.scale_y() * 1e6
        unit_xy = r'$\mu$rad'
        x_label = r'$\theta_x$ [' + unit_xy + ']'
        y_label = r'$\theta_y$ [' + unit_xy + ']'
        suffix += '_ff'
        x_title = 'X divergence'
        y_title = 'Y divergence'
        xy_title = 'Far field intensity'
        x_y_color = 'green'
    else:
        if dfl_copy.domain_xy == 'k':
            dfl_copy.fft_xy()
        x = dfl_copy.scale_x() * 1e6
        y = dfl_copy.scale_y() * 1e6
        unit_xy = r'$\mu$m'
        x_label = 'x [' + unit_xy + ']'
        y_label = 'y [' + unit_xy + ']'
        x_title = 'X projection'
        y_title = 'Y projection'
        xy_title = 'Intensity'
        x_y_color = 'blue'

    # NOTE(review): this unconditionally downcasts to complex64 to save memory
    # for the projections, even when vartype_dfl=complex128 was requested —
    # confirm this is intended.
    dfl_copy.fld = dfl_copy.fld.astype(np.complex64)
    xy_proj = dfl_copy.int_xy()
    xy_proj_ph = np.angle(np.sum(dfl_copy.fld, axis=0))
    if slice_xy:
        yz_proj = dfl_copy.intensity()[:, :, dfl_copy.Nx() // 2]
        xz_proj = dfl_copy.intensity()[:, dfl_copy.Ny() // 2, :]
        xz_title = 'Top slice y=0'
        yz_title = 'Side slice x=0'
        z_proj = dfl_copy.intensity()[:, dfl_copy.Ny() // 2, dfl_copy.Nx() // 2]
        z_title += ' (on-axis)'
    else:
        yz_proj = dfl_copy.int_zy()
        xz_proj = dfl_copy.int_zx()
        xz_title = 'Top projection'
        yz_title = 'Side projection'
        z_proj = dfl_copy.int_z()

    dx = abs(x[1] - x[0])
    dy = abs(y[1] - y[0])

    if log_scale:
        suffix += '_log'

    if fig_name is None:
        if dfl_copy.fileName() == '':
            fig = plt.figure('Radiation distribution' + suffix)
        else:
            fig = plt.figure('Radiation distribution' + suffix + ' ' + dfl_copy.fileName())
    else:
        fig = plt.figure(fig_name + suffix)
    del dfl_copy

    fig.clf()
    fig.set_size_inches(((3 + 2 * column_3d) * figsize, 3 * figsize), forward=True)
    cmap_ph = plt.get_cmap('hsv')

    if line_off_xy:
        x_line = xy_proj[int((ncar_y - 1) / 2), :]
        y_line = xy_proj[:, int((ncar_x - 1) / 2)]
        x_title += ' lineoff'
        y_title += ' lineoff'
    else:
        x_line = np.sum(xy_proj, axis=0)
        y_line = np.sum(xy_proj, axis=1)

    if np.max(x_line) != 0 and np.max(y_line) != 0:
        x_line, y_line = x_line / np.max(x_line), y_line / np.max(y_line)

    if cmap_cutoff not in [None, False, 0]:
        # Pixels below the cutoff are pushed slightly negative so that the
        # colormap's "under" color (white) is used for them.
        cmap = matplotlib.cm.get_cmap(cmap)
        cmap.set_under("w")
        xy_proj[xy_proj < xy_proj.max() * cmap_cutoff] = -1e-10
        yz_proj[yz_proj < yz_proj.max() * cmap_cutoff] = -1e-10
        xz_proj[xz_proj < xz_proj.max() * cmap_cutoff] = -1e-10

    if log_scale:
        # NaN-out non-positive values so LogNorm can be applied.
        xy_proj[xy_proj <= 0] = None
        yz_proj[yz_proj <= 0] = None
        xz_proj[xz_proj <= 0] = None
        z_proj[z_proj <= 0] = None

    ax_int = fig.add_subplot(2, 2 + column_3d, 1)
    if log_scale:
        intplt = ax_int.pcolormesh(x, y, xy_proj, norm=colors.LogNorm(vmin=np.nanmin(xy_proj), vmax=np.nanmax(xy_proj)),
                                   cmap=cmap)
    else:
        intplt = ax_int.pcolormesh(x, y, xy_proj, cmap=cmap, vmin=0)
    ax_int.set_title(xy_title, fontsize=15)
    ax_int.set_xlabel(r'' + x_label)
    ax_int.set_ylabel(y_label)
    if np.size(z) > 1 and kwargs.get('showtext', True):
        ax_int.text(0.01, 0.01, r'$E_{p}$=%.2e J' % (E_pulse), horizontalalignment='left', verticalalignment='bottom',
                    fontsize=12, color='white', transform=ax_int.transAxes)

    if phase == True:
        ax_ph = fig.add_subplot(2, 2 + column_3d, 4 + column_3d, sharex=ax_int, sharey=ax_int)
        ax_ph.pcolormesh(x, y, xy_proj_ph, cmap=cmap_ph)
        ax_ph.axis([np.min(x), np.max(x), np.min(y), np.max(y)])
        ax_ph.set_title('Phase', fontsize=15)
    else:
        ax_z = fig.add_subplot(2, 2 + column_3d, 4 + column_3d)
        if log_scale:
            ax_z.semilogy(z, z_proj, linewidth=1.5, color=z_color)
        else:
            ax_z.plot(z, z_proj, linewidth=1.5, color=z_color)
        ax_z.set_title(z_title, fontsize=15)
        ax_z.set_xlabel(z_label)
        ax_z.set_ylabel(z_labelv)
        ax_z.set_ylim(ymin=0)

    ax_proj_x = fig.add_subplot(2, 2 + column_3d, 3 + column_3d, sharex=ax_int)
    ax_proj_x.set_title(x_title, fontsize=15)
    if sum(x_line) != 0:
        try:
            x_line_f, rms_x = gauss_fit(x, x_line)  # fit with Gaussian, and return fitted function and rms
        except RuntimeWarning:
            x_line_f = np.zeros_like(x_line)
            rms_x = 0
        try:
            fwhm_x = fwhm3(x_line)[1] * dx  # measure FWHM
        except ValueError:
            fwhm_x = 0
    else:
        x_line_f = np.zeros_like(x_line)
        rms_x = 0
        fwhm_x = 0
    if log_scale:
        ax_proj_x.semilogy(x, x_line, linewidth=2, color=x_y_color)
        ax_proj_x.semilogy(x, x_line_f, color='grey')
        ax_proj_x.set_ylim(ymin=np.amin(x_line), ymax=1)
    else:
        ax_proj_x.plot(x, x_line, linewidth=2, color=x_y_color)
        ax_proj_x.plot(x, x_line_f, color='grey')
        ax_proj_x.set_ylim(ymin=0, ymax=1)
    if kwargs.get('showtext', True):
        try:
            ax_proj_x.text(0.95, 0.95,
                           'fwhm={:.3g} '.format(fwhm_x) + r' [{:}]'.format(unit_xy) + '\nrms={:.3g}'.format(
                               rms_x) + r' [{:}]'.format(unit_xy), horizontalalignment='right', verticalalignment='top',
                           transform=ax_proj_x.transAxes, fontsize=12)
        except:
            pass
    ax_proj_x.set_xlabel(x_label)

    ax_proj_y = fig.add_subplot(2, 2 + column_3d, 2, sharey=ax_int)
    ax_proj_y.set_title(y_title, fontsize=15)
    if sum(y_line) != 0:
        try:
            y_line_f, rms_y = gauss_fit(y, y_line)  # fit with Gaussian, and return fitted function and rms
        except RuntimeWarning:
            y_line_f = np.zeros_like(y_line)
            rms_y = 0
        try:
            fwhm_y = fwhm3(y_line)[1] * dy  # measure FWHM
        except ValueError:
            fwhm_y = 0
    else:
        y_line_f = np.zeros_like(y_line)
        rms_y = 0
        fwhm_y = 0
    if log_scale:
        ax_proj_y.semilogx(y_line, y, linewidth=2, color=x_y_color)
        ax_proj_y.semilogx(y_line_f, y, color='grey')
        ax_proj_y.set_xlim(xmin=np.nanmin(y_line), xmax=1)
    else:
        ax_proj_y.plot(y_line, y, linewidth=2, color=x_y_color)
        ax_proj_y.plot(y_line_f, y, color='grey')
        ax_proj_y.set_xlim(xmin=0, xmax=1)
    if kwargs.get('showtext', True):
        try:
            ax_proj_y.text(0.95, 0.95,
                           'fwhm={:.3g} '.format(fwhm_y) + r' [{:}]'.format(unit_xy) + '\nrms={:.3g}'.format(
                               rms_y) + r' [{:}]'.format(unit_xy), horizontalalignment='right', verticalalignment='top',
                           transform=ax_proj_y.transAxes, fontsize=12)
        except:
            pass
    ax_proj_y.set_ylabel(y_label)

    if column_3d:
        if log_scale:
            cut_off = 1e-6
            yz_proj[yz_proj < np.nanmax(yz_proj) * cut_off] = 0
            xz_proj[xz_proj < np.nanmax(xz_proj) * cut_off] = 0
            min_xz_proj = np.nanmin(xz_proj[xz_proj != 0])
            min_yz_proj = np.nanmin(yz_proj[yz_proj != 0])
        if phase == True:
            ax_proj_xz = fig.add_subplot(2, 2 + column_3d, 6)
        else:
            ax_proj_xz = fig.add_subplot(2, 2 + column_3d, 6, sharex=ax_z)
        if log_scale:
            ax_proj_xz.pcolormesh(z, x, np.swapaxes(xz_proj, 1, 0),
                                  norm=colors.LogNorm(vmin=min_xz_proj, vmax=np.nanmax(xz_proj)), cmap=cmap)
        else:
            ax_proj_xz.pcolormesh(z, x, np.swapaxes(xz_proj, 1, 0), cmap=cmap, vmin=0)
        ax_proj_xz.set_title(xz_title, fontsize=15)
        ax_proj_xz.set_xlabel(z_label)
        ax_proj_xz.set_ylabel(x_label)
        ax_proj_yz = fig.add_subplot(2, 2 + column_3d, 3, sharey=ax_int, sharex=ax_proj_xz)
        if log_scale:
            ax_proj_yz.pcolormesh(z, y, np.swapaxes(yz_proj, 1, 0),
                                  norm=colors.LogNorm(vmin=min_yz_proj, vmax=np.nanmax(yz_proj)), cmap=cmap)
        else:
            ax_proj_yz.pcolormesh(z, y, np.swapaxes(yz_proj, 1, 0), cmap=cmap, vmin=0)
        ax_proj_yz.set_title(yz_title, fontsize=15)
        ax_proj_yz.set_xlabel(z_label)
        ax_proj_yz.set_ylabel(y_label)

    cbar = 0  # colorbar is currently disabled
    if cbar:
        fig.subplots_adjust(top=0.95, bottom=0.05, right=0.85, left=0.1)
        cbar_int = fig.add_axes([0.89, 0.15, 0.015, 0.7])
        cbar = plt.colorbar(intplt, cax=cbar_int)
        cbar.set_label(r'a.u.', size=10)

    if auto_zoom != False:
        size_x = np.max(abs(x[np.nonzero(x_line > 0.005)][[0, -1]]))
        # Bug fix: size_y previously re-used x/x_line (copy-paste), making
        # the auto-zoom ignore the vertical extent of the field.
        size_y = np.max(abs(y[np.nonzero(y_line > 0.005)][[0, -1]]))
        size_xy = np.max([size_x, size_y])
        if z_lim == []:
            zlim_calc = z[np.nonzero(z_proj > np.max(z_proj) * 0.005)][[0, -1]]
            if column_3d == True:
                ax_proj_xz.set_xlim(zlim_calc)
                ax_proj_xz.set_ylim([-size_xy, size_xy])
            if phase == False:
                ax_z.set_xlim(zlim_calc)
        ax_int.axis([-size_xy, size_xy, -size_xy, size_xy])
        suffix += '_zmd'
    else:
        if column_3d == True:
            ax_proj_xz.axis('tight')
            ax_proj_yz.axis('tight')
        elif column_3d == False and phase == False:
            ax_z.axis('tight')
        ax_int.autoscale(tight=True)

    if len(xy_lim) == 2:
        ax_int.axis([-xy_lim[0], xy_lim[0], -xy_lim[1], xy_lim[1]])
        if column_3d:
            # Guard: ax_proj_xz only exists when column_3d is enabled
            # (previously this raised NameError for column_3d=False).
            ax_proj_xz.set_ylim([-xy_lim[0], xy_lim[0]])
    elif len(xy_lim) == 1:
        ax_int.axis([-xy_lim[0], xy_lim[0], -xy_lim[0], xy_lim[0]])
        if column_3d:
            ax_proj_xz.set_ylim([-xy_lim[0], xy_lim[0]])

    fig.subplots_adjust(wspace=0.4, hspace=0.4)

    plt.draw()
    if savefig != False:
        if savefig == True:
            savefig = 'png'
        _logger.debug(ind_str + 'saving *{:}.{:}'.format(suffix, savefig))
        fig.savefig(filePath + suffix + '.' + str(savefig), format=savefig)
    _logger.debug(ind_str + 'done in {:.2f} seconds'.format(time.time() - start_time))

    plt.draw()
    if showfig == True:
        _logger.debug(ind_str + 'showing dfl')
        rcParams["savefig.directory"] = os.path.dirname(filePath)
        plt.show()
    else:
        plt.close(fig)

    if return_proj:
        return [xy_proj, yz_proj, xz_proj, x, y, z]
    else:
        return
@if_plottable
def plot_wigner(wig_or_out, z=np.inf, x_units='um', y_units='ev', x_lim=(None, None), y_lim=(None, None), downsample=1,
                autoscale=None, figsize=3, cmap='seismic', fig_name=None, savefig=False, showfig=True,
                plot_proj=1, plot_text=1, plot_moments=0, plot_cbar=0, log_scale=0, **kwargs):
    """
    Plots wigner distribution (WD) with marginals
    :param wig_or_out: may be WignerDistribution() or GenesisOutput() object
    :param z: (if isinstance(wig_or_out, GenesisOutput)) location at which WD will be calculated
    :param x_units: ['um' or 'fs'] units to display power scale
    :param y_units: ['ev' or 'nm'] units to display spectrum scale
    :param x_lim: scaling limits for x in given units, (min,max) or [min,max], e.g: (None,6)
    :param y_lim: scaling limits for y in given units, (min,max) or [min,max], e.g: (None,6)
    :param downsample: speeds up plotting by displaying only 1/downsample**2 points
    :param autoscale: find x_lim and y_lim values automatically. Only (values > max_value * autoscale) will be displayed
    :param figsize: rescales the size of the figure
    :param cmap: colormap (http://matplotlib.org/users/colormaps.html)
    :param fig_name: the desired name of the output figure, would be used as suffix to the image filename if savefig==True
    :param savefig: bool type variable, allow to save figure to image (savefig='png' (default) or savefig='eps', etc...)
    :param showfig: bool type variable, allow to display figure (slower)
    :param plot_proj: plot marginal distributions (power and spectrum projections)
    :param plot_text: show text annotations (W_max, spatial coherence note, pulse energy)
    :param plot_moments: plot moments (instantaneous frequency and group delay) on top of Wigner distribution
    :param plot_cbar: plots colorbar
    :param log_scale: plots wigner distribution in logarithmic (symlog) color scale
    :param kwargs:
    :return: None
    """
    if showfig == False and savefig == False:
        return
    _logger.info('plotting Wigner distribution')
    # accept either a Genesis output object (WD is then computed at position z) or a ready WignerDistribution
    if not hasattr(wig_or_out, 'wig') and hasattr(wig_or_out, 'calc_radsize'):
        W = wigner_out(wig_or_out, z)
    elif hasattr(wig_or_out, 'wig'):
        W = wig_or_out
    else:
        raise ValueError('Unknown object for Wigner plot')
    # compose the figure window title
    if fig_name is None:
        if W.fileName() == '':
            fig_text = 'Wigner distribution'
        else:
            fig_text = 'Wigner distribution ' + W.fileName()
    else:
        fig_text = fig_name
    if W.z != None:
        fig_text += ' ' + str(W.z) + 'm'
    if autoscale:
        fig_text += ' autsc'
    fig = plt.figure(fig_text)
    plt.clf()
    fig.set_size_inches((4.5 * figsize, 3.25 * figsize), forward=True)
    power = W.power()  # power marginal (projection onto the s/t axis)
    spec = W.spectrum()  # spectral marginal (projection onto the photon energy axis)
    wigner = W.wig
    wigner_lim = np.amax(abs(W.wig))  # symmetric color-scale limit
    if plot_moments:
        inst_freq = W.inst_freq()
        group_delay = W.group_delay()
    # horizontal (power) axis scaling and label
    if x_units == 'fs':
        power_scale = -W.s / speed_of_light * 1e15
        p_label_txt = 't [fs]'
        if plot_moments:
            group_delay = group_delay / speed_of_light * 1e15
    else:
        power_scale = W.s * 1e6
        p_label_txt = 's [$\mu$m]'
        if plot_moments:
            group_delay = group_delay * 1e6
    # vertical (spectrum) axis scaling and label
    if y_units in ['ev', 'eV']:
        spec_scale = W.phen
        f_label_txt = '$E_{photon}$ [eV]'
        if plot_moments:
            inst_freq = inst_freq
    else:
        spec_scale = W.freq_lamd
        f_label_txt = '$\lambda$ [nm]'
        if plot_moments:
            inst_freq = h_eV_s * speed_of_light * 1e9 / inst_freq
    if plot_proj:
        # definitions for the axes: central 2d plot with marginal plots on top and right
        left, width = 0.18, 0.57
        bottom, height = 0.14, 0.55
        left_h = left + width + 0.02 - 0.02
        bottom_h = bottom + height + 0.02 - 0.02
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.15, height]
        axHistx = plt.axes(rect_histx)  # power marginal (top)
        axHisty = plt.axes(rect_histy)  # spectrum marginal (right)
        axScatter = plt.axes(rect_scatter, sharex=axHistx, sharey=axHisty)
    else:
        axScatter = plt.axes()
    # cmap='RdBu_r'
    # axScatter.imshow(wigner, cmap=cmap, vmax=wigner_lim, vmin=-wigner_lim)
    if log_scale != 0:
        if log_scale == 1:
            log_scale = 0.01  # default linear threshold fraction for the symlog scale
        wigplot = axScatter.pcolormesh(power_scale[::downsample], spec_scale[::downsample],
                                       wigner[::downsample, ::downsample], cmap=cmap,
                                       norm=colors.SymLogNorm(linthresh=wigner_lim * log_scale, linscale=2,
                                                              vmin=-wigner_lim, vmax=wigner_lim),
                                       vmax=wigner_lim, vmin=-wigner_lim)
    else:
        wigplot = axScatter.pcolormesh(power_scale[::downsample], spec_scale[::downsample],
                                       wigner[::downsample, ::downsample], cmap=cmap, vmax=wigner_lim, vmin=-wigner_lim)
    if plot_cbar:
        from mpl_toolkits.axes_grid1.inset_locator import inset_axes
        cbaxes = inset_axes(axScatter, width="50%", height="3%", loc=1)
        fig.colorbar(wigplot, cax=cbaxes, orientation='horizontal')
    if plot_text:
        if hasattr(wig_or_out, 'is_spectrogram'):
            if wig_or_out.is_spectrogram:
                axScatter.text(0.98, 0.98, 'Spectrogram', horizontalalignment='right', verticalalignment='top',
                               transform=axScatter.transAxes, color='red')
        axScatter.text(0.02, 0.98, r'$W_{{max}}$= {:.2e}'.format(np.amax(wigner)), horizontalalignment='left',
                       verticalalignment='top', transform=axScatter.transAxes)  # fontsize=12,
        if hasattr(W, 'on_axis'):
            if W.on_axis == True:
                axScatter.text(0.5, 0.98, r"(on axis)", fontsize=10, horizontalalignment='center',
                               verticalalignment='top', transform=axScatter.transAxes)
            else:
                axScatter.text(0.5, 0.98, r"(assuming full spatial coherence)", fontsize=10,
                               horizontalalignment='center', verticalalignment='top', transform=axScatter.transAxes)
    if plot_moments:
        # scatter point size is weighted by the marginal value; points below 1% of maximum are suppressed
        weight_power = power / np.max(power)
        weight_power[weight_power < np.nanmax(weight_power) / 1e2] = 0
        idx_power_fine = np.where(weight_power > np.nanmax(weight_power) / 1e2)
        weight_spec = spec / np.max(spec)
        weight_spec[weight_spec < np.nanmax(weight_spec) / 1e2] = 0
        idx_spec_fine = np.where(weight_spec > np.nanmax(weight_spec) / 1e2)
        plt.scatter(power_scale[idx_power_fine], inst_freq[idx_power_fine], s=weight_power[idx_power_fine], c='black',
                    linewidths=2)
        plt.scatter(group_delay[idx_spec_fine], spec_scale[idx_spec_fine], s=weight_spec[idx_spec_fine], c='green',
                    linewidths=2)
        # axScatter.plot(power_scale[::downsample], inst_freq[::downsample], "-k")
        # axScatter.plot(group_delay[::downsample], spec_scale[::downsample], "-g")
    if autoscale == 1:
        autoscale = 1e-2  # default relative threshold for autoscaling
    if autoscale not in [0, None]:
        # keep only the region where the marginals exceed max_value * autoscale
        max_power = np.amax(power)
        max_spectrum = np.amax(spec)
        idx_p = np.where(power > max_power * autoscale)[0]
        idx_s = np.where(spec > max_spectrum * autoscale)[0]
        x_lim_appl = [power_scale[idx_p[0]], power_scale[idx_p[-1]]]
        x_lim_appl = np.array(x_lim_appl)
        x_lim_appl.sort()
        y_lim_appl = [spec_scale[idx_s[0]], spec_scale[idx_s[-1]]]
        y_lim_appl = np.array(y_lim_appl)
        y_lim_appl.sort()
    else:
        x_lim_appl = (np.amin(power_scale), np.amax(power_scale))
        y_lim_appl = (np.amin(spec_scale), np.amax(spec_scale))
    if x_units == 'fs':
        x_lim_appl = np.flipud(x_lim_appl)  # time axis runs opposite to s
    # explicit user-supplied limits override the automatically derived ones
    if x_lim[0] is not None:
        x_lim_appl[0] = x_lim[0]
    if x_lim[1] is not None:
        x_lim_appl[1] = x_lim[1]
    if y_lim[0] is not None:
        y_lim_appl[0] = y_lim[0]
    if y_lim[1] is not None:
        y_lim_appl[1] = y_lim[1]
    if plot_proj:
        axHistx.plot(power_scale, power)
        if plot_text:
            axHistx.text(0.02, 0.95, r'E= {:.2e} J'.format(W.energy()), horizontalalignment='left',
                         verticalalignment='top', transform=axHistx.transAxes)  # fontsize=12,
        axHistx.set_ylabel('Power [W]')
        if spec.max() <= 0:
            # avoid division by zero for a non-positive spectrum
            axHisty.plot(spec, spec_scale)
        else:
            axHisty.plot(spec / spec.max(), spec_scale)
        axHisty.set_xlabel('Spectrum [a.u.]')
        axScatter.axis('tight')
        axScatter.set_xlabel(p_label_txt)
        axScatter.set_ylabel(f_label_txt)
        axHistx.set_ylim(ymin=0)
        axHisty.set_xlim(xmin=0)
        # hide the tick labels the marginal axes share with the central plot
        for tl in axHistx.get_xticklabels():
            tl.set_visible(False)
        for tl in axHisty.get_yticklabels():
            tl.set_visible(False)
        axHistx.yaxis.major.locator.set_params(nbins=4)
        axHisty.xaxis.major.locator.set_params(nbins=2)
        axHistx.set_xlim(x_lim_appl[0], x_lim_appl[1])
        axHisty.set_ylim(y_lim_appl[0], y_lim_appl[1])
        if log_scale != 0:
            axHistx.set_ylim(np.nanmin(power), np.nanmax(power))
            axHisty.set_xlim(np.nanmin(spec), np.nanmax(spec))
            axHisty.set_xscale('log')
            axHistx.set_yscale('log')
    else:
        axScatter.axis('tight')
        axScatter.set_xlabel(p_label_txt)
        axScatter.set_ylabel(f_label_txt)
    # axScatter.set_xlim(x_lim[0], x_lim[1])
    # axScatter.set_ylim(y_lim[0], y_lim[1])
    if savefig != False:
        if savefig == True:
            savefig = 'png'
        if W.z is None:
            save_path = W.filePath + '_wig.' + str(savefig)
            # fig.savefig(W.filePath + '_wig.' + str(savefig), format=savefig)
        else:
            save_path = W.filePath + '_wig_' + str(W.z) + 'm.' + str(savefig)
            # fig.savefig(W.filePath + '_wig_' + str(W.z) + 'm.' + str(savefig), format=savefig)
        _logger.debug(ind_str + 'saving to {}'.format(save_path))
        fig.savefig(save_path, format=savefig)
    plt.draw()
    if showfig == True:
        dir = os.path.dirname(W.filePath)
        rcParams["savefig.directory"] = dir
        plt.show()
    else:
        # plt.close('all')
        plt.close(fig)
@if_plottable
def plot_dfl_waistscan(sc_res, fig_name=None, figsize=4, showfig=True, savefig=False):
    """
    Plots the result of a radiation-field waist scan: peak and on-axis photon
    density versus longitudinal position, plus transverse sizes (fwhm and std
    in x and y) on a twin y-axis.

    :param sc_res: waist scan result object (provides z_pos, phdens_max, phdens_onaxis, fwhm_x/y, std_x/y, filePath, fileName())
    :param fig_name: desired figure name; derived from sc_res.fileName() if None
    :param figsize: figure size multiplier
    :param showfig: display the figure on screen
    :param savefig: False, True (-> 'png') or an image format string
    """
    _logger.info('plot dfl waist scan')
    if showfig == False and savefig == False:
        return
    # pick the figure title: an explicit name wins, otherwise derive it from the file name
    if fig_name is not None:
        fig = plt.figure(fig_name)
    elif sc_res.fileName() == '':
        fig = plt.figure('Waist scan')
    else:
        fig = plt.figure(sc_res.fileName() + ' waist scan')
    plt.clf()
    fig.set_size_inches((3 * figsize, 2 * figsize), forward=True)

    # photon density curves on the left axis
    ax_dens = fig.add_subplot(1, 1, 1)
    ax_dens.plot(sc_res.z_pos, sc_res.phdens_max, 'k', label='max', linewidth=2)
    ax_dens.plot(sc_res.z_pos, sc_res.phdens_onaxis, 'grey', label='on-axis')
    ax_dens.set_xlabel('z [m]')
    ax_dens.set_ylabel('photon density [arb.units]')
    ax_dens.legend(loc='lower left')

    # transverse sizes [um] share the z axis, on a twin right axis
    ax_sz = ax_dens.twinx()
    for attr, style in (('fwhm_x', 'g--'), ('fwhm_y', 'b--'), ('std_x', 'g:'), ('std_y', 'b:')):
        ax_sz.plot(sc_res.z_pos, getattr(sc_res, attr) * 1e6, style, label=attr)
    ax_sz.set_ylabel('size [um]')
    ax_sz.legend(loc='lower right')

    plt.draw()
    if savefig != False:
        if savefig == True:
            savefig = 'png'
        _logger.debug(ind_str + 'saving *.' + savefig)
        _logger.debug(ind_str + 'to ' + sc_res.filePath + '_{:.2f}m-{:.2f}m-waistscan.'.format(sc_res.z_pos[0],
                                                                                               sc_res.z_pos[-1]) + str(
            savefig))
        fig.savefig(
            sc_res.filePath + '_{:.2f}m-{:.2f}m-waistscan.'.format(sc_res.z_pos[0], sc_res.z_pos[-1]) + str(savefig),
            format=savefig)
    if showfig:
        _logger.debug(ind_str + 'showing fig')
        plt.show()
    else:
        plt.close('all')
@if_plottable
def plot_trf(trf, mode='tr', autoscale=0, showfig=True, savefig=None, fig_name=None):
    """
    plots TransferFunction() object,
    mode:
        'tr' - transmission
        'ref' - reflection
    autoscale = scale down to several FWHMma in frequency and several bumps in time
    showfig - display on screen or not
    savefig - path to save png (if any)
    fig_name - optional name for the matplotlib figure window
    """
    n_width = 8  # number of bandwidths (trf.dk) shown around the center when autoscaling
    l = len(trf.k)
    L = 2 * pi / (trf.k[1] - trf.k[0])  # spatial window corresponding to the k-space sampling
    trf_s_td = np.linspace(0, -L, l) * 1e6  # time-domain (impulse response) coordinate [um]
    trf_s_fd = trf.ev()  # frequency-domain coordinate (photon energy)
    # trf_s_fd = trf.k
    if autoscale:
        # frequency window of +-n_width bandwidths around the central wavenumber, converted to photon energy
        trf_s_fd_xlim = np.array([trf.mid_k - n_width * trf.dk, trf.mid_k + n_width * trf.dk])
        trf_s_fd_xlim = h_eV_s * speed_of_light / (2 * pi / trf_s_fd_xlim)
        trf_s_fd_xlim = np.sort(trf_s_fd_xlim)
    # work on copies so the TransferFunction object itself is not modified
    if mode == 'tr':
        trf_fd = deepcopy(trf.tr)
    elif mode == 'ref':
        trf_fd = deepcopy(trf.ref)
    else:
        raise ValueError('mode argument should be "tr" or "ref"')
    # impulse response (power) via inverse FFT of the rescaled complex transfer function
    trf_fd_tmp = trf_fd / (abs(trf_s_td[-1]) / l)
    trf_td = np.fft.ifft(np.fft.fftshift(trf_fd_tmp))
    trf_td = abs(trf_td) ** 2
    del trf_fd_tmp
    if hasattr(trf, 'cryst'):
        title = trf.cryst.lattice.element_name + ' ' + str(trf.cryst.ref_idx) + ' ' + mode
    else:
        title = ''
    if fig_name is None:
        trf_fig = plt.figure('Filter ' + title)
    else:
        trf_fig = plt.figure(fig_name)
    trf_fig.set_size_inches((9, 11), forward=True)
    if title != '':
        trf_fig.suptitle(title)
    # three stacked subplots: |amplitude|^2 and phase share the photon-energy axis; impulse response below
    ax_fd_abs = trf_fig.add_subplot(3, 1, 1)
    ax_fd_abs.clear()
    ax_fd_ang = trf_fig.add_subplot(3, 1, 2, sharex=ax_fd_abs)
    ax_fd_ang.clear()
    ax_td = trf_fig.add_subplot(3, 1, 3)
    ax_td.clear()
    trf_fig.subplots_adjust(hspace=0)
    trf_fig.subplots_adjust(top=0.95, bottom=0.2, right=0.85, left=0.15)
    ax_fd_abs.plot(trf_s_fd, np.abs(trf_fd) ** 2, 'k')
    ax_fd_ang.plot(trf_s_fd, np.angle(trf_fd), 'g')
    ax_td.semilogy(trf_s_td, trf_td)
    ax_fd_abs.set_ylabel(r'|amplitude|$^2$')
    ax_fd_ang.set_ylabel('phase')
    ax_fd_ang.set_xlabel('ph.energy')
    ax_td.set_ylabel('impulse responce (power)')
    ax_td.set_xlabel('s [um]')
    ax_fd_abs.axis('tight')
    ax_fd_abs.set_ylim([0, 1])
    ax_fd_ang.set_ylim([-np.pi, np.pi])
    if autoscale:
        ax_fd_abs.set_xlim(trf_s_fd_xlim)
    if autoscale:
        # zoom the impulse response to the first n_width bumps and rescale y accordingly
        ax_td.set_xlim(-n_width * pi / trf.dk * 1e6, 0)
        idx = np.argwhere(trf_s_td > -n_width * pi / trf.dk * 1e6)[-1]
        ax_td.set_ylim(np.amin(trf_td[1:idx]), np.amax(trf_td[1:idx]))
    ax_fd_abs.grid(True)
    ax_fd_ang.grid(True)
    ax_td.grid(True)
    # hide x tick labels of the top plot (shared with the phase plot directly below)
    for label in ax_fd_abs.get_xticklabels():
        label.set_visible(False)
    # ax_td.axis('tight')
    pos1 = ax_td.get_position()  # get the original position
    pos2 = [pos1.x0 + 0, pos1.y0 - 0.1, pos1.width / 1.0, pos1.height / 0.9]
    ax_td.set_position(pos2)  # shift the impulse-response plot down, away from the shared pair
    plt.draw()
    if savefig != None and savefig.__class__ == str:
        trf_fig.savefig(savefig, format='png')
    # if savefig == True:
    #     savefig = 'png'
    # fig.savefig(g.filePath + '_z_' + str(z) + 'm.' + str(savefig), format=savefig)
    if showfig:
        plt.show()
    else:
        plt.close('all')
@if_plottable
def plot_stokes_values(S, fig=None, d_pol=0, norm=0, showfig=True, gw=1, direction='z', plot_func='step', **kwargs):
    """
    Plots the Stokes parameters S0..S3 (and optionally the degree of polarization)
    along one spatial direction of a 3d StokesParameters object.

    :param S: 3d ocelot.optics.wave.StokesParameters() type object
    :param fig: existing figure (matplotlib.figure.Figure or figure number/name) to plot into,
                a new 'Stokes S' figure if None
    :param d_pol: 0 (off), 1 (total degree of polarization) or 'lin' (linear degree of polarization);
                  any nonzero value forces norm=1 and gw=0
    :param norm: normalize the curves by max(S0) and fix y limits to [-1, 1]
    :param showfig: display the figure on screen
    :param gw: plot power in [GW] rather than [W]
    :param direction: 'z', 'x' or 'y' - axis along which a 1d cut through the 3d object is taken
    :param plot_func: 'step' or 'line' plotting style
    :param kwargs: extra keyword arguments forwarded to the plotting function
    :raises ValueError: for unknown direction or plot_func
    """
    # if type(S) != StokesParameters:
    #     raise ValueError('Not a StokesParameters object')
    # take a 1d cut through the 3d Stokes object along the requested direction
    if direction == 'z':
        sc = S.sc_z * 1e6
        Scp = S[:, 0, 0]  ##### tbd: calculate middle?
    elif direction == 'x':
        sc = S.sc_x * 1e6
        Scp = S[0, 0, :]
    elif direction == 'y':
        sc = S.sc_y * 1e6
        Scp = S[0, :, 0]
    else:
        # previously an unknown direction failed later with a NameError on 'sc'
        raise ValueError('direction should be in ["x", "y", "z"]')
    if np.size(sc) <= 1:
        _logger.warning('plot_stokes_values needs more than a single point to plot (np.size(sc) <= 1)')
        return
    if d_pol != 0:
        # degree-of-polarization curves only make sense on a normalized plot
        gw = 0
        norm = 1
    if fig == None:
        plt.figure('Stokes S')
        plt.clf()
    elif type(fig) == matplotlib.figure.Figure:
        plt.figure(fig.number)
    else:
        plt.figure(fig)
        plt.clf()
    if gw:
        mult = 1e-9
        plt.ylabel('$S_0$ [GW]')
    elif norm:
        mult = 1 / np.amax(Scp.s0)
        # normalized plot is dimensionless - no y label is set
    else:
        mult = 1
        plt.ylabel('$S_0$ [W]')
    plt.xlabel('s [$\mu$m]')
    # merge plotting defaults with user-supplied kwargs instead of discarding them
    # (previously `kwargs = {'linewidth': 2}` silently dropped everything the caller passed)
    kwargs.setdefault('linewidth', 2)
    if plot_func == 'step':
        plot_function = plt.step
        kwargs.setdefault('where', 'mid')
    elif plot_func == 'line':
        plot_function = plt.plot
    else:
        raise ValueError('plot_func should be "step" or "line"')
    # optional degree-of-polarization curve is plotted first (below the Stokes curves)
    if d_pol == 'lin':
        # plt.step(sc, np.sqrt(S.s1**2+S.s2**2), linewidth=2, where='mid',color=[0.5,0.5,0.5], linestyle='--')
        plot_function(sc, Scp.deg_pol_l(), linestyle='-', color='#1f77b4', **kwargs)
    elif d_pol == 1:
        plot_function(sc, Scp.deg_pol(), linestyle='-', color='#1f77b4', **kwargs)
    else:
        pass
    # S0 is plotted last so it is drawn on top
    plot_function(sc, Scp.s1 * mult, color='g', **kwargs)
    plot_function(sc, Scp.s2 * mult, color='r', **kwargs)
    plot_function(sc, Scp.s3 * mult, color='c', **kwargs)
    plot_function(sc, Scp.s0 * mult, color='b', **kwargs)
    if d_pol == 'lin':
        plt.legend(['$D_{lin}$', '$S_1$', '$S_2$', '$S_3$', '$S_0$'], loc='lower center', ncol=5,
                   mode="expand", borderaxespad=0.5, frameon=1).get_frame().set_alpha(0.4)
    elif d_pol == 1:
        plt.legend(['$D_{pol}$', '$S_1$', '$S_2$', '$S_3$', '$S_0$'], loc='lower center', ncol=5,
                   mode="expand", borderaxespad=0.5, frameon=1).get_frame().set_alpha(0.4)
    else:
        plt.legend(['$S_1$', '$S_2$', '$S_3$', '$S_0$'], fontsize=13, ncol=4, loc='upper left',
                   frameon=1).get_frame().set_alpha(0.4)
    plt.xlim([np.amin(sc), np.amax(sc)])
    if norm:
        plt.ylim([-1, 1])
    plt.draw()
    if showfig:
        plt.show()
    else:
        plt.close('all')
@if_plottable
def plot_stokes_angles(S, fig=None, showfig=True, direction='z', plot_func='scatter'):
    """
    Plots the polarization ellipse angles psi and chi along one spatial
    direction of a 3d StokesParameters object.

    :param S: 3d ocelot.optics.wave.StokesParameters() type object
    :param fig: existing figure (matplotlib.figure.Figure or figure number/name) to plot into,
                a new 'Stokes angles' figure if None
    :param showfig: display the figure on screen
    :param direction: 'z', 'x' or 'y' - axis along which a 1d cut through the 3d object is taken
    :param plot_func: 'scatter' (point size weighted by degree of polarization), 'step' or 'line'
    :raises ValueError: for unknown direction or plot_func
    """
    # if type(S) != StokesParameters:
    #     raise ValueError('Not a StokesParameters object')
    # take a 1d cut through the 3d Stokes object along the requested direction
    if direction == 'z':
        sc = S.sc_z * 1e6
        Scp = S[:, 0, 0]
    elif direction == 'x':
        sc = S.sc_x * 1e6
        Scp = S[0, 0, :]
    elif direction == 'y':
        sc = S.sc_y * 1e6
        Scp = S[0, :, 0]
    else:
        # previously an unknown direction failed later with a NameError on 'sc'
        raise ValueError('direction should be in ["x", "y", "z"]')
    if np.size(sc) <= 1:
        _logger.warning('plot_stokes_angles needs more than a single point to plot (np.size(sc) <= 1)')
        return
    if fig == None:
        plt.figure('Stokes angles')
        plt.clf()
    elif type(fig) == matplotlib.figure.Figure:
        # accept a Figure object or a figure number/name (consistent with plot_stokes_values)
        plt.figure(fig.number)
        plt.clf()
    else:
        plt.figure(fig)
        plt.clf()
    kwargs = {'linewidth': 2}
    if plot_func == 'scatter':
        # point size is weighted by the local degree of polarization
        psize = Scp.deg_pol()
        kwargs['s'] = psize
        plot_function = plt.scatter
    elif plot_func == 'step':
        plot_function = plt.step
        kwargs['where'] = 'mid'
    elif plot_func == 'line':
        plot_function = plt.plot
    else:
        raise ValueError('plot_func should be in ["scatter", "step", "line"]')
    plot_function(sc, Scp.psi(), color='b', **kwargs)
    plot_function(sc, Scp.chi(), color='g', **kwargs)
    # legend labels follow the plotting order: psi (blue) is plotted first, then chi (green);
    # the previous ['$\chi$', '$\psi$'] ordering labeled the curves the wrong way around
    plt.legend(['$\psi$', '$\chi$'])  # ,loc='best')
    plt.xlabel('s [$\mu$m]')
    plt.ylabel('[rad]')
    plt.ylim([-np.pi / 2, np.pi / 2])
    plt.xlim([np.amin(sc), np.amax(sc)])
    plt.draw()
    if showfig:
        plt.show()
    else:
        plt.close('all')
@if_plottable
def plot_stokes_3d(stk_params, x_plane='max_slice', y_plane='max_slice', z_plane='max_slice', interpolation=None,
                   cmap_lin='brightwheel', cmap_circ='bwr', figsize=4, fig_name='Visualization Stokes parameters',
                   normalization='s0_max', cbars=True, savefig=False, showfig=True, text_present=True, **kwargs):
    """
    Plot 6 images with normalized Stokes parameters on them
    :param stk_params: 3d ocelot.optics.wave.StokesParameters() type object
    :param x_plane: this variable responds on which value on x-axis the 3d stk_params will intersect.
        It can take 3 different recognition:
        'max_slice': the intersection of 3d stk_params will contain the max value s0 in stk_params
        'proj': at the third subplot will be shown the projection of 3d stk_params in x direction
        <number> in [m]: the position of intersection on x-axis
    :param y_plane: this variable responds on which value on y-axis the 3d stk_params will intersect.
        It can take 3 different recognition:
        'max_slice': the intersection of 3d stk_params will contain the max value s0 in stk_params
        'proj': at the third subplot will be shown the projection of 3d stk_params in y direction
        <number> in [m]: the position of intersection on y-axis
    :param z_plane: this variable responds on which value on z-axis the 3d stk_params will intersect.
        It can take 3 different recognition:
        'max_slice': the intersection of 3d stk_params will contain the max value s0 in stk_params
        'proj': at the third subplot will be shown the projection of 3d stk_params in z direction
        <number> in [m]: the position of intersection on z-axis
    :param interpolation: str type variable which responds for interpolation before plotting linear polarized part
    :param cmap_lin: numpy array with shape (nwidth, nheight, 4) that contains the 4 rgba values in hue (width)
        and lightness (height).
        Can be obtained by a call to get_cmap2d(name).
        or:
        name where name is one of the following strings:
        'brightwheel', 'darkwheel', 'hardwheel', 'newwheel',
        'smoothwheel', 'wheel'
    :param cmap_circ: colormap used for the circular polarization (S3) subplots
    :param figsize: size of the figure
    :param fig_name: name of the figure
    :param normalization: None, 's0' or 's0_max' - normalization mode passed to the subplot helpers
    :param cbars: bool type variable which responds for showing of colorbars
    :param savefig: bool type variable which responds for saving of the figure
    :param showfig: bool type variable which responds for showing of the figure
    :param text_present: bool type variable which responds for showing text on subplots
    :param kwargs: 'cbax1_dir' is popped here; the rest is forwarded to the subplot helpers
    """
    if showfig == False and savefig == False:
        return
    _logger.info('plotting stokes parameters')
    start_time = time.time()
    cbax1_dir = kwargs.pop('cbax1_dir', 1)  # orientation of the 2d colorbar: 1 - vertical, otherwise horizontal
    ny_plots = 2
    # Plotting data
    fig = plt.figure(fig_name)
    fig.clf()
    fig.set_size_inches((5 * figsize, 3 * figsize), forward=True)
    z, y, x = stk_params.s0.shape
    # top row: linear polarization cuts in the z, x and y planes (axes shared column-wise with the bottom row)
    ax1 = fig.add_subplot(ny_plots, 3, 1)
    linear_plt = plot_stokes_sbfg_lin(ax1, stk_params, slice=z_plane, plane='z', cmap2d=cmap_lin, plot_title=None,
                                      x_label='x', y_label='y', text_present=text_present, interpolation=interpolation,
                                      normalization=normalization, result=1, **kwargs)
    ax2 = fig.add_subplot(ny_plots, 3, 2, sharey=ax1)
    plot_stokes_sbfg_lin(ax2, stk_params, slice=x_plane, plane='x', cmap2d=cmap_lin, plot_title='Linear polarization',
                         x_label='z', y_label='y', text_present=text_present, interpolation=interpolation,
                         normalization=normalization, **kwargs)
    ax3 = fig.add_subplot(ny_plots, 3, 3, sharex=ax2)
    plot_stokes_sbfg_lin(ax3, stk_params, slice=y_plane, plane='y', cmap2d=cmap_lin, plot_title=None, x_label='z',
                         y_label='x', text_present=text_present, interpolation=interpolation,
                         normalization=normalization, **kwargs)
    # bottom row: circular polarization (S3) cuts in the same planes
    ax4 = fig.add_subplot(ny_plots, 3, 4, sharex=ax1, sharey=ax1)
    circular_plt = plot_stokes_sbfg_circ(ax4, stk_params, slice=z_plane, plane='z', cmap=cmap_circ, plot_title=None,
                                         x_label='x', y_label='y', text_present=text_present, result=1,
                                         interpolation=interpolation, normalization=normalization, **kwargs)
    ax5 = fig.add_subplot(ny_plots, 3, 5, sharex=ax2, sharey=ax2)
    plot_stokes_sbfg_circ(ax5, stk_params, slice=x_plane, plane='x', cmap=cmap_circ, plot_title='Circular polarization',
                          x_label='z', y_label='y', text_present=text_present, interpolation=interpolation,
                          normalization=normalization, **kwargs)
    ax6 = fig.add_subplot(ny_plots, 3, 6, sharex=ax3, sharey=ax3)
    plot_stokes_sbfg_circ(ax6, stk_params, slice=y_plane, plane='y', cmap=cmap_circ, plot_title=None, x_label='z',
                          y_label='x', text_present=text_present, interpolation=interpolation,
                          normalization=normalization, **kwargs)
    if cbars:
        # 2d colorbar for the linear polarization row: hue encodes psi, lightness the polarization degree
        cbax1 = fig.add_axes([0.91, 0.56, 0.04, 0.321])
        if cbax1_dir == 1:
            ph = np.ones((100, 100)) * np.linspace(1, -1, 100)[:, np.newaxis]
            I = np.ones((100, 100)) * np.linspace(0, 1, 100)[np.newaxis, :]
            imshow2d(np.array([ph, I]), ax=cbax1, cmap2d=cmap_lin, huevmin=-1, huevmax=1, lightvmin=0, lightvmax=1,
                     extent=[0, 1, np.pi / 2, -np.pi / 2], aspect='auto')
            plt.yticks(np.linspace(np.pi / 2, -np.pi / 2, 3),
                       ['$-\pi/2$', '0', '$\pi/2$'])  # ['0','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$']
            cbax1.yaxis.set_label_position("right")
            cbax1.yaxis.tick_right()
            cbax1.set_ylabel('$\psi$')
            if normalization == 's0':
                cbax1.set_xlabel('$ \sqrt{S_1^2+S_2^2} / S_0$')
            elif normalization == 's0_max':
                cbax1.set_xlabel('$ \sqrt{S_1^2+S_2^2} / max(S_0)$')
            else:
                cbax1.set_xlabel('$ \sqrt{S_1^2+S_2^2}$')
            cbax1.tick_params(axis='both', which='major', labelsize=10)
        else:
            # horizontal variant of the same 2d colorbar
            ph = np.ones((100, 100)) * np.linspace(1, -1, 100)[np.newaxis, :]
            I = np.ones((100, 100)) * np.linspace(0, 1, 100)[:, np.newaxis]
            imshow2d(np.array([ph, I]), ax=cbax1, cmap2d=cmap_lin, huevmin=-1, huevmax=1, lightvmin=0, lightvmax=1,
                     extent=[np.pi / 2, -np.pi / 2, 1, 0], aspect='auto')
            plt.xticks(np.linspace(-np.pi / 2, np.pi / 2, 3),
                       ['$-\pi/2$', '0', '$\pi/2$'])  # ['0','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$']
            cbax1.yaxis.set_label_position("right")
            cbax1.yaxis.tick_right()
            cbax1.set_xlabel('$\psi$')
            if normalization == 's0':
                cbax1.set_ylabel('$ \sqrt{S_1^2+S_2^2} / S_0$')
            elif normalization == 's0_max':
                cbax1.set_ylabel('$ \sqrt{S_1^2+S_2^2} / max(S_0)$')
            else:
                cbax1.set_ylabel('$ \sqrt{S_1^2+S_2^2}$')
            cbax1.tick_params(axis='both', which='major', labelsize=10)
        # standard 1d colorbar for the circular polarization row
        cbax2 = fig.add_axes([0.91, 0.11, 0.04, 0.321])  # This is the position for the colorbar [x, y, width, height]
        cbar_circ_im = plt.colorbar(circular_plt, cax=cbax2)
        if normalization == 's0':
            cbax2.set_ylabel('$S_3 / S_0$')
        elif normalization == 's0_max':
            cbax2.set_ylabel('$S_3 / max(S_0)$')
        else:
            cbax2.set_ylabel('S3')
        cbax2.tick_params(axis='both', which='major', labelsize=10)
    fig.subplots_adjust(wspace=0.4, hspace=0.4)
    _logger.info(ind_str + 'done in {:.2f} seconds'.format(time.time() - start_time))
    plt.draw()
    if savefig != False:
        if savefig == True:
            savefig = 'png'
        _logger.debug(ind_str + 'saving figure')
        fig.savefig(savefig)
    if showfig:
        _logger.debug(ind_str + 'showing Stokes Parameters')
        plt.show()
    else:
        plt.close('all')
@if_plottable
def plot_stokes_sbfg_lin(ax, stk_params, slice, plane, cmap2d='brightwheel', plot_title=None, x_label='', y_label='',
                         result=0, text_present=True, interpolation=None, normalization='s0_max', **kwargs):
    """
    Plot normalized intensity and angle of the linear polarization of the light
    :param ax: matplotlib.pyplot.AxesSubplot on which the data will be plotted
    :param stk_params: 3d ocelot.optics.wave.StokesParameters() type object
    :param plane: the direction in which the projection/intersection of {stk_params} will be done
    :param slice: this variable responds on which value on {plane} direction the 3d stk_params will intersect.
        It can take 3 different recognition:
        'max_slice': the intersection of 3d stk_params will contain the max value s0 in stk_params
        'proj': at the third subplot will be shown the projection of 3d stk_params in {plane} direction
        <number> in [m]: the position of intersection on {plane} direction
    :param cmap2d: numpy array with shape (nwidth, nheight, 4) that contains the 4 rgba values in hue (width)
        and lightness (height).
        Can be obtained by a call to get_cmap2d(name).
        or:
        name where name is one of the following strings:
        'brightwheel', 'darkwheel', 'hardwheel', 'newwheel',
        'smoothwheel', 'wheel'
    :param plot_title: title of the plot
    :param x_label: label of the x axis
    :param y_label: label of the y axis
    :param result: a bool type variable; if bool == True the function will return linear_plt of AxesImage type
    :param text_present: bool type variable which responds for showing text on subplots
    :param interpolation: str type variable which responds for interpolation before plotting linear polarized part
    :param normalization: None, 's0' (per-point) or 's0_max' (global maximum) normalization
    :param kwargs: forwarded to imshow2d
    :return: linear_plt (only if result is truthy)
    """
    # Getting intersections of stk_params for plotting data
    z_max, y_max, x_max = np.unravel_index(stk_params.s0.argmax(), stk_params.s0.shape)  # getting max element position
    if plane in ['x', 2]:
        swap_axes = True
        extent = [stk_params.sc_z[0] * 1e6, stk_params.sc_z[-1] * 1e6, stk_params.sc_y[0] * 1e6,
                  stk_params.sc_y[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(x_max, plane=plane)[:, :, 0]
            slice_pos = stk_params.sc_x[x_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[:, :, 0]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_x, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[:, :, 0]
            slice_pos = stk_params.sc_x[slice_pos]
    elif plane in ['y', 1]:
        swap_axes = True
        extent = [stk_params.sc_z[0] * 1e6, stk_params.sc_z[-1] * 1e6, stk_params.sc_x[0] * 1e6,
                  stk_params.sc_x[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(y_max, plane=plane)[:, 0, :]
            slice_pos = stk_params.sc_y[y_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[:, 0, :]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_y, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[:, 0, :]
            slice_pos = stk_params.sc_y[slice_pos]
    elif plane in ['z', 0]:
        swap_axes = False
        extent = [stk_params.sc_x[0] * 1e6, stk_params.sc_x[-1] * 1e6, stk_params.sc_y[0] * 1e6,
                  stk_params.sc_y[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(z_max, plane=plane)[0, :, :]
            slice_pos = stk_params.sc_z[z_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[0, :, :]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_z, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[0, :, :]
            slice_pos = stk_params.sc_z[slice_pos]
    else:
        _logger.error(ind_str + 'argument "plane" should be in ["x","y","z",0,1,2]')
        raise ValueError('argument "plane" should be in ["x","y","z",0,1,2]')
    # Normalization
    if normalization is None:
        norm = 1
    elif normalization == 's0':
        norm = stk_params_plane.s0
    elif normalization == 's0_max':
        norm = np.amax(stk_params.s0)
    else:
        raise ValueError('"normalization" should be in [None, "s0", "s0_max"]')
    if swap_axes:
        # x/y cuts are stored transposed with respect to the plotting axes
        lin_pol_plane = np.swapaxes((stk_params_plane.P_pol_l() / norm), 0, 1)
        psi_plane = np.swapaxes((2 * stk_params_plane.psi() / np.pi), 0, 1)
    else:
        lin_pol_plane = stk_params_plane.P_pol_l() / norm
        psi_plane = 2 * stk_params_plane.psi() / np.pi
    # hue encodes the polarization angle psi, lightness the degree of linear polarization
    linear_plt = imshow2d(np.array([psi_plane, lin_pol_plane]), ax=ax, cmap2d=cmap2d, extent=extent,
                          interpolation=interpolation, aspect='auto', huevmin=-1, huevmax=1, lightvmin=0, lightvmax=1,
                          origin='lower', **kwargs)
    if plot_title is not None:
        ax.set_title(plot_title, fontsize=15)
    ax.set_xlabel(x_label + ' [$\mu$m]')
    ax.set_ylabel(y_label + ' [$\mu$m]')
    if text_present:
        # '(max int)' marks the cut taken through the global intensity maximum; the two annotation
        # strings were previously swapped (cf. the correct ordering in plot_stokes_sbfg_circ)
        dic = {'proj': 'projection', 'max_slice': 'slice at {:}={:.3f} $\mu$m (max int)'}
        ax.text(0.97, 0.97, dic.get(slice, 'slice at {:}={:.3f} $\mu$m').format(plane, slice_pos * 1e6),
                horizontalalignment='right',
                verticalalignment='top', transform=ax.transAxes, fontsize=10)
    if result:
        return linear_plt
@if_plottable
def plot_stokes_sbfg_circ(ax, stk_params, slice, plane, cmap='seismic', plot_title=None, x_label='', y_label='',
                          result=0, text_present=True, interpolation=None, normalization='s0_max', **kwargs):
    """
    Plot normalized Stokes parameter S3
    :param ax: matplotlib.pyplot.AxesSubplot on which the data will be plotted
    :param stk_params: 3d ocelot.optics.wave.StokesParameters() type object
    :param plane: the direction in which the projection/intersection of {stk_params} will be done
    :param slice: this variable responds on which value on {plane} direction the 3d stk_params will intersect.
        It can take 3 different recognition:
        'max_slice': the intersection of 3d stk_params will contain the max value s0 in stk_params
        'proj': at the third subplot will be shown the projection of 3d stk_params in {plane} direction
        <number> in [m]: the position of intersection on {plane} direction
    :param cmap: colormap which will be used for plotting data
    :param plot_title: title of the plot
    :param x_label: label of the x axis
    :param y_label: label of the y axis
    :param result: a bool type variable; if bool == True the function will return circular_plt of AxesImage type
    :param text_present: bool type variable which responds for showing text on subplots
    :param interpolation: str type variable which responds for interpolation before plotting
    :param normalization: None, 's0' (per-point) or 's0_max' (global maximum) normalization of S3
    :param kwargs: forwarded to ax.imshow
    :return: circular_plt (only if result is truthy)
    """
    # Getting intersections of stk_params for plotting data
    z_max, y_max, x_max = np.unravel_index(stk_params.s0.argmax(), stk_params.s0.shape)  # getting max element position
    if plane in ['x', 2]:
        swap_axes = True
        extent = [stk_params.sc_z[0] * 1e6, stk_params.sc_z[-1] * 1e6, stk_params.sc_y[0] * 1e6,
                  stk_params.sc_y[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(x_max, plane=plane)[:, :, 0]
            slice_pos = stk_params.sc_x[x_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[:, :, 0]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_x, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[:, :, 0]
            slice_pos = stk_params.sc_x[slice_pos]
    elif plane in ['y', 1]:
        swap_axes = True
        extent = [stk_params.sc_z[0] * 1e6, stk_params.sc_z[-1] * 1e6, stk_params.sc_x[0] * 1e6,
                  stk_params.sc_x[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(y_max, plane=plane)[:, 0, :]
            slice_pos = stk_params.sc_y[y_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[:, 0, :]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_y, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[:, 0, :]
            slice_pos = stk_params.sc_y[slice_pos]
    elif plane in ['z', 0]:
        swap_axes = False
        extent = [stk_params.sc_x[0] * 1e6, stk_params.sc_x[-1] * 1e6, stk_params.sc_y[0] * 1e6,
                  stk_params.sc_y[-1] * 1e6]
        if slice == 'max_slice':
            stk_params_plane = stk_params.slice_2d_idx(z_max, plane=plane)[0, :, :]
            slice_pos = stk_params.sc_z[z_max]
        elif slice == 'proj':
            stk_params_plane = stk_params.proj(plane=plane, mode='mean')[0, :, :]
            slice_pos = 0
        else:
            slice_pos = find_nearest_idx(stk_params.sc_z, slice)
            stk_params_plane = stk_params.slice_2d_idx(slice_pos, plane=plane)[0, :, :]
            slice_pos = stk_params.sc_z[slice_pos]
    else:
        _logger.error(ind_str + 'argument "plane" should be in ["x","y","z",0,1,2]')
        raise ValueError('argument "plane" should be in ["x","y","z",0,1,2]')
    # Normalization
    if normalization is None:
        norm = 1
    elif normalization == 's0':
        norm = stk_params_plane.s0
    elif normalization == 's0_max':
        norm = np.amax(stk_params.s0)
    else:
        raise ValueError('"normalization" should be in [None, "s0", "s0_max"]')
    if swap_axes:
        # x/y cuts are stored transposed with respect to the plotting axes
        s3_plane = np.swapaxes((stk_params_plane.s3 / norm), 0, 1)
    else:
        s3_plane = stk_params_plane.s3 / norm
    m, n = s3_plane.shape  # NOTE(review): unused
    circular_plt = ax.imshow(s3_plane, cmap=cmap, vmin=-1, vmax=1, interpolation=interpolation, aspect='auto',
                             extent=extent, origin='lower', **kwargs)
    if plot_title is not None:
        ax.set_title(plot_title, fontsize=15)
    ax.set_xlabel(x_label + ' [$\mu$m]')
    ax.set_ylabel(y_label + ' [$\mu$m]')
    if text_present:
        # '(max int)' marks the cut taken through the global intensity maximum
        dic = {'proj': 'projection', 'max_slice': 'slice at {:}={:.3f} $\mu$m (max int)'}
        ax.text(0.97, 0.97, dic.get(slice, 'slice at {:}={:.3f} $\mu$m').format(plane, slice_pos * 1e6),
                horizontalalignment='right',
                verticalalignment='top', transform=ax.transAxes, fontsize=10)
    if result:
        return circular_plt
def plot_hprofile(*args, **kwargs):
    """Backward-compatible alias: forwards every argument to plot_1d_hprofile()."""
    plot_1d_hprofile(*args, **kwargs)
def plot_1d_hprofile(height_profile, figsize=4, fig_name='Height profile', savefig=False, showfig=True,
                     **kwargs):
    """
    Plot the height map and the power spectral density (PSD) of a
    HeightProfile object, side by side in one figure.
    :param height_profile: HeightProfile object from ocelot
    :param figsize: size of figure
    :param fig_name: name of figure (also used as the file stem when saving)
    :param savefig: False, True (saved as 'png'), or a file-extension string
        such as 'png' or 'pdf'
    :param showfig: bool type flag, responding for showing figure
    :param kwargs: forwarded to the matplotlib plot calls
    """
    # Nothing to do if the figure is neither shown nor saved.
    # (Deliberate `== False` comparisons: savefig may be a bool or a string.)
    if (showfig == False) and (savefig == False):
        return
    _logger.info('plotting height_profile')
    _logger.warning(ind_str + 'in beta')
    start_time = time.time()
    # Plotting data
    fig = plt.figure(fig_name)
    # fig.canvas.set_window_title(fig_name)
    fig.clf()
    fig.set_size_inches((4 * figsize, 1.5 * figsize), forward=True)
    # Left panel: raw height errors along the surface (s converted m -> mm).
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.plot(height_profile.s * 1000, height_profile.h, **kwargs)
    ax1.set_title('height errors', fontsize=15)
    ax1.set_xlabel('s [mm]')
    ax1.set_ylabel('height [m]')
    # Right panel: PSD on log-log axes, annotated with the RMS height error.
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.loglog(*height_profile.psd(), marker='*', **kwargs)
    ax2.set_title('power spectral density', fontsize=15)
    ax2.set_xlabel('k [1/m]')
    ax2.set_ylabel('PSD [m^3]')
    ax2.text(0.97, 0.97, 'surface height errors RMS = {0:.02e} [m]'.format(height_profile.hrms()),
             horizontalalignment='right',
             verticalalignment='top', transform=ax2.transAxes, fontsize=12)
    fig.subplots_adjust(wspace=0.4, hspace=0.4)
    plt.draw()
    _logger.debug(ind_str + 'done in {:.2f} seconds'.format(time.time() - start_time))
    if savefig != False:
        # savefig may be the literal True (default to png) or an extension string.
        if savefig == True:
            savefig = 'png'
        _logger.debug(ind_str + 'saving figure')
        fig.savefig(fig_name + '.' + savefig)
    if showfig:
        _logger.debug('showing HeightProfile')
        plt.show()
    else:
        plt.close('all')
| ocelot-collab/ocelot | ocelot/gui/dfl_plot.py | Python | gpl-3.0 | 61,275 |
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time
# Joining constants used by the address/date parsers below.
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
# Short names occupy indices 0-11, long names 12-23; parsedate_tz()
# folds index+1 back into the 1-12 range.
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
               'aug', 'sep', 'oct', 'nov', 'dec',
               'january', 'february', 'march', 'april', 'may', 'june', 'july',
               'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z.  According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones.  RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
# Values are signed HHMM offsets (e.g. -500 == UTC-05:00); they are
# converted to seconds in parsedate_tz().
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
              'EST': -500, 'EDT': -400,  # Eastern
              'CST': -600, 'CDT': -500,  # Central
              'MST': -700, 'MDT': -600,  # Mountain
              'PST': -800, 'PDT': -700   # Pacific
              }
def parsedate_tz(data):
    """Convert a date string to a time tuple.

    Accounts for military timezones.

    Returns a 10-tuple (year, month, day, hour, minute, second,
    weekday, yearday, dst, tz-offset-seconds) or None when the string
    cannot be parsed.  weekday/yearday/dst are dummies (0, 1, -1).
    """
    data = data.split()
    # The FWS after the comma after the day-of-week is optional, so search and
    # adjust for this.
    if data[0].endswith(',') or data[0].lower() in _daynames:
        # There's a dayname here. Skip it
        del data[0]
    else:
        i = data[0].rfind(',')
        if i >= 0:
            data[0] = data[0][i+1:]
    if len(data) == 3:  # RFC 850 date, deprecated
        stuff = data[0].split('-')
        if len(stuff) == 3:
            data = stuff + data[1:]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            # Split "HH:MM:SS+ZZZZ" into time and zone fields.
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')  # Dummy tz
    if len(data) < 5:
        return None
    data = data[:5]
    [dd, mm, yy, tm, tz] = data
    mm = mm.lower()
    if mm not in _monthnames:
        # Day and month may be swapped ("Jan 15" vs "15 Jan").
        dd, mm = mm, dd.lower()
        if mm not in _monthnames:
            return None
    mm = _monthnames.index(mm) + 1
    if mm > 12:
        # Long month names occupy indices 13-24; fold back into 1-12.
        mm -= 12
    if dd[-1] == ',':
        dd = dd[:-1]
    i = yy.find(':')
    if i > 0:
        # Year and time fields were swapped.
        yy, tm = tm, yy
    if yy[-1] == ',':
        yy = yy[:-1]
    if not yy[0].isdigit():
        # Year and zone fields were swapped.
        yy, tz = tz, yy
    if tm[-1] == ',':
        tm = tm[:-1]
    tm = tm.split(':')
    if len(tm) == 2:
        [thh, tmm] = tm
        tss = '0'
    elif len(tm) == 3:
        [thh, tmm, tss] = tm
    else:
        return None
    try:
        yy = int(yy)
        dd = int(dd)
        thh = int(thh)
        tmm = int(tmm)
        tss = int(tss)
    except ValueError:
        return None
    tzoffset = None
    tz = tz.upper()
    # BUGFIX: dict.has_key() is Python-2-only; the `in` operator works on
    # both Python 2 and 3.
    if tz in _timezones:
        tzoffset = _timezones[tz]
    else:
        try:
            tzoffset = int(tz)
        except ValueError:
            pass
    # Convert a timezone offset into seconds ; -0500 -> -18000
    if tzoffset:
        if tzoffset < 0:
            tzsign = -1
            tzoffset = -tzoffset
        else:
            tzsign = 1
        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
    # Daylight Saving Time flag is set to -1, since DST is unknown.
    return yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset
def parsedate(data):
    """Convert a time string to a time tuple."""
    parsed = parsedate_tz(data)
    # Drop the timezone offset on success; pass any failure value through.
    return parsed[:9] if isinstance(parsed, tuple) else parsed
def mktime_tz(data):
    """Turn a 10-tuple as returned by parsedate_tz() into a UTC timestamp."""
    tzoffset = data[9]
    if tzoffset is None:
        # No zone info, so localtime is better assumption than GMT
        return time.mktime(data[:8] + (-1,))
    # Interpret the tuple as local standard time, then compensate for both
    # the original zone offset and the local zone to land on UTC.
    local = time.mktime(data[:8] + (0,))
    return local - tzoffset - time.timezone
def quote(str):
"""Add quotes around a string."""
return str.replace('\\', '\\\\').replace('"', '\\"')
class AddrlistClass:
    """Address parser class by Ben Escoto.
    To understand what this class does, it helps to have a copy of RFC 2822 in
    front of you.
    Note: this class interface is deprecated and may be removed in the future.
    Use rfc822.AddressList instead.
    """
    def __init__(self, field):
        """Initialize a new instance.
        `field' is an unparsed address header field, containing
        one or more addresses.
        """
        # Character classes driving the scanner; `pos` is the cursor into
        # `field` shared by every get* method below.
        self.specials = '()<>@,:;.\"[]'
        self.pos = 0
        self.LWS = ' \t'
        self.CR = '\r\n'
        self.FWS = self.LWS + self.CR
        self.atomends = self.specials + self.LWS + self.CR
        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
        # is obsolete syntax. RFC 2822 requires that we recognize obsolete
        # syntax, so allow dots in phrases.
        self.phraseends = self.atomends.replace('.', '')
        self.field = field
        self.commentlist = []
    def gotonext(self):
        """Parse up to the start of the next address."""
        # Skips folding whitespace and collects any parenthesized comments.
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS + '\n\r':
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            else:
                break
    def getaddrlist(self):
        """Parse all addresses.
        Returns a list containing all of the addresses.
        """
        result = []
        while self.pos < len(self.field):
            ad = self.getaddress()
            if ad:
                result += ad
            else:
                # Unparsable chunk: keep a placeholder pair.
                result.append(('', ''))
        return result
    def getaddress(self):
        """Parse the next address."""
        self.commentlist = []
        self.gotonext()
        # Remember the start so we can re-parse as a bare addrspec below.
        oldpos = self.pos
        oldcl = self.commentlist
        plist = self.getphraselist()
        self.gotonext()
        returnlist = []
        if self.pos >= len(self.field):
            # Bad email address technically, no domain.
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
        elif self.field[self.pos] in '.@':
            # email address is just an addrspec
            # this isn't very efficient since we start over
            self.pos = oldpos
            self.commentlist = oldcl
            addrspec = self.getaddrspec()
            returnlist = [(SPACE.join(self.commentlist), addrspec)]
        elif self.field[self.pos] == ':':
            # address is a group
            returnlist = []
            fieldlen = len(self.field)
            self.pos += 1
            # Recursively collect the group's member addresses up to ';'.
            while self.pos < len(self.field):
                self.gotonext()
                if self.pos < fieldlen and self.field[self.pos] == ';':
                    self.pos += 1
                    break
                returnlist = returnlist + self.getaddress()
        elif self.field[self.pos] == '<':
            # Address is a phrase then a route addr
            routeaddr = self.getrouteaddr()
            if self.commentlist:
                returnlist = [(SPACE.join(plist) + ' (' +
                               ' '.join(self.commentlist) + ')', routeaddr)]
            else:
                returnlist = [(SPACE.join(plist), routeaddr)]
        else:
            if plist:
                returnlist = [(SPACE.join(self.commentlist), plist[0])]
            elif self.field[self.pos] in self.specials:
                # Skip stray special characters.
                self.pos += 1
        self.gotonext()
        if self.pos < len(self.field) and self.field[self.pos] == ',':
            self.pos += 1
        return returnlist
    def getrouteaddr(self):
        """Parse a route address (Return-path value).
        This method just skips all the route stuff and returns the addrspec.
        """
        if self.field[self.pos] != '<':
            return
        expectroute = False
        self.pos += 1
        self.gotonext()
        adlist = ''
        while self.pos < len(self.field):
            if expectroute:
                # After '@' inside the angle brackets comes a route domain,
                # which is parsed and discarded.
                self.getdomain()
                expectroute = False
            elif self.field[self.pos] == '>':
                self.pos += 1
                break
            elif self.field[self.pos] == '@':
                self.pos += 1
                expectroute = True
            elif self.field[self.pos] == ':':
                self.pos += 1
            else:
                adlist = self.getaddrspec()
                self.pos += 1
                break
        self.gotonext()
        return adlist
    def getaddrspec(self):
        """Parse an RFC 2822 addr-spec."""
        aslist = []
        self.gotonext()
        # Local part: dot-separated atoms and/or quoted strings.
        while self.pos < len(self.field):
            if self.field[self.pos] == '.':
                aslist.append('.')
                self.pos += 1
            elif self.field[self.pos] == '"':
                aslist.append('"%s"' % self.getquote())
            elif self.field[self.pos] in self.atomends:
                break
            else:
                aslist.append(self.getatom())
        self.gotonext()
        if self.pos >= len(self.field) or self.field[self.pos] != '@':
            # No domain part; return just the local part.
            return EMPTYSTRING.join(aslist)
        aslist.append('@')
        self.pos += 1
        self.gotonext()
        return EMPTYSTRING.join(aslist) + self.getdomain()
    def getdomain(self):
        """Get the complete domain name from an address."""
        sdlist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.LWS:
                self.pos += 1
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] == '[':
                sdlist.append(self.getdomainliteral())
            elif self.field[self.pos] == '.':
                self.pos += 1
                sdlist.append('.')
            elif self.field[self.pos] in self.atomends:
                break
            else:
                sdlist.append(self.getatom())
        return EMPTYSTRING.join(sdlist)
    def getdelimited(self, beginchar, endchars, allowcomments=True):
        """Parse a header fragment delimited by special characters.
        `beginchar' is the start character for the fragment.
        If self is not looking at an instance of `beginchar' then
        getdelimited returns the empty string.
        `endchars' is a sequence of allowable end-delimiting characters.
        Parsing stops when one of these is encountered.
        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
        within the parsed fragment.
        """
        if self.field[self.pos] != beginchar:
            return ''
        slist = ['']
        quote = False
        self.pos += 1
        while self.pos < len(self.field):
            if quote:
                # Previous character was a backslash: take this one literally.
                slist.append(self.field[self.pos])
                quote = False
            elif self.field[self.pos] in endchars:
                self.pos += 1
                break
            elif allowcomments and self.field[self.pos] == '(':
                slist.append(self.getcomment())
                continue        # have already advanced pos from getcomment
            elif self.field[self.pos] == '\\':
                quote = True
            else:
                slist.append(self.field[self.pos])
            self.pos += 1
        return EMPTYSTRING.join(slist)
    def getquote(self):
        """Get a quote-delimited fragment from self's field."""
        return self.getdelimited('"', '"\r', False)
    def getcomment(self):
        """Get a parenthesis-delimited fragment from self's field."""
        return self.getdelimited('(', ')\r', True)
    def getdomainliteral(self):
        """Parse an RFC 2822 domain-literal."""
        return '[%s]' % self.getdelimited('[', ']\r', False)
    def getatom(self, atomends=None):
        """Parse an RFC 2822 atom.
        Optional atomends specifies a different set of end token delimiters
        (the default is to use self.atomends).  This is used e.g. in
        getphraselist() since phrase endings must not include the `.' (which
        is legal in phrases)."""
        atomlist = ['']
        if atomends is None:
            atomends = self.atomends
        while self.pos < len(self.field):
            if self.field[self.pos] in atomends:
                break
            else:
                atomlist.append(self.field[self.pos])
            self.pos += 1
        return EMPTYSTRING.join(atomlist)
    def getphraselist(self):
        """Parse a sequence of RFC 2822 phrases.
        A phrase is a sequence of words, which are in turn either RFC 2822
        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
        runs of continuous whitespace into one space.
        """
        plist = []
        while self.pos < len(self.field):
            if self.field[self.pos] in self.FWS:
                self.pos += 1
            elif self.field[self.pos] == '"':
                plist.append(self.getquote())
            elif self.field[self.pos] == '(':
                self.commentlist.append(self.getcomment())
            elif self.field[self.pos] in self.phraseends:
                break
            else:
                plist.append(self.getatom(self.phraseends))
        return plist
class AddressList(AddrlistClass):
    """An AddressList encapsulates a list of parsed RFC 2822 addresses."""
    def __init__(self, field):
        AddrlistClass.__init__(self, field)
        self.addresslist = self.getaddrlist() if field else []
    def __len__(self):
        return len(self.addresslist)
    def __add__(self, other):
        # Set union: our entries, followed by other's entries we lack.
        combined = AddressList(None)
        combined.addresslist = self.addresslist[:]
        combined.addresslist.extend(
            addr for addr in other.addresslist
            if addr not in self.addresslist)
        return combined
    def __iadd__(self, other):
        # Set union, in-place (checks against the growing list, so
        # duplicates within `other` are only added once).
        for addr in other.addresslist:
            if addr not in self.addresslist:
                self.addresslist.append(addr)
        return self
    def __sub__(self, other):
        # Set difference
        result = AddressList(None)
        result.addresslist = [addr for addr in self.addresslist
                              if addr not in other.addresslist]
        return result
    def __isub__(self, other):
        # Set difference, in-place (removes one occurrence per match).
        for addr in other.addresslist:
            if addr in self.addresslist:
                self.addresslist.remove(addr)
        return self
    def __getitem__(self, index):
        # Make indexing, slices, and 'in' work
        return self.addresslist[index]
| ericlink/adms-server | playframework-dist/play-1.1/python/Lib/email/_parseaddr.py | Python | mit | 15,467 |
#MenuTitle: Export Open Instances to InDesign 1.0
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Export all open instances in OTF in Indesign font's folder.
"""
import os, glob, shutil
from os.path import expanduser
# path to the folder where all files will be saved
home = expanduser("~")
desktop = '/Desktop/'
# folder = font.familyName
folder = 'exported_from_glyphs'
path = home+desktop+folder
indesign_folder = r'/Applications/Adobe InDesign CC 2015/Fonts/'
print(folder)
# check if the folder already exists. If not, create the folder
if os.path.exists(path):
print('The %s already exists in %s' %(folder, path))
else:
os.mkdir(path)
print('Folder %s created in %s' %(folder, path))
# remove all files before save the new ones
all_files = glob.glob(path+'/*')
for files in all_files:
os.remove(files)
# check for all open fonts and export all instances to the created folder
for font in Glyphs.fonts:
for instance in font.instances:
instance.generate(FontPath = path)
Glyphs.showNotification('Export fonts', 'The fonts were exported successfully.')
# change the all_files variable to indesign path
all_files = glob.glob(indesign_folder+folder+'/*')
# remove folder in InDesign path, if exists. Else, just move the folder from the desktop to InDesign font's folder
if os.path.exists(indesign_folder+folder):
for files in all_files:
os.remove(files)
os.rmdir(indesign_folder+folder)
shutil.move(path, indesign_folder)
else:
shutil.move(path, indesign_folder)
| filipenegrao/glyphsapp-scripts | old_stuff/exportAllOpenInstances2Indesign.py | Python | apache-2.0 | 1,545 |
import colorsys
import random
import math
import kmath
class KColor:
    """A color with float red/green/blue/alpha channels."""
    def __init__(self, red, green, blue, alpha):
        self.red = red
        self.green = green
        self.blue = blue
        self.alpha = alpha
    def normalize(self):
        """Scale RGB so the largest channel becomes 1.0; alpha is untouched."""
        peak = max(self.red, self.green, self.blue)
        self.red /= peak
        self.green /= peak
        self.blue /= peak
    def to_hsv(self):
        """Return the (hue, saturation, value) tuple for the RGB channels."""
        return colorsys.rgb_to_hsv(self.red, self.green, self.blue)
def hsv_to_rgba(hsv):
    """Convert an (h, s, v) triple to an [r, g, b, 1.0] list."""
    channels = colorsys.hsv_to_rgb(hsv[0], hsv[1], hsv[2])
    return list(channels) + [1.0]
def rgba_to_hsv(c):
    """Convert an RGBA sequence to an (h, s, v) tuple; alpha is ignored.

    BUGFIX: the original definition had no body at all (a syntax error).
    This supplies the inverse of hsv_to_rgba() using colorsys, mirroring
    KColor.to_hsv() above.
    """
    return colorsys.rgb_to_hsv(c[0], c[1], c[2])
def lerp_rgba(ca, cb, n_steps):
    """Return n_steps RGBA colors linearly interpolated from ca to cb.

    Both endpoints are included (t runs from 0 to 1); n_steps must be >= 2.
    """
    r = []
    scale = 1/(n_steps-1)
    for i in range(0, n_steps):
        t = i*scale
        c = [0.0, 0.0, 0.0, 0.0]
        c[0] = kmath.lerp(ca[0], cb[0], t)
        c[1] = kmath.lerp(ca[1], cb[1], t)
        c[2] = kmath.lerp(ca[2], cb[2], t)
        c[3] = kmath.lerp(ca[3], cb[3], t)
        # BUGFIX: the original never stored c, so it always returned [].
        r.append(c)
    return r
def blackbody_spd(temperature):
    """Return a spectral power density function (Planck's law) for the
    given temperature; the returned callable takes a wavelength in nm."""
    def evaluate(wl: float):
        metres = wl * 1.0E-9
        # wavelength^5, built by squaring the square and one more multiply.
        fifth_power = metres * metres
        fifth_power *= fifth_power
        fifth_power *= metres
        two_hcc2 = 1.19104259E-16
        hcok = 0.0143877506
        exponent = hcok / (metres * temperature)
        return two_hcc2 / (fifth_power * (math.exp(exponent) - 1.0))
    return evaluate
# http://jcgt.org/published/0002/02/01/
def xyz_for_wavelength(wl):
    """Approximate the CIE XYZ color-matching functions at wavelength
    `wl` (nm) as sums of piecewise Gaussian lobes."""
    def lobe(center, lo_width, hi_width):
        # Asymmetric Gaussian: different widths left/right of the peak.
        u = (wl-center)*(lo_width if (wl < center) else hi_width)
        return math.exp(-0.5*u*u)
    x = 0.362*lobe(442.0, 0.0624, 0.0374) + 1.056*lobe(599.8, 0.0264, 0.0323) - 0.065*lobe(501.1, 0.0490, 0.0382)
    y = 0.821*lobe(568.8, 0.0213, 0.0247) + 0.286*lobe(530.9, 0.0613, 0.0322)
    z = 1.217*lobe(437.0, 0.0845, 0.0278) + 0.681*lobe(459.0, 0.0385, 0.0725)
    return [x, y, z]
# sRGB color space
def xyz_to_rgba(c):
    """Map a CIE XYZ triple to linear sRGB, returning [r, g, b, 1.0]."""
    x, y, z = c[0], c[1], c[2]
    return [
        x*3.240479 + y*-1.537150 + z*-0.498535,
        x*-0.969256 + y*1.875991 + z*0.041556,
        x*0.055648 + y*-0.204043 + z*1.057311,
        1.0,
    ]
def rgba_to_xyz(c):
    """Map linear sRGB (alpha ignored) to a CIE XYZ triple [x, y, z]."""
    r, g, b = c[0], c[1], c[2]
    return [
        r*0.412453 + g*0.357580 + b*0.180423,
        r*0.212671 + g*0.715160 + b*0.072169,
        r*0.019334 + g*0.119193 + b*0.950227,
    ]
def SPDtoRGB(spd: SPD):
    # NOTE(review): SPD, XYZColor and XYZtoRGB are not defined anywhere in
    # this module, so this function cannot run as written (the `SPD`
    # annotation alone raises NameError at definition time).  It sketches
    # integrating a spectral power density against per-wavelength XYZ
    # weights over 400-700 nm -- confirm the intended helper types.
    xyz = XYZColor(0,0,0)
    for wl in range(400,700):
        xyz += spd.evaluate(wl)*XYZColor.for_wavelength(wl)
    rgb = XYZtoRGB(xyz)
    return rgb
| simian201/Kykliskos | src/color.py | Python | gpl-3.0 | 2,612 |
import os
import csv
import numpy as np
from eqep.interpolation.rbf_pgvinterpolator import RbfPGVInterpolator
class EarthQuake:
    """Stores the data of an earthquake

    It has a grid of coordinates with a certain (interpolated)
    peak ground velocity for every cell.
    This class is usually used in combination with a `PGVInterpolator`:
    >>> earthquake = EarthQuake((0, 10), (0, 10), (10, 10))
    >>> earthquake.interpolate_pgvs(eqdata, RbfPGVInterpolator())

    Attributes
    ----------
    longs, lats : array of float
        store the longitudes and latitudes of the grid
    pgvs : array of float
        stores the peak ground velocities corresponding to the grid cells
    """

    def __init__(self, long_range, lat_range, steps, pgvs=None):
        """Creates a new `EarthQuake` for the specified area

        Parameters
        ----------
        long_range : tuple of float
            the longitude range: (min, max)
        lat_range : tuple of float
            the latitude range: (min, max)
        steps : tuple of int
            the number of steps in these ranges
        pgvs : array of floats, optional
            the peak ground velocities corresponding to the grid defined
            with the other parameters

        Raises
        ------
        AttributeError
            if `pgvs` is given but its shape differs from the grid shape
        """
        # meshgrid of (steps[0] longitudes) x (steps[1] latitudes);
        # resulting arrays have shape (steps[1], steps[0]).
        self.longs, self.lats = np.meshgrid(
            np.linspace(long_range[0], long_range[1], steps[0]),
            np.linspace(lat_range[0], lat_range[1], steps[1]))
        if pgvs is None or pgvs.shape == self.longs.shape:
            self.pgvs = pgvs
        else:
            raise AttributeError('`pgvs` must equal the shape of the '
                                 'long- and lat-grid')

    def interpolate_pgvs(self, data, interpolator=None):
        """Interpolates the pgvs for the whole earthquake grid

        Parameters
        ----------
        data : EQData
            the data to interpolate to a full earthquake
        interpolator : PGVInterpolator, optional
            the algorithm to use for interpolation; defaults to a fresh
            `RbfPGVInterpolator`
        """
        if interpolator is None:
            # FIX: the original used `interpolator=RbfPGVInterpolator()` in
            # the signature, creating one shared instance at import time;
            # build a fresh default per call instead.
            interpolator = RbfPGVInterpolator()
        self.pgvs = interpolator.interpolate(data, self.longs, self.lats)

    def save_csv(self, filename):
        """Saves this object as a CSV-file

        The data is persisted in the following format: ::
            # comment
            long, lat, pgv
            long, lat, pgv
            ...

        Parameters
        ----------
        filename : str
            the name of the file ending with ".csv" (may also be a path)

        Raises
        ------
        ValueError
            if no pgvs have been set/interpolated yet
        """
        if self.pgvs is None:
            raise ValueError('`pgvs` must not be "None".')
        # Create the parent directory if the filename contains one.
        # (FIX: derive it with os.path.dirname instead of testing for '/',
        # which missed OS-specific separators.)
        dirname = os.path.dirname(filename)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        # write header comment and the values to csv file
        with open(filename, 'w') as file:
            file.write('# longitude, latitude, peak ground velocity\n\n')
            csv_writer = csv.writer(file, lineterminator='\n')
            for long, lat, pgv in zip(self.longs.flatten(), self.lats.flatten(),
                                      self.pgvs.flatten()):
                csv_writer.writerow([long, lat, pgv])
| TGM-HIT/eqep-api | eqep/data/earthquake.py | Python | mit | 3,507 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
    import urllib3
except ImportError:
    # FIX: only a missing package should trigger this message; the original
    # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    print('BlastPDB requires urllib3')
class BlastPDB:
    """BlastPDB - run Blast online on the PDB database.
    This can be used in Jupiter based RNA notebooks, e.g.
    https://github.com/mmagnus/rna-pdb-tools/blob/master/rp18.ipynb
    Warning: getBlastPDB1 has been permanently removed as part of our announced shutdown on December 9th, 2020.
    https://www.rcsb.org/pdb/rest/getBlastPDB1
    Usage::
        >>> p = BlastPDB('GGGUCAGGCCGGCGAAAGUCGCCACAGUUUGGGGAAAGCUGUGCAGCCUGUAACCCCCCCACGAAAGUGGG')
        >>> p.search()
        >>> p.result #doctest: +ELLIPSIS
        u'<HTML>\\n<TITLE>BLAST Search Results</TITLE>...
    :param seq: string
    """
    def __init__(self, seq):
        self.seq = seq
        self.result = ''
    def search(self):
        """Run the BLAST query over HTTP and store the raw HTML reply."""
        base = 'http://www.rcsb.org/pdb/rest/getBlastPDB1'
        query = ('?sequence=' + self.seq +
                 '&eCutOff=10.0&matrix=BLOSUM62&outputFormat=HTML')
        pool = urllib3.PoolManager()
        reply = pool.request('GET', base + query)
        if reply.status == 200:
            self.result = reply.data.decode()
# main
if __name__ == '__main__':
    # Run the doctest embedded in the BlastPDB docstring (note: it performs
    # a live network request).
    import doctest
    doctest.testmod()
| m4rx9/rna-pdb-tools | rna_tools/BlastPDB.py | Python | mit | 1,305 |
#!/usr/bin/env python3
import string
import sys
import os
import subprocess
import json
import argparse
import re
# Repository root, assuming the script is run from a direct subdirectory.
DMCC_ROOT = os.path.join( os.getcwd(), ".." )
# Progress messages go to this log; stdout carries only the resulting JSON.
LOG_FILE = open( "build_rules.log", "w" )
RULES_DIR = os.path.join( DMCC_ROOT, "BuildRules" )
DEPLOY_DIR = os.path.join( DMCC_ROOT, "Deploy" )
MAKE_DIR = os.path.join( DMCC_ROOT, "Build", "Make" )
# Cached JSON from a previous run; used for the freshness check below.
RULES_INFO = os.path.join( DEPLOY_DIR, "build_rules.json" )
# Characters stripped from dependency names parsed out of make's errors.
WHITE_PUNCT = string.punctuation + string.whitespace
def clearMakeDir():
    """Remove untracked files from the Make directory via `git clean`.

    Returns git's exit status.  -f forces removal, -x also deletes
    ignored files.
    """
    cleaner = subprocess.Popen(["git", "clean", "-f", "-x"], cwd=MAKE_DIR)
    return cleaner.wait()
# TODO: The purpose of the next few lines is to figure out if there are
# any new makefiles to process. Can we get make to do this somehow?
def mostRecentTimestampInTree( path, filt ):
    """Return the newest mtime among files under `path` accepted by `filt`.

    Returns None when no file matches (or when `path` is neither a file
    nor a directory).
    """
    if os.path.isfile(path):
        return os.path.getmtime(path) if filt(path) else None
    if os.path.isdir(path):
        newest = None
        for entry in os.listdir(path):
            candidate = mostRecentTimestampInTree(
                os.path.join(path, entry), filt)
            if candidate is not None and (newest is None or candidate > newest):
                newest = candidate
        return newest
    print("Interesting. What kind of thing is 'path'?")
    return None
def mkSuffix( path ):
    """True when the final path component carries the `.mk` extension."""
    name = os.path.basename(path)
    return name.endswith(".mk")
# Compare the newest *.mk file against the cached JSON's timestamp; when the
# cache is at least as new, echo it to stdout and exit without re-crawling.
MOST_RECENT_RULE = mostRecentTimestampInTree( RULES_DIR, mkSuffix )
RULES_INFO_TIME = os.path.getmtime( RULES_INFO )
# >&2 echo "Dir: $MOST_RECENT_RULE File: $RULES_INFO_TIME"
# print( RULES_INFO_TIME )
# print( MOST_RECENT_RULE )
if RULES_INFO_TIME >= MOST_RECENT_RULE:
    print( "Build rules up-to-date.", file=LOG_FILE )
    # Emit the cached JSON verbatim on stdout.
    for line in open( RULES_INFO ):
        print( line, end="" )
    sys.exit( 0 )
else:
    print( "More recent build rule. Updating ...", file=LOG_FILE )
def runMake( mkfile, target, extra_opts, cwd=None ):
    """Run `make -s -f mkfile [target]` with `extra_opts`, optionally in
    directory `cwd`.

    Returns (exit_code, stdout_text, stderr_text); the streams are decoded
    as UTF-8 and may be None when the process produced nothing.
    """
    targ_list = [] if target == "" else [ target ]
    # -s = silent (don't echo commands)
    # -f = provide makefile name
    cmd = [ "make" ] + extra_opts + [ "-s", "-f", mkfile ] + targ_list
    # FIX: Popen accepts cwd=None to mean "current directory", so the two
    # duplicated branches of the original collapse into a single call.
    process = subprocess.Popen( cmd, cwd=cwd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE )
    ( output_bytes, err_bytes ) = process.communicate()
    output = None if output_bytes is None else output_bytes.decode( "utf-8" )
    err = None if err_bytes is None else err_bytes.decode( "utf-8" )
    return ( process.wait(), output, err )
def tag_foo( tag_string ):
    """Split `tag_string` into a (kind, value) pair.

    The kind is the text before the first underscore ('' when there is no
    underscore); the value joins the remaining parts with spaces and drops
    a trailing '.mk' extension.
    """
    parts = tag_string.split("_")
    if len(parts) > 1:
        kind, value = parts[0], " ".join(parts[1:])
    else:
        kind, value = "", parts[0]
    if value.endswith(".mk"):
        value = value[:-3]
    return (kind, value)
# One project record per processed makefile; filled in by processMakefile()
# and dumped as JSON at the end of the script.
projects = []
def processMakefile( full_path, app_path, fname_tags ):
    """Interrogate one makefile and append its project record to `projects`.

    The record carries the makefile's relative path, its tags (from the
    filename plus the makefile's own `tags` target) and its targets,
    together with any unresolved dependencies reported by `make -k`.
    """
    print( "Processing Makefile %s ..." % app_path, file=LOG_FILE )
    # print( "Processing Makefile %s ..." % app_path )
    proj = { 'path': app_path }
    tags = []
    for ( kind, tag_value ) in fname_tags:
        tags.append( { 'kind':kind, 'tag':tag_value } )
    ( has_tags, tags_output, tags_err ) = runMake( full_path, "tags", [] )
    for tag in ( tags_output.splitlines() if has_tags == 0 else [] ):
        ( kind, tag_value ) = tag_foo( tag )
        tags.append( { 'kind':kind, 'tag':tag_value } )
    proj[ "tags" ] = tags
    ( has_targs, targs_output, targs_err ) = runMake( full_path, "targets", [] )
    target_names = ( targs_output.splitlines() if has_targs == 0 else [] )
    # FIX: raw string so \S and \s are regex escapes, not (invalid) string
    # escape sequences that warn on modern Python.
    r = re.compile( r'^(visible_\S*)\s*:' )
    with open( full_path ) as f:
        for line in f.readlines():
            m = r.match( line )
            if m is not None:
                target_names.append( m.group( 1 ) )
    targets = []
    for target_name in target_names:
        target = { 'name': target_name }
        # Reset the scratch Make directory, then dry-run the target with -k
        # so every missing dependency is reported on stderr.
        clearMakeDir()
        runMake( full_path, "init", [], cwd=MAKE_DIR )
        ( _, sOut, sErr ) = runMake(
            full_path, target_name, [ "-k" ], cwd=MAKE_DIR )
        deps = []
        for err_line in ( [] if sErr is None else sErr.splitlines() ):
            preFile = "No rule to make target"
            postFile = "needed by"
            pre = err_line.find( preFile )
            post = err_line.find( postFile )
            if pre != -1 and post != -1:
                # Text between the two marker phrases names the missing dep.
                dep = err_line[ pre + len( preFile ) : post ]
                deps.append( dep.strip( WHITE_PUNCT ) )
        if len( deps ) > 0:
            target[ "deps" ] = deps
        targets.append( target )
    if len( targets ) > 0:
        proj[ "targets" ] = targets
    projects.append( proj )
def crawl( full_path, app_path, filename, fname_tags ):
    # Recursively walk the rules tree, accumulating (kind, tag) pairs derived
    # from every path component, and hand each *.mk file to processMakefile().
    if app_path == "Examples" and False: # TODO: config
        return
    # Copy before appending so sibling subtrees don't see each other's tags.
    fname_tags = fname_tags[:]
    if filename != "":
        fname_tags.append( tag_foo( filename ) )
    if os.path.isdir( full_path ):
        for f in os.listdir( full_path ):
            crawl( os.path.join( full_path, f ),
                   os.path.join( app_path, f ),
                   f, fname_tags )
    elif os.path.isfile( full_path ):
        if not mkSuffix( filename ):
            return
        processMakefile( full_path, app_path, fname_tags )
    else:
        # Interesting. What is f?
        pass
# Walk the whole rules tree, then emit the collected records as JSON on stdout.
crawl( RULES_DIR, "", "", [] )
print( json.dumps( projects ) )
| benjaminy/DoesMyCodeCompile | Source/process_build_rules.py | Python | apache-2.0 | 5,830 |
from django.db import models
class Tag(models.Model):
    # Markets carrying this tag; reverse accessor on Market is `tags`.
    market = models.ManyToManyField('markets.Market', related_name='tags')
    # Human-readable label; uniqueness is enforced at the database level.
    tag = models.CharField(verbose_name='Tag', unique=True, max_length=255)
| we-inc/mms-snow-white-and-the-seven-pandas | webserver/apps/tags/models.py | Python | mit | 207 |
#!/usr/bin/env python
"""
Copyright (C) 2013 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import unittest
from mtirc import bot
from mtirc import cache
from mtirc import lib
from mtirc import settings
class CacheTests(unittest.TestCase):
    # Exercises mtirc.cache.Cache with the same set/get/contains/delete
    # round trip against each supported backend (pickle file, memcache,
    # redis).  The memcache/redis tests need live servers on localhost.
    def setUp(self):
        config = settings.config
        config['nick'] = 'unittestbot'
        config['connections']['card.freenode.net']['channels'] = ['#bottest']
        self.bot = bot.Bot(config)
    def test_file_cache(self):
        config = dict(settings.config)
        config['cache']['type'] = settings.CACHE_PICKLE
        mc = cache.Cache(config['cache'])
        mc.set('123', 'test')
        self.assertEqual(mc.get('123'), 'test')
        self.assertTrue('123' in mc)
        self.assertEqual(mc.dump(), {'123': 'test', u'errors': {}})
        mc.delete('123')
        self.assertFalse('123' in mc)
    def test_memcache_cache(self):
        config = dict(settings.config)
        config['cache']['type'] = settings.CACHE_MEMCACHE
        mc = cache.Cache(config['cache'])
        #self.assertTrue(mc.use_mc) # This ensures that we're actually using memcache, not file cache
        mc.set('123', 'test')
        self.assertEqual(mc.get('123'), 'test')
        self.assertTrue('123' in mc)
        mc.delete('123')
        self.assertFalse('123' in mc)
    def test_redis_cache(self):
        config = dict(settings.config)
        config['cache']['type'] = settings.CACHE_REDIS
        config['cache']['port'] = 6379 # redis default
        r = cache.Cache(config['cache'])
        r.set('123', 'test')
        self.assertEqual(r.get('123'), 'test')
        self.assertTrue('123' in r)
        r.delete('123')
        self.assertFalse('123' in r)
class LibTests(unittest.TestCase):
    # Checks mtirc.lib's IRC color stripping and wiki RC-feed parsing
    # against one captured edit line and one captured log-action line
    # (the \x03... sequences are mIRC color codes).
    def setUp(self):
        self.edit = u'\x0314[[\x0307Hassan Rouhani\x0314]]\x034 \x0310 \x0302http://en.wikipedia.org/w/index.php?diff=560860840&oldid=560857945\x03 \x035*\x03 \x030337.98.125.156\x03 \x035*\x03 (+179) \x0310/* After the Islamic Revolution */\x03'
        self.action = u'\x0314[[\x0307Special:Log/abusefilter\x0314]]\x034 hit\x0310 \x0302\x03 \x035*\x03 \x030382.93.10.193\x03 \x035*\x03 \x031082.93.10.193 triggered [[Special:AbuseFilter/260|filter 260]], performing the action "edit" on [[\x0302Jack Dorsey\x0310]]. Actions taken: Disallow ([[Special:AbuseLog/8932011|details]])\x03'
    def test_color_stripping(self):
        self.assertEqual(lib.COLOR_RE.sub('', self.edit), u'[[Hassan Rouhani]] http://en.wikipedia.org/w/index.php?diff=560860840&oldid=560857945 * 37.98.125.156 * (+179) /* After the Islamic Revolution */')
    def test_edit_parsing(self):
        self.assertEqual(lib.parse_edit(self.edit), {'url': u'http://en.wikipedia.org/w/index.php?diff=560860840&oldid=560857945', 'bot': u'', 'summary': u'/* After the Islamic Revolution */', 'user': u'37.98.125.156', 'new': u'', 'diff': u'+179', 'patrolled': u'', 'page': u'Hassan Rouhani', 'minor': u''})
    def test_action_parsing(self):
        self.assertEqual(lib.parse_edit(self.action), {'user': u'82.93.10.193', 'log': u'hit', 'summary': u'82.93.10.193 triggered [[Special:AbuseFilter/260|filter 260]], performing the action "edit" on [[Jack Dorsey]]. Actions taken: Disallow ([[Special:AbuseLog/8932011|details]])'})
# Run the whole suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| legoktm/mtirc | tests/main.py | Python | mit | 4,313 |
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""NSX DHCP/metadata support
Revision ID: 1421183d533f
Revises: 8f682276ee4
Create Date: 2013-10-11 14:33:37.303215
"""
# Alembic revision identifiers for this migration step.
revision = '1421183d533f'
down_revision = '8f682276ee4'
# Plugins for which this migration applies; checked via
# migration.should_run() at the top of upgrade().
migration_for_plugins = [
    'neutron.plugins.nicira.NeutronPlugin.NvpPluginV2',
    'neutron.plugins.nicira.NeutronServicePlugin.NvpAdvancedPlugin',
    'neutron.plugins.vmware.plugin.NsxPlugin',
    'neutron.plugins.vmware.plugin.NsxServicePlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
    """Create the `lsn` and `lsn_port` tables for NSX DHCP/metadata support.

    No-op unless one of `migration_for_plugins` is active.
    """
    if not migration.should_run(active_plugins, migration_for_plugins):
        return
    # Maps a network to its NSX logical services node.
    op.create_table(
        'lsn',
        sa.Column('net_id',
                  sa.String(length=36), nullable=False),
        sa.Column('lsn_id',
                  sa.String(length=36), nullable=False),
        sa.PrimaryKeyConstraint('lsn_id'))
    # One port per subnet on an LSN; rows vanish with their parent LSN.
    op.create_table(
        'lsn_port',
        sa.Column('lsn_port_id',
                  sa.String(length=36), nullable=False),
        sa.Column('lsn_id',
                  sa.String(length=36), nullable=False),
        sa.Column('sub_id',
                  sa.String(length=36), nullable=False, unique=True),
        sa.Column('mac_addr',
                  sa.String(length=32), nullable=False, unique=True),
        sa.ForeignKeyConstraint(['lsn_id'], ['lsn.lsn_id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('lsn_port_id'))
def downgrade(active_plugins=None, options=None):
    # No-op: the lsn/lsn_port tables are not dropped on downgrade.
    pass
| CingHu/neutron-ustack | neutron/db/migration/alembic_migrations/versions/1421183d533f_nsx_dhcp_metadata.py | Python | apache-2.0 | 2,169 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
"""
@author: Raven
@contact: aducode@126.com
@site: https://github.com/aducode
@file: __init__.py
@time: 2016/2/9 23:05
""" | aducode/Gaeapy | gaea/__init__.py | Python | apache-2.0 | 165 |
import ambush
import VS
import Director
import directions_mission
class ambush_scan(ambush.ambush):
    """Ambush triggered by a contraband scan.

    Behaves like a normal ambush while the player carries `cargotype`;
    if the player dumps the cargo while in the ambush system, the ambush
    is called off and a friendly "Shadow" flightgroup is launched with an
    alternative greeting instead.
    """
    def __init__(self,savevar,systems,delay,faction,numenemies,dyntype='',dynfg='',greetingText=["You have been scanned and contraband has been found in your hold.","You should have dumped it while you had the chance.","Now you die!"], directions=[], destination='',AdjustFaction=True,cargotype="Brilliance",altGreetingText=["Thank you for delivering the cargo to us instead.","We appreciate your candor in this matter."]):
        ambush.ambush.__init__(self,savevar,systems,delay,faction,numenemies,dyntype,dynfg,greetingText,directions,destination,AdjustFaction)
        self.altGreetingText=altGreetingText
        # Cargo name that triggers / cancels the ambush.
        self.cargotype=cargotype
        # Scratch index used to scan the unit list in Execute().
        self.counter=0
    def FriendlyLaunch(self):
        # Launch a friendly flightgroup near the player and deliver the
        # alternative (thank-you) greeting.
        self.havelaunched=1
        import launch
        L=launch.Launch()
        L.fg="Shadow"
        if(self.dyntype==""):
            import faction_ships
            self.dyntype=faction_ships.getRandomFighter(self.faction)
        L.type=self.dyntype
        L.dyntype=self.dyntype
        L.num=self.numenemies
        L.faction=self.faction
        L.minradius=3000
        L.maxradius=4000
        try:
            # Scale launch distance if the dataset defines a factor;
            # best-effort, older datasets may not have it.
            import faction_ships
            L.minradius*=faction_ships.launch_distance_factor
            L.maxradius*=faction_ships.launch_distance_factor
        except:
            pass
        you=VS.getPlayerX(self.cp)
        friendly=L.launch(you)
        import universe
        universe.greet(self.altGreetingText,friendly,you);
    def Execute(self):
        ambush.ambush.Execute(self)
        if(self.inescapable):
            you=VS.getPlayerX(self.cp)
            if you.getUnitSystemFile()==self.systems[0]:
                self.timer=VS.GetGameTime();
                # Player no longer carries the contraband: stand down.
                if you.GetCargo(self.cargotype).GetQuantity()==0:
                    self.inescapable=0
                    un=VS.getUnit(self.counter)
                    if(un):
                        if un.getName()==self.cargotype or un.getName()=="Mission_Cargo":
                            self.counter=0
                    # Count active units so we can walk the list backwards.
                    un=VS.getUnit(0)
                    while(un):
                        self.counter+=1
                        un=VS.getUnit(self.counter)
                    # Destroy any jettisoned cargo units, then launch the
                    # friendly group once (havelaunched guards re-entry).
                    while (self.counter>0):
                        self.counter-=1
                        un=VS.getUnit(self.counter)
                        if (un):
                            if un.getName()==self.cargotype or un.getName()=="Mission_Cargo":
                                un.Kill()
                                if not self.havelaunched:
                                    self.FriendlyLaunch()
                            else:
                                #print self.cargotype +" not matched with "+un.getName()
                                self.counter+=1
                    else:
                        self.counter=0
| vegastrike/Assets-Production | modules/missions/ambush_scan.py | Python | gpl-2.0 | 2,799 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
# Old-style (pre-Django-1.10) patterns() URLconf: the admin site plus
# the painindex_app URLs mounted at the site root.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'painindex.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include('painindex_app.urls', namespace="painindex_app", app_name="painindex_app")),
)
| xanv/painindex | painindex/urls.py | Python | mit | 398 |
import datetime
import os
from utilities import load_template
def create_sidebar(caches, index_template, target, cache_dir):
    """Render the sidebar HTML listing all caches into *target*.

    caches         -- sequence of cache objects; uses _code, _date, _name,
                      _owner, _url, _founds, _logs, _short_description
    index_template -- HTML file containing a SIDEBAR-CONTENT marker line,
                      which is replaced by the generated list
    target         -- path of the HTML file to write
    cache_dir      -- image directory; thumbnails are looked up under
                      <cache_dir>/small/<code>.jpg
    """
    header_template = load_template('sidebar_header')
    item_template = load_template('sidebar_item')
    img_template = load_template('sidebar_img')
    desc_template = load_template('sidebar_desc')
    footer_template = load_template('sidebar_footer')
    # Split the index template into the parts before and after the
    # SIDEBAR-CONTENT marker line (the marker itself is dropped).
    index_prefix = []
    index_suffix = []
    with open(index_template, 'r') as f:
        prefix = True
        for line in f.readlines():
            if 'SIDEBAR-CONTENT' in line:
                prefix = False
            elif prefix:
                index_prefix.append(line)
            else:
                index_suffix.append(line)
    with open(target, 'w') as f:
        for line in index_prefix:
            f.write(line)
        f.write(header_template.replace('##COUNT##', str(len(caches))))
        for cache in caches:
            it = item_template
            it = it.replace('##CODE##', cache._code)
            it = it.replace('##DATE##', cache._date.strftime('%Y-%m-%d'))
            it = it.replace('##NAME##', cache._name)
            it = it.replace('##OWNER##', cache._owner)
            it = it.replace('##URL##', cache._url)
            # Bug fix: the original used 'is 0' / 'is 1', which compares
            # object identity, not value; use '==' for integer comparison.
            finds = f'{cache._founds} Funde'
            if cache._founds == 0:
                finds = 'keine Funde'
            elif cache._founds == 1:
                finds = 'ein Fund'
            it = it.replace('##FINDS##', finds)
            # Count logs that carry coordinates.
            with_coords = 0
            for log in cache._logs:
                if log._coordinates:
                    with_coords += 1
            finds2 = f'{with_coords} Logs mit Koordinaten'
            if with_coords == 0:
                finds2 = 'keine Logs mit Koordinaten'
            elif with_coords == 1:
                finds2 = 'ein Log mit Koordinaten'
            it = it.replace('##FINDS2##', finds2)
            desc = ''
            if cache._short_description is not None:
                desc = desc_template.replace('##TEXT##', cache._short_description)
            it = it.replace('##DESC##', desc)
            # Only embed a thumbnail if the image file actually exists.
            img = ''
            thumb = os.path.join(cache_dir, 'small', f'{cache._code}.jpg')
            if os.path.exists(thumb):
                img = img_template.replace('##URL##', f'img/small/{cache._code}.jpg')
            it = it.replace('##IMG##', img)
            f.write(it)
        f.write(footer_template.replace('##DATE##', datetime.datetime.now().isoformat()))
        for line in index_suffix:
            f.write(line)
| flopp/safari | py/sidebargen.py | Python | mit | 2,550 |
# General ES Constants
# Field/key names and option values used when building Elasticsearch
# requests and reading responses.
COUNT = 'count'
CREATE = 'create'
DOCS = 'docs'
FIELD = 'field'
FIELDS = 'fields'
HITS = 'hits'
ID = '_id'
INDEX = 'index'
INDEX_NAME = 'index_name'
ITEMS = 'items'
KILOMETERS = 'km'
MAPPING_DYNAMIC = 'dynamic'
MAPPING_MULTI_FIELD = 'multi_field'
MAPPING_NULL_VALUE = 'null_value'
MILES = 'mi'
OK = 'ok'
PROPERTIES = 'properties'
PROPERTY_TYPE = 'type'
SCORE = '_score'
SOURCE = '_source'
TOTAL = 'total'
TTL = '_ttl'
TYPE = '_type'
UID = '_uid'
UNIT = 'unit'
URL = 'url'
URLS = 'urls'
# Matching / Filtering
# Keys used in query DSL bodies (bool queries, filters, terms).
AND = "and"
BOOL = 'bool'
DOC_TYPE = 'doc_type'
FILTER = 'filter'
FILTERED = 'filtered'
MATCH_ALL = 'match_all'
MUST = 'must'
MUST_NOT = 'must_not'
OR = "or"
QUERY = 'query'
SHOULD = 'should'
SORT = 'sort'
TERMS = 'terms'
TERM = 'term'
# Sorting / Misc.
# Keys for sort order, pagination and facets.
ASC = 'asc'
DESC = 'desc'
FACET_FILTER = 'facet_filter'
FACETS = 'facets'
FROM = 'from'
OFFSET = 'offset'
ORDER = 'order'
SIZE = 'size'
TO = 'to'
# Runtime Constants
# Default number of results per page used by callers of this module.
DEFAULT_PAGE_SIZE = 20
| wan/bungee | bungee/const.py | Python | bsd-2-clause | 978 |
from rctk.layouts.layouts import Layout
class TabbedLayout(Layout):
    """Layout rendered as a set of tabs; identified client-side by 'tabbed'."""
    type = "tabbed"
| rctk/rctk | rctk/layouts/tabbed.py | Python | bsd-2-clause | 89 |
#!/usr/bin/env python
# We attempted to make this program work with both python2 and python3
"""This script takes a set of files and a cluster configuration describing a set of machines.
It uploads the files to the given machines in round-robin fashion.
The script can also be given an optional schema file.
This file will be uploaded to all machines.
The list of machines is provided in a Hillview configuration file."""
# pylint: disable=invalid-name
from argparse import ArgumentParser, REMAINDER
import os.path
from hillviewCommon import ClusterConfiguration, get_config, get_logger
logger = get_logger("upload-data")
# Remote folders already created during this run, keyed by
# (hostname, folder) tuples so that a hostname or folder containing the
# old ':' separator cannot collide with a different pair.
created_folders = set()

def create_remote_folder(remoteHost, folder):
    """Create *folder* on *remoteHost*, at most once per (host, folder) pair."""
    key = (remoteHost.host, folder)
    if key in created_folders:
        return
    remoteHost.create_remote_folder(folder)
    created_folders.add(key)
def copy_file_to_remote_host(rh, source, folder, copyOption):
    """Copy files in the specified folder to the remote machine"""
    # Ensure the destination folder exists first (cached per host/folder,
    # so at most one mkdir round-trip per pair).
    create_remote_folder(rh, folder)
    rh.copy_file_to_remote(source, folder, copyOption)
def copy_everywhere(config, file, folder, copyOption):
    """Copy specified file to all worker machines"""
    # NOTE: parameter name 'file' shadows the builtin, kept for
    # backwards compatibility with existing callers.
    assert isinstance(config, ClusterConfiguration)
    message = "Copying " + file + " to all hosts"
    logger.info(message)
    config.run_on_all_workers(lambda rh: copy_file_to_remote_host(rh, file, folder, copyOption))
def copy_files(config, folder, filelist, copyOption):
    """Distribute the files in *filelist* across the worker machines,
    assigning them round-robin in worker order."""
    assert isinstance(config, ClusterConfiguration)
    message = "Copying " + str(len(filelist)) + " files to all hosts in round-robin"
    logger.info(message)
    workers = config.get_workers()
    for position, filename in enumerate(filelist):
        target = workers[position % len(workers)]
        copy_file_to_remote_host(target, filename, folder, copyOption)
def main():
    """Parse command-line arguments and upload the given files.

    Common files (--common/-s) are copied to every worker; the remaining
    positional files are distributed round-robin.
    """
    parser = ArgumentParser(epilog="The argument in the list are uploaded in round-robin " +
                            "to the worker machines in the cluster")
    parser.add_argument("config", help="json cluster configuration file")
    parser.add_argument("-d", "--directory",
                        help="destination folder where output is written" +\
                        " (if relative it is with respect to config.service_folder)")
    parser.add_argument("-L", "--symlinks", help="Follow symlinks instead of ignoring them",
                        action="store_true")
    parser.add_argument("--common", "-s", help="File that is loaded to all machines", action="append")
    parser.add_argument("files", help="Files to copy", nargs=REMAINDER)
    args = parser.parse_args()
    config = get_config(parser, args)
    folder = args.directory
    if folder is None:
        logger.error("Directory argument is mandatory")
        parser.print_help()
        exit(1)
    if args.symlinks:
        copyOptions = "-L"
    else:
        copyOptions = ""
    if not os.path.isabs(folder):
        folder = os.path.join(config.service_folder, folder)
        message = "Folder is relative, using " + folder
        logger.info(message)
    # Bug fix: args.common is None when --common/-s is never given
    # (argparse action="append" defaults to None), so iterating it
    # directly raised TypeError; fall back to an empty list.
    for c in args.common or []:
        copy_everywhere(config, c, folder, copyOptions)
    if args.files:
        copy_files(config, folder, args.files, copyOptions)
    else:
        logger.info("No files to upload to the machines provided in a Hillview configuration")
    logger.info("Done.")
| mbudiu-vmw/hiero | bin/upload-data.py | Python | apache-2.0 | 3,589 |
"""
http://code.google.com/codejam/contest/8284486/dashboard
"""
from .util import (SolverBase, sum_of_int_cube, sum_of_int_square,
sum_of_int)
class SquareCountSolver(SolverBase):
    """Solver for the Square Counting problem: one grid size per input
    line, one square count per output line."""
    def __call__(self):
        answers = []
        for line in self._iter_input():
            rows, cols = self._split_line_to_list(line)
            answers.append(DotGrid(rows, cols)())
        self._write_result(answers)
class DotGrid(object):
    """Counts squares on an n_dots x n_col grid of dots, modulo a prime.

    Evaluates sum over k of (r - k) * (c - k) * k for 1 <= k <= min(r, c),
    expanded into cubic/quadratic/linear partial sums.
    """
    threshold = 1000000007

    def __init__(self, n_dots, n_col):
        self.n_row = n_dots
        self.n_col = n_col

    def __call__(self):
        limit = min(self.n_row, self.n_col)
        total = sum_of_int_cube(limit, self.threshold)
        total -= self._mod((self.n_row + self.n_col) *
                           sum_of_int_square(limit, self.threshold))
        total += self.n_row * self.n_col * sum_of_int(limit, self.threshold)
        return self._mod(total)

    def _mod(self, x):
        # Reduce modulo the contest's prime.
        return x % self.threshold
if __name__ == "__main__":
    import sys
    # python -m puzzle.square_count A-large-practice
    # argv[1] is the input-file identifier handed to the solver.
    SquareCountSolver(sys.argv[1])()
| aliciawyy/dmining | puzzle/square_count.py | Python | apache-2.0 | 1,204 |
import uuid
from django.db import models
class TestModel(models.Model):
    # Baseline fixture model with the implicit auto-increment integer pk.
    name = models.CharField(max_length=50, default='test data')
class TestForeignKey(models.Model):
    # Fixture with a FK to TestModel, for testing related-object auditing.
    name = models.CharField(max_length=50)
    test_fk = models.ForeignKey(TestModel, on_delete=models.CASCADE)
class TestM2M(models.Model):
    # Fixture with an M2M to TestModel, for testing m2m change auditing.
    name = models.CharField(max_length=50)
    test_m2m = models.ManyToManyField(TestModel)
class TestUUIDModel(models.Model):
    # Variant of TestModel using a UUID primary key.
    id = models.UUIDField(
        primary_key=True, unique=True, editable=False, default=uuid.uuid4
    )
    name = models.CharField(max_length=50, default='test data')
class TestUUIDForeignKey(models.Model):
    # UUID-pk fixture with a FK to TestUUIDModel.
    id = models.UUIDField(
        primary_key=True, unique=True, editable=False, default=uuid.uuid4
    )
    name = models.CharField(max_length=50)
    test_fk = models.ForeignKey(TestUUIDModel, on_delete=models.CASCADE)
class TestUUIDM2M(models.Model):
    # UUID-pk fixture with an M2M to TestUUIDModel.
    id = models.UUIDField(
        primary_key=True, unique=True, editable=False, default=uuid.uuid4
    )
    name = models.CharField(max_length=50)
    test_m2m = models.ManyToManyField(TestUUIDModel)
class TestBigIntModel(models.Model):
    # Variant of TestModel using a 64-bit auto-increment primary key.
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50, default='test data')
class TestBigIntForeignKey(models.Model):
    # BigInt-pk fixture with a FK to TestBigIntModel.
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50)
    test_fk = models.ForeignKey(TestBigIntModel, on_delete=models.CASCADE)
class TestBigIntM2M(models.Model):
    # BigInt-pk fixture with an M2M to TestBigIntModel.
    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50)
    test_m2m = models.ManyToManyField(TestBigIntModel)
| soynatan/django-easy-audit | easyaudit/tests/test_app/models.py | Python | gpl-3.0 | 1,664 |
def f():
    # Returns a plain string, which is not a context manager.
    return 'string'
# NOTE(review): appears to be a static-analysis test fixture — 'with' on a
# non-context-manager, then 'a' rebound to values of different types.
# Behavior must not be "fixed".
with f() as a:
    a = 3
a = {}
| clark800/pystarch | test/testcases/with.py | Python | mit | 64 |
# HF XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# HF X
# HF X f90wrap: F90 to Python interface generator with derived type support
# HF X
# HF X Copyright James Kermode 2011
# HF X
# HF X These portions of the source code are released under the GNU General
# HF X Public License, version 2, http://www.gnu.org/copyleft/gpl.html
# HF X
# HF X If you would like to license the source code under different terms,
# HF X please contact James Kermode, james.kermode@gmail.com
# HF X
# HF X When using this software, please cite the following reference:
# HF X
# HF X http://www.jrkermode.co.uk/f90wrap
# HF X
# HF XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
from __future__ import print_function
import copy
import logging
import re
from f90wrap import fortran as ft
class AccessUpdater(ft.FortranTransformer):
    """Visit module contents and update public_symbols and
    private_symbols lists to be consistent with (i) default module
    access; (ii) public and private statements at module level;
    (iii) public and private statement in types; (iv) public
    and private attributes of individual elements."""

    def __init__(self):
        # Current module and type while walking the tree (None outside).
        self.mod = None
        self.type = None

    def update_access(self, node, mod, default_access, in_type=False):
        # Place node.name on mod.public_symbols or mod.private_symbols,
        # honouring the module's default access and any explicit
        # 'public'/'private' attribute on the node. Derived-type
        # components (in_type=True) are never added to the module lists.
        if default_access == 'public':
            if ('private' not in getattr(node, 'attributes', []) and
                node.name not in mod.private_symbols):
                # symbol should be marked as public if it's not already
                if not in_type and node.name not in mod.public_symbols:
                    logging.debug('marking public symbol ' + node.name)
                    mod.public_symbols.append(node.name)
            else:
                # symbol should be marked as private if it's not already
                # (callbacks are kept out of the private list)
                if not in_type and (node.name not in mod.private_symbols and
                                    'callback' not in getattr(node, 'attributes', [])):
                    logging.debug('marking private symbol ' + node.name)
                    mod.private_symbols.append(node.name)
        elif default_access == 'private':
            if ('public' not in getattr(node, 'attributes', []) and
                node.name not in mod.public_symbols):
                # symbol should be marked as private if it's not already
                if not in_type and (node.name not in mod.private_symbols and
                                    'callback' not in getattr(node, 'attributes', [])):
                    logging.debug('marking private symbol ' + node.name)
                    mod.private_symbols.append(node.name)
            else:
                # symbol should be marked as public if it's not already
                if not in_type and node.name not in mod.public_symbols:
                    logging.debug('marking public symbol ' + node.name)
                    mod.public_symbols.append(node.name)
        else:
            raise ValueError('bad default access %s for reference %s' %
                             (mod.default_access, mod.name))

    def visit_Module(self, mod):
        # keep track of the current module
        self.mod = mod
        mod = self.generic_visit(mod)
        self.mod = None
        return mod

    def visit_Procedure(self, node):
        # Procedures outside any module have no access lists to update.
        if self.mod is None:
            return self.generic_visit(node)
        self.update_access(node, self.mod, self.mod.default_access)
        return self.generic_visit(node)

    def visit_Interface(self, node):
        if self.mod is None:
            return self.generic_visit(node)
        self.update_access(node, self.mod, self.mod.default_access)
        return self.generic_visit(node)

    def visit_Type(self, node):
        if self.mod is None:
            return self.generic_visit(node)
        self.type = node
        self.update_access(node, self.mod, self.mod.default_access)
        # Components of a type default to public unless the type itself
        # carries a 'private' statement.
        node.default_access = 'public'
        if 'private' in node.attributes:
            node.default_access = 'private'
        node = self.generic_visit(node)
        self.type = None
        return node

    def visit_Element(self, node):
        # Elements inside a type follow the type's default access;
        # module-level elements follow the module's.
        if self.type is not None:
            self.update_access(node, self.mod, self.type.default_access, in_type=True)
        else:
            self.update_access(node, self.mod, self.mod.default_access)
        return node
class PrivateSymbolsRemover(ft.FortranTransformer):
    """
    Transform a tree by removing private symbols
    """

    def __init__(self):
        self.mod = None

    def visit_Module(self, mod):
        # keep track of the current module
        self.mod = mod
        mod = self.generic_visit(mod)
        self.mod = None
        return mod

    def visit_Procedure(self, node):
        # Procedures outside a module are always kept.
        if self.mod is None:
            return self.generic_visit(node)
        # Drop symbols listed as private or carrying a 'private' attribute.
        if node.name in self.mod.private_symbols:
            logging.debug('removing private symbol %s' % node.name)
            return None
        if hasattr(node, 'attributes') and 'private' in node.attributes:
            return None
        return self.generic_visit(node)

    def visit_Interface(self, node):
        # remove entirely private interfaces
        if node.name in self.mod.private_symbols:
            logging.debug('removing private symbol %s' % node.name)
            return None
        # do not call generic_visit(), so we don't
        # remove private procedures within public
        # interfaces, as these should still be wrapped
        return node

    # Types and module elements follow the same rules as procedures.
    visit_Type = visit_Procedure
    visit_Element = visit_Procedure
def remove_private_symbols(node):
    """
    Walk the tree starting at *node*, removing all private symbols.

    This function first applies the AccessUpdater transformer to
    ensure module *public_symbols* and *private_symbols* are up to
    date with *default_access* and individual `public` and `private`
    attributes, then strips everything marked private.
    """
    node = AccessUpdater().visit(node)
    node = PrivateSymbolsRemover().visit(node)
    return node
class UnwrappablesRemover(ft.FortranTransformer):
    """Prune procedures, arguments and type/module elements that cannot
    be wrapped: pointers and allocatables, derived types not in *types*,
    arrays of derived types, and callback routines not registered in
    *callbacks*. Constructors and destructors are always kept."""

    def __init__(self, callbacks, types, constructors, destructors):
        self.callbacks = callbacks        # names of permitted callback routines
        self.types = types                # names of wrappable derived types
        self.constructors = constructors  # constructor name suffixes
        self.destructors = destructors    # destructor name suffixes

    def visit_Interface(self, node):
        # don't wrap operator overloading routines
        if node.name.startswith('operator('):
            return None
        return self.generic_visit(node)

    def visit_Procedure(self, node):
        # special case: keep all constructors and destructors, although
        # they may have pointer arguments
        for suff in self.constructors + self.destructors:
            if node.name.endswith(suff):
                return self.generic_visit(node)

        # don't wrap operator overloading routines
        if node.name.startswith('operator('):
            return None

        # FIXME don't wrap callback arguments
        if 'callback' in node.attributes:
            return None

        args = node.arguments[:]
        if isinstance(node, ft.Function):
            args.append(node.ret_val)

        for arg in args:
            # only callback functions in self.callbacks
            if 'callback' in arg.attributes:
                if node.name not in self.callbacks:
                    logging.debug('removing callback routine %s' % node.name)
                    return None
                else:
                    continue

            if 'optional' in arg.attributes:
                # we can remove the argument instead of the whole routine
                # (handled by visit_Argument below)
                return self.generic_visit(node)
            else:
                # no allocatables or pointers
                if 'allocatable' in arg.attributes or 'pointer' in arg.attributes:
                    logging.debug('removing routine %s due to allocatable/pointer arguments' % node.name)
                    return None

                dims = [attrib for attrib in arg.attributes if attrib.startswith('dimension')]

                # no derived types apart from those in self.types
                if arg.type.startswith('type') and ft.split_type(arg.type) not in self.types:
                    logging.debug('removing routine %s due to unsupported derived type %s' %
                                  (node.name, arg.type))
                    return None

                # no arrays of derived types
                if arg.type.startswith('type') and len(dims) != 0:
                    logging.debug('removing routine %s due to unsupported derived type array %s' %
                                  (node.name, arg.type))
                    return None

        return self.generic_visit(node)

    def visit_Argument(self, node):
        # Only optional arguments can be dropped individually; anything
        # else was already handled at the procedure level.
        if not hasattr(node, 'attributes'):
            return self.generic_visit(node)
        if not 'optional' in node.attributes:
            return self.generic_visit(node)

        # remove optional allocatable/pointer arguments
        if 'allocatable' in node.attributes or 'pointer' in node.attributes:
            logging.debug('removing optional argument %s due to allocatable/pointer attributes' %
                          node.name)
            return None

        dims = [attrib for attrib in node.attributes if attrib.startswith('dimension')]

        # remove optional complex scalar arguments
        if node.type.startswith('complex') and len(dims) == 0:
            logging.debug('removing optional argument %s as it is a complex scalar' % node.name)
            return None

        # remove optional derived types not in self.types
        if node.type.startswith('type') and ft.split_type(node.type) not in self.types:
            logging.debug('removing optional argument %s due to unsupported derived type %s' %
                          (node.name, node.type))
            return None

        # remove arrays of derived types
        if node.type.startswith('type') and len(dims) != 0:
            logging.debug('removing optional argument %s due to unsupported derived type array %s' %
                          (node.name, node.type))
            return None

        return self.generic_visit(node)

    def visit_Type(self, node):
        """
        Remove unwrappable elements inside derived types
        """
        if node.name not in self.types:
            logging.debug('removing type %s' % node.name)
            return None
        else:
            elements = []
            for element in node.elements:
                # Bug fix: this previously used filter(), which on
                # Python 3 returns a lazy iterator, so 'dims != []' was
                # ALWAYS true and every pointer element was dropped
                # regardless of its dimensions. Build a real list, as
                # visit_Procedure already does.
                dims = [attr for attr in element.attributes
                        if attr.startswith('dimension')]
                # Skip this if the type is not do-able
                if 'pointer' in element.attributes and dims != []:
                    logging.debug('removing %s.%s due to pointer attribute' %
                                  (node.name, element.name))
                    continue
                if element.type.lower() == 'type(c_ptr)':
                    logging.debug('removing %s.%s as type(c_ptr) unsupported' %
                                  (node.name, element.name))
                    continue
                if element.type.startswith('type') and element.type not in self.types:
                    logging.debug('removing %s.%s as type %s unsupported' %
                                  (node.name, element.name, element.type))
                    continue
                elements.append(element)
            node.elements = elements
            return self.generic_visit(node)

    def visit_Module(self, node):
        """
        Remove unwrappable elements inside modules.

        As above, but also includes derived type elements from modules
        that do not have the "target" attribute
        """
        elements = []
        for element in node.elements:
            # Bug fix: same filter() -> list comprehension change as in
            # visit_Type; 'dims != []' must reflect real dimensions.
            dims = [attr for attr in element.attributes
                    if attr.startswith('dimension')]
            if 'pointer' in element.attributes and dims != []:
                logging.debug('removing %s.%s due to pointer attribute' %
                              (node.name, element.name))
                continue
            if element.type.lower() == 'type(c_ptr)':
                logging.debug('removing %s.%s as type(c_ptr) unsupported' %
                              (node.name, element.name))
                continue
            if element.type.startswith('type') and 'target' not in element.attributes:
                logging.debug('removing %s.%s as missing "target" attribute' %
                              (node.name, element.name))
                continue
            if element.type.startswith('type') and element.type not in self.types:
                logging.debug('removing %s.%s as type %s unsupported' %
                              (node.name, element.name, element.type))
                continue
            # parameter arrays in modules live only in the mind of the compiler
            if 'parameter' in element.attributes and dims != []:
                logging.debug('removing %s.%s as it has "parameter" attribute' %
                              (node.name, element.name))
                continue
            elements.append(element)
        node.elements = elements
        return self.generic_visit(node)
def fix_subroutine_uses_clauses(tree, types):
    """Walk over all nodes in tree, updating subroutine uses
    clauses to include the parent module and all necessary
    modules from types"""
    for mod, sub, arguments in ft.walk_procedures(tree):
        sub.uses = set()
        sub.mod_name = None
        if mod is not None:
            # A module procedure must 'use' its own module, under the
            # name it is actually invoked by (call_name wins over name).
            sub_name = sub.name
            if hasattr(sub, 'call_name'):
                sub_name = sub.call_name
            sub.uses.add((mod.name, (sub_name,)))
            sub.mod_name = mod.name
        for arg in arguments:
            # Derived-type arguments additionally require the module
            # that defines their (stripped, bare-named) type.
            if arg.type.startswith('type') and ft.strip_type(arg.type) in types:
                sub.uses.add((types[ft.strip_type(arg.type)].mod_name, (ft.strip_type(arg.type),)))
    return tree
def fix_element_uses_clauses(tree, types):
    """
    Add uses clauses to derived type elements in modules
    """
    for mod in ft.walk_modules(tree):
        for el in mod.elements:
            el.uses = set()
            el_type = ft.strip_type(el.type)
            if el.type.startswith('type') and el_type in types:
                # Bug fix: index `types` with the stripped bare name, to
                # match the membership test above (the original indexed
                # with the raw 'type(...)' string, which is inconsistent
                # with fix_subroutine_uses_clauses and can raise KeyError).
                el.uses.add((types[el_type].mod_name, (el_type,)))
    return tree
def set_intent(attributes, intent):
    """Return a new attribute list with every intent(...) entry removed
    and *intent* appended in its place; *attributes* is left untouched."""
    result = []
    for attr in attributes:
        if not attr.startswith('intent'):
            result.append(attr)
    result.append(intent)
    return result
def convert_derived_type_arguments(tree, init_lines, sizeof_fortran_t):
    """Replace derived-type arguments by opaque integer handles.

    Records on each procedure which arguments the wrapper must transfer
    in/out, allocate or deallocate. sizeof_fortran_t is used as the
    extent of the integer array that holds the opaque handle.
    """
    for mod, sub, arguments in ft.walk_procedures(tree, include_ret_val=True):
        sub.types = set()
        sub.transfer_in = []
        sub.transfer_out = []
        sub.allocate = []
        sub.deallocate = []
        # Constructors return the new object through their first argument.
        if 'constructor' in sub.attributes:
            sub.arguments[0].attributes = set_intent(sub.arguments[0].attributes, 'intent(out)')
        # Destructors must deallocate their first argument.
        if 'destructor' in sub.attributes:
            logging.debug('deallocating arg "%s" in %s' % (sub.arguments[0].name, sub.name))
            sub.deallocate.append(sub.arguments[0].name)
        for arg in arguments:
            if not hasattr(arg, 'type') or not arg.type.startswith('type'):
                continue
            # save original Fortran intent since we'll be overwriting it
            # with intent of the opaque pointer
            arg.attributes = arg.attributes + ['fortran_' + attr for attr in
                                               arg.attributes if attr.startswith('intent')]
            typename = ft.strip_type(arg.type)
            arg.wrapper_type = 'integer'
            arg.wrapper_dim = sizeof_fortran_t
            sub.types.add(typename)
            # Attach per-type initialisation code (and its module) if
            # registered for this type name.
            if typename in init_lines:
                use, (exe, exe_optional) = init_lines[typename]
                if use is not None:
                    sub.uses.add((use, [typename]))
                arg.init_lines = (exe_optional, exe)
            if 'intent(out)' in arg.attributes:
                # Output handles are transferred out and, unless the
                # argument is a pointer, allocated by the wrapper.
                arg.attributes = set_intent(arg.attributes, 'intent(out)')
                sub.transfer_out.append(arg.name)
                if 'pointer' not in arg.attributes:
                    logging.debug('allocating arg "%s" in %s' % (arg.name, sub.name))
                    sub.allocate.append(arg.name)
            else:
                # Everything else is passed in by handle.
                arg.attributes = set_intent(arg.attributes, 'intent(in)')
                sub.transfer_in.append(arg.name)
    return tree
def convert_array_intent_out_to_intent_inout(tree):
    """
    Find all intent(out) array arguments and convert to intent(inout)
    """
    for mod, sub, arguments in ft.walk_procedures(tree, include_ret_val=True):
        for arg in arguments:
            # Only array arguments, i.e. those with a dimension attribute.
            dims = [attr for attr in arg.attributes if attr.startswith('dimension') ]
            if dims == []:
                continue
            if len(dims) != 1:
                raise ValueError('more than one dimension attribute found for arg %s' % arg.name)
            if 'intent(out)' in arg.attributes:
                arg.attributes = set_intent(arg.attributes, 'intent(inout)')
    return tree
class StringLengthConverter(ft.FortranVisitor):
    """Convert lengths of all character strings to standard format

    Looks in all Procedure arguments and Type elements.
    Changes from '(len=*)' or '(*)' syntax to *(*) syntax.
    """

    def __init__(self, string_lengths, default_string_length):
        # Mapping of named length constants to concrete values, plus the
        # fallback used when a length cannot be resolved.
        self.string_lengths = string_lengths
        self.default_string_length = default_string_length

    def visit_Declaration(self, node):
        if not node.type.startswith('character'):
            return
        try:
            lind = node.type.index('(')
            rind = node.type.rindex(')')
            # Rewrite e.g. 'character(len=n)' as 'character*(n)'.
            typ = node.type[:lind] + '*' + node.type[lind:rind + 1].replace('len=', '')
            # Strip leading 'character*(' (11 characters) and trailing ')'.
            string_length = typ[11:-1]
            # Try to get length of string arguments
            if not string_length == '*' and not all([x in '0123456789' for x in string_length]):
                string_length = self.string_lengths.get(string_length, self.default_string_length)
            # Default string length for intent(out) strings
            if string_length == '*' and 'intent(out)' in node.attributes:
                string_length = self.default_string_length
        except ValueError:
            # No parenthesised length present (plain 'character'): length 1.
            string_length = 1
        node.type = 'character*(%s)' % str(string_length)
class ArrayDimensionConverter(ft.FortranVisitor):
    """
    Transform unspecified dimensions into additional dummy arguments

    e.g. the following code

        subroutine foo(a)
          integer a(:)
        end subroutine foo

    becomes:

        subroutine foo(a, n0)
          integer a(n0)
          integer n0
          !f2py intent(hide), depend(a) :: n0 = shape(a,0)
        end subroutine foo
    """

    # Dimensions that need no dummy argument: numeric literals,
    # size(...) expressions and len(...) expressions.
    valid_dim_re = re.compile(r'^(([-0-9.e]+)|(size\([_a-zA-Z0-9\+\-\*\/,]*\))|(len\(.*\)))$')

    @staticmethod
    def split_dimensions(dim):
        """Given a string like "dimension(a,b,c)" return the list of dimensions ['a','b','c']."""
        dim = dim[10:-1]  # remove "dimension(" and ")"
        br = 0   # current parenthesis nesting depth
        d = 1    # (unused)
        ds = ['']
        for c in dim:
            # Split on commas only at the top nesting level; commas
            # nested inside e.g. size(...) are kept verbatim.
            if c != ',': ds[-1] += c
            if c == '(': br += 1
            elif c == ')': br -= 1
            elif c == ',':
                if br == 0: ds.append('')
                else: ds[-1] += ','
        return ds

    def visit_Procedure(self, node):
        # Counter for generated hidden dummy arguments n0, n1, ...
        n_dummy = 0
        for arg in node.arguments:
            dims = [attr for attr in arg.attributes if attr.startswith('dimension') ]
            if dims == []:
                continue
            if len(dims) != 1:
                raise ValueError('more than one dimension attribute found for arg %s' % arg.name)
            ds = ArrayDimensionConverter.split_dimensions(dims[0])
            new_dummy_args = []
            new_ds = []
            for i, d in enumerate(ds):
                if ArrayDimensionConverter.valid_dim_re.match(d):
                    # Dimension is already expressible; len(...) needs a
                    # dedicated f2py line with 'len' renamed to 'slen'.
                    if d.startswith('len'):
                        arg.f2py_line = ('!f2py %s %s, dimension(%s) :: %s' % \
                                (arg.type,
                                 ','.join([attr for attr in arg.attributes if not attr.startswith('dimension')]),
                                 d.replace('len', 'slen'), arg.name))
                    new_ds.append(d)
                    continue
                # Otherwise introduce a hidden integer dummy argument
                # carrying this extent, inferred from the array shape
                # (not possible for intent(out) arrays).
                dummy_arg = ft.Argument(name='n%d' % n_dummy, type='integer', attributes=['intent(hide)'])
                if 'intent(out)' not in arg.attributes:
                    dummy_arg.f2py_line = ('!f2py intent(hide), depend(%s) :: %s = shape(%s,%d)' %
                                           (arg.name, dummy_arg.name, arg.name, i))
                new_dummy_args.append(dummy_arg)
                new_ds.append(dummy_arg.name)
                n_dummy += 1
            if new_dummy_args != []:
                logging.debug('adding dummy arguments %r to %s' % (new_dummy_args, node.name))
                arg.attributes = ([attr for attr in arg.attributes if not attr.startswith('dimension')] +
                                  ['dimension(%s)' % ','.join(new_ds)])
                node.arguments.extend(new_dummy_args)
class MethodFinder(ft.FortranTransformer):
    """Move procedures whose first argument is a known derived type into
    that type, so they become methods. Constructors and destructors are
    always moved; other procedures only when move_methods is true."""

    def __init__(self, types, constructor_names, destructor_names, short_names, move_methods):
        self.types = types                      # derived types, keyed by argument type string
        self.constructor_names = constructor_names
        self.destructor_names = destructor_names
        self.short_names = short_names          # optional short prefix per type name
        self.move_methods = move_methods

    def visit_Interface(self, node):
        new_procs = []
        for proc in node.procedures:
            if isinstance(proc, ft.Procedure):
                # May return None if the procedure was moved into a type.
                new_proc = self.visit_Procedure(proc, interface=node)
                if new_proc is not None:
                    new_procs.append(new_proc)
            else:
                new_procs.append(proc)
        if new_procs == []:
            # interface is now empty: all routines have been moved into Interfaces inside types
            return None
        else:
            # some procedures remain so we need to keep the Interface around
            node.procedures = new_procs
            return node

    def visit_Procedure(self, node, interface=None):
        if (len(node.arguments) == 0 or
            (node.arguments[0] is not None and
             node.arguments[0].type not in self.types)):
            # procedure is not a method, so leave it alone
            return node
        # remove prefix from subroutine name to get method name
        typ = self.types[node.arguments[0].type]
        node.method_name = node.name
        prefices = [typ.name + '_']
        if typ.name in self.short_names:
            prefices.append(self.short_names[typ.name] + '_')
        for prefix in prefices:
            if node.name.startswith(prefix):
                node.method_name = node.name[len(prefix):]
        # label constructors and destructors
        if node.method_name in self.constructor_names:
            node.attributes.append('constructor')
        elif node.method_name in self.destructor_names:
            node.attributes.append('destructor')
        if (self.move_methods or
            'constructor' in node.attributes or
            'destructor' in node.attributes):
            node.attributes.append('method')
            node.type_name = typ.name
            if interface is None:
                # just a regular method - move into typ.procedures
                typ.procedures.append(node)
                logging.debug('added method %s to type %s' %
                              (node.method_name, typ.name))
            else:
                # this method was originally inside an interface,
                # so we need to replicate Interface inside the Type
                for intf in typ.interfaces:
                    if intf.name == interface.name:
                        intf.procedures.append(node)
                        logging.debug('added method %s to interface %s in type %s' %
                                      (node.method_name, intf.name, typ.name))
                        break
                else:
                    # No matching interface in the type yet: create one.
                    intf = ft.Interface(interface.name,
                                        interface.filename,
                                        interface.doc,
                                        interface.lineno,
                                        [node])
                    typ.interfaces.append(intf)
                    logging.debug('added method %s to new interface %s in type %s' %
                                  (node.method_name, intf.name, typ.name))
            # remove method from parent since we've added it to Type
            return None
        else:
            return node
def collapse_single_interfaces(tree):
    """Collapse interfaces which contain only a single procedure."""

    class _InterfaceCollapser(ft.FortranTransformer):
        """Replace interfaces with only one procedure by that procedure"""
        def visit_Interface(self, node):
            if len(node.procedures) == 1:
                proc = node.procedures[0]
                # Preserve the interface's documentation by prepending
                # it to the procedure's own doc.
                proc.doc = node.doc + proc.doc
                logging.debug('collapsing single-component interface %s' % proc.name)
                return proc
            else:
                return node

    class _ProcedureRelocator(ft.FortranTransformer):
        """Filter interfaces and procedures into correct lists"""
        def visit_Type(self, node):
            logging.debug('visiting %r' % node)
            interfaces = []
            procedures = []
            for child in ft.iter_child_nodes(node):
                if isinstance(child, ft.Interface):
                    interfaces.append(child)
                elif isinstance(child, ft.Procedure):
                    procedures.append(child)
                else:
                    # other child nodes should be left where they are
                    pass
            node.interfaces = interfaces
            node.procedures = procedures
            return self.generic_visit(node)

        visit_Module = visit_Type

    tree = _InterfaceCollapser().visit(tree)
    tree = _ProcedureRelocator().visit(tree)
    return tree
def add_missing_constructors(tree):
    """Add a default, do-nothing constructor to every type lacking one."""
    for node in ft.walk(tree):
        if not isinstance(node, ft.Type):
            continue

        # look for an existing constructor among the type's procedures
        existing = None
        for child in ft.iter_child_nodes(node):
            if (isinstance(child, ft.Procedure) and
                    'constructor' in child.attributes):
                existing = child
                break

        if existing is not None:
            logging.info('found constructor %s' % existing.name)
            continue

        logging.info('adding missing constructor for %s' % node.name)
        this_arg = ft.Argument(name='this',
                               filename=node.filename,
                               doc=['Object to be constructed'],
                               lineno=node.lineno,
                               attributes=['intent(out)'],
                               type='type(%s)' % node.name)
        # 'skip_call' marks that there is no Fortran routine to invoke
        new_node = ft.Subroutine('%s_initialise' % node.name,
                                 node.filename,
                                 ['Automatically generated constructor for %s' % node.name],
                                 node.lineno,
                                 [this_arg],
                                 node.uses,
                                 ['constructor', 'skip_call'],
                                 mod_name=node.mod_name,
                                 type_name=node.name)
        new_node.method_name = '__init__'
        node.procedures.append(new_node)
    return tree
def add_missing_destructors(tree):
    """Add a default, do-nothing destructor to every type lacking one."""
    for node in ft.walk(tree):
        if not isinstance(node, ft.Type):
            continue

        # look for an existing destructor among the type's procedures
        existing = None
        for child in ft.iter_child_nodes(node):
            if (isinstance(child, ft.Procedure) and
                    'destructor' in child.attributes):
                existing = child
                break

        if existing is not None:
            logging.info('found destructor %s' % existing.name)
            continue

        logging.info('adding missing destructor for %s' % node.name)
        this_arg = ft.Argument(name='this',
                               filename=node.filename,
                               doc=['Object to be destructed'],
                               lineno=node.lineno,
                               attributes=['intent(inout)'],
                               type='type(%s)' % node.name)
        # 'skip_call' marks that there is no Fortran routine to invoke
        new_node = ft.Subroutine('%s_finalise' % node.name,
                                 node.filename,
                                 ['Automatically generated destructor for %s' % node.name],
                                 node.lineno,
                                 [this_arg],
                                 node.uses,
                                 ['destructor', 'skip_call'],
                                 mod_name=node.mod_name,
                                 type_name=node.name)
        new_node.method_name = '__del__'
        node.procedures.append(new_node)
    return tree
class FunctionToSubroutineConverter(ft.FortranTransformer):
    """Convert all functions to subroutines, with return value as an
    intent(out) argument after the last non-optional argument"""

    def visit_Function(self, node):
        """Return a Subroutine equivalent to Function *node*.

        The function result is turned into an additional ``ret_<name>``
        intent(out) argument placed after the last non-optional argument
        (i.e. just before the first optional one, or at the end when there
        are no optional arguments).
        """
        # insert ret_val after last non-optional argument
        arguments = node.arguments[:]
        for i, arg in enumerate(arguments):
            if 'optional' in arg.attributes:
                break
        else:
            # Bugfix: with no optional arguments the loop previously left
            # ``i`` at the index of the *last* argument, so the return value
            # was inserted before it instead of being appended.
            i = len(arguments)
        arguments.insert(i, node.ret_val)
        # NOTE(review): this mutates node.ret_val in place, which is shared
        # with the original node -- visiting the same tree twice would
        # double-prefix the name; confirm each tree is only converted once.
        arguments[i].name = 'ret_' + arguments[i].name
        arguments[i].attributes.append('intent(out)')

        new_node = ft.Subroutine(node.name,
                                 node.filename,
                                 node.doc,
                                 node.lineno,
                                 arguments,
                                 node.uses,
                                 node.attributes,
                                 mod_name=node.mod_name)
        if hasattr(node, 'call_name'):
            new_node.call_name = node.call_name
        if hasattr(node, 'type'):
            new_node.type = node.type
        new_node.orig_name = node.orig_name
        new_node.orig_node = node  # keep a reference to the original node
        return new_node
class IntentOutToReturnValues(ft.FortranTransformer):
    """
    Convert all Subroutine and Function intent(out) arguments to return values
    """

    def visit_Procedure(self, node):
        # constructors always hand back the newly created object, so force
        # the first argument ('this') to intent(out)
        if 'constructor' in node.attributes:
            node.arguments[0].attributes = set_intent(node.arguments[0].attributes,
                                                      'intent(out)')

        ret_val = []  # arguments promoted to return values
        ret_val_doc = None
        if isinstance(node, ft.Function) and node.ret_val is not None:
            # original function result comes first in the returned tuple
            ret_val.append(node.ret_val)
            if node.ret_val_doc is not None:
                ret_val_doc = node.ret_val_doc

        # split remaining arguments: intent(out) become return values,
        # everything else stays an argument
        arguments = []
        for arg in node.arguments:
            if 'intent(out)' in arg.attributes:
                ret_val.append(arg)
            else:
                arguments.append(arg)
        if ret_val == []:
            new_node = node  # no changes needed
        else:
            # rebuild the procedure as a Function returning all promoted
            # values; keep a reference to the original node
            new_node = ft.Function(node.name,
                                   node.filename,
                                   node.doc,
                                   node.lineno,
                                   arguments,
                                   node.uses,
                                   node.attributes,
                                   ret_val,
                                   ret_val_doc,
                                   mod_name=node.mod_name,
                                   type_name=node.type_name)
            new_node.orig_node = node
            if hasattr(node, 'method_name'):
                new_node.method_name = node.method_name
        return new_node
class RenameReservedWords(ft.FortranVisitor):
    """Rename identifiers that clash with Python keywords or f2py reserved
    names, preserving the Fortran-level name in ``orig_name``."""

    def __init__(self, types, name_map=None):
        self.types = types
        self.name_map = {}
        if name_map is not None:
            self.name_map.update(name_map)

        # rename Python keywords by appending an underscore
        import keyword
        self.name_map.update(dict((key, key + '_') for key in keyword.kwlist))

        # apply same renaming as f2py
        import numpy.f2py.crackfortran
        self.name_map.update(numpy.f2py.crackfortran.badnames)

        # remove some of these which are not Python reserved words
        # NOTE(review): these raise KeyError if a future numpy release drops
        # the entries from crackfortran.badnames -- confirm pinned versions
        del self.name_map['stdout']
        del self.name_map['stderr']
        del self.name_map['stdin']

    def visit_Argument(self, node):
        # remember the original name before applying the mapping
        if not hasattr(node, 'orig_name'):
            node.orig_name = node.name
        node.name = self.name_map.get(node.name, node.name)
        if isinstance(node, ft.Argument):
            # replace names in dimension attribute expressions
            # NOTE(review): plain str.replace() may also rewrite substrings
            # of longer identifiers inside the expression -- confirm inputs
            for (old_name, new_name) in self.name_map.items():
                new_attribs = []
                for attrib in node.attributes:
                    if attrib.startswith('dimension('):
                        new_attribs.append(attrib.replace(old_name, new_name))
                    else:
                        new_attribs.append(attrib)
                node.attributes = new_attribs
        return self.generic_visit(node)

    # the same renaming applies to all of these node kinds
    visit_Procedure = visit_Argument
    visit_Element = visit_Argument
    visit_Module = visit_Argument
    visit_Type = visit_Argument
class RenameArgumentsPython(ft.FortranVisitor):
    """Assign Python-level names (``py_name``) and access expressions
    (``py_value``) to procedure arguments.

    For methods, the receiver becomes ``self``; derived-type arguments are
    passed via their opaque ``_handle`` attribute.
    """

    def __init__(self, types):
        self.types = types

    def visit_Procedure(self, node):
        if hasattr(node, 'method_name'):
            if 'constructor' in node.attributes:
                # constructors return the new object: rename the result
                node.ret_val[0].py_name = 'self'
            elif len(node.arguments) >= 1 and node.arguments[0].type in self.types:
                # regular method: first argument is the receiver
                node.arguments[0].py_name = 'self'
        elif hasattr(node, 'attributes') and 'callback' in node.attributes:
            self.visit_Argument(node)
        return self.generic_visit(node)

    def visit_Argument(self, node):
        if not hasattr(node, 'py_name'):
            node.py_name = node.name
        is_derived_type = node.type.startswith('type')
        node.py_value = (node.py_name + '._handle'
                         if is_derived_type else node.py_name)
        return node
class RenameInterfacesPython(ft.FortranVisitor):
    """Prefix the method name of every procedure inside an interface with
    an underscore, so the interface dispatcher owns the public name."""

    def visit_Interface(self, node):
        for proc in node.procedures:
            base = getattr(proc, 'method_name', proc.name)
            proc.method_name = '_' + base
        return node
class OnlyAndSkip(ft.FortranTransformer):
    """
    This class does the job of removing nodes from the tree
    which are not necessary to write wrappers for (given user-supplied
    values for only and skip).

    Currently it takes a list of subroutines and a list of modules to write
    wrappers for. If empty, it does all of them.
    """

    def __init__(self, kept_subs, kept_mods):
        self.kept_subs = kept_subs
        self.kept_mods = kept_mods

    def visit_Procedure(self, node):
        # an empty keep-list means "keep everything"
        if len(self.kept_subs) > 0 and node not in self.kept_subs:
            return None
        return self.generic_visit(node)

    def visit_Module(self, node):
        # an empty keep-list means "keep everything"
        if len(self.kept_mods) > 0 and node not in self.kept_mods:
            return None
        return self.generic_visit(node)
class NormaliseTypes(ft.FortranVisitor):
    """
    Convert all type names to standard form and resolve kind names
    """

    def __init__(self, kind_map):
        # mapping used to resolve Fortran kind parameters
        self.kind_map = kind_map

    def visit_Declaration(self, node):
        node.type = ft.normalise_type(node.type, self.kind_map)
        return self.generic_visit(node)

    # arguments are declarations too, and get the same treatment
    visit_Argument = visit_Declaration
class SetInterfaceProcedureCallNames(ft.FortranVisitor):
    """
    Set call names of procedures within overloaded interfaces to the name of the interface
    """

    def visit_Interface(self, node):
        for proc in node.procedures:
            logging.info('setting call_name of %s to %s' % (proc.name, node.name))
            proc.call_name = node.name
        return node
def transform_to_generic_wrapper(tree, types, callbacks, constructors,
                                 destructors, short_names, init_lines,
                                 only_subs, only_mods, argument_name_map,
                                 move_methods):
    """
    Apply a number of rules to *tree* to make it suitable for passing to
    a F90 and Python wrapper generators. Transformations performed are:

     * Removal of procedures and modules not provided by the user
     * Removal of private symbols
     * Removal of unwrappable routines and optional arguments
     * Addition of missing constructor and destructor wrappers
     * Conversion of all functions to subroutines
     * Updating call names of procedures within interfaces
     * Update of subroutine uses clauses
    """
    # order matters: filtering must happen before method discovery, and
    # interfaces must get their call names before being collapsed
    tree = OnlyAndSkip(only_subs, only_mods).visit(tree)
    tree = remove_private_symbols(tree)
    tree = UnwrappablesRemover(callbacks, types, constructors, destructors).visit(tree)
    tree = MethodFinder(types, constructors, destructors, short_names, move_methods).visit(tree)
    # visitors below mutate the tree in place, so the result is not rebound
    SetInterfaceProcedureCallNames().visit(tree)
    tree = collapse_single_interfaces(tree)
    tree = fix_subroutine_uses_clauses(tree, types)
    tree = fix_element_uses_clauses(tree, types)
    tree = add_missing_constructors(tree)
    tree = add_missing_destructors(tree)
    tree = convert_array_intent_out_to_intent_inout(tree)
    RenameReservedWords(types, argument_name_map).visit(tree)
    return tree
def transform_to_f90_wrapper(tree, types, callbacks, constructors,
                             destructors, short_names, init_lines,
                             string_lengths, default_string_length,
                             sizeof_fortran_t, kind_map):
    """
    Additional Fortran-specific transformations:
     * Conversion of derived type arguments to opaque integer arrays
       via Fortran transfer() intrinsic.
     * Normalise type declarations
    """
    # NOTE(review): the return value of visit() is discarded here although
    # FunctionToSubroutineConverter is a transformer -- presumably the
    # replacement happens in place on the shared tree; confirm.
    FunctionToSubroutineConverter().visit(tree)
    tree = convert_derived_type_arguments(tree, init_lines, sizeof_fortran_t)
    # the remaining visitors mutate the tree in place
    StringLengthConverter(string_lengths, default_string_length).visit(tree)
    ArrayDimensionConverter().visit(tree)
    NormaliseTypes(kind_map).visit(tree)
    return tree
def transform_to_py_wrapper(tree, types):
    """
    Additional Python-specific transformations:
     * Convert intent(out) arguments to additional return values
     * Rename arguments (e.g. this -> self)
     * Prefix procedure names within interfaces with an underscore
    """
    # all three visitors mutate the tree in place
    IntentOutToReturnValues().visit(tree)
    RenameArgumentsPython(types).visit(tree)
    RenameInterfacesPython().visit(tree)
    return tree
def find_referenced_modules(mods, tree):
    """
    Given a set of modules in a parse tree, find any modules (recursively)
    used by these.

    Parameters
    ----------
    mods : set
        initial modules to search, must be included in the tree.

    tree : `fortran.Root()` object.
        the full fortran parse tree from which the mods have been taken.

    Returns
    -------
    all_mods : set
        Module() objects which are recursively used by the given modules.
    """
    # fixed-point iteration: keep expanding until no new modules appear
    frontier = copy.copy(mods)
    while frontier:
        for mod in list(frontier):
            for used_name in mod.uses:
                for candidate in ft.walk_modules(tree):
                    if candidate.name == used_name:
                        frontier.add(candidate)
        frontier -= mods
        mods |= frontier
    return mods
def find_referenced_types(mods, tree):
    """
    Given a set of modules in a parse tree, find any types either defined in
    or referenced by the module, recursively.

    Parameters
    ----------
    mods : set
        initial modules to search, must be included in the tree.

    tree : `fortran.Root` object.
        the full fortran parse tree from which the mods have been taken.

    Returns
    -------
    kept_types : set of Type() objects which are referenced or defined in the
        modules given, or recursively referenced by those types.
    """
    # Get used types now
    kept_types = set()
    for mod in mods:
        for t in mod.types:
            kept_types.add(t)
        for el in mod.elements:
            if el.type.startswith('type'):
                # NOTE(review): the substring test below would also match
                # e.g. name 'foo' against 'type(foobar)' -- confirm intended
                for mod2 in ft.walk_modules(tree):
                    for mt in mod2.types:
                        if mt.name in el.type:
                            kept_types.add(mt)

    # kept_types is now all types defined/referenced directly in kept_mods. But we also
    # need those referenced by them.
    # Fixed-point iteration over the elements of newly discovered types.
    new_set = copy.copy(kept_types)
    while new_set != set():
        temp_set = list(new_set)
        for t in temp_set:
            for el in t.elements:
                if el.type.startswith('type'):  # a referenced type, need to find def
                    for mod2 in ft.walk_modules(tree):
                        for mt in mod2.types:
                            if mt.name in el.type:
                                new_set.add(mt)

        # take out all the original types from new_set
        new_set -= kept_types
        # update the kept_types with new ones
        kept_types |= new_set
    return kept_types
| davidovitch/f90wrap | f90wrap/transform.py | Python | gpl-2.0 | 42,373 |
from django.db import models
from django.contrib import admin
import datetime, time
from mptt.models import MPTTModel, TreeForeignKey
class Location(MPTTModel):
    """Hierarchical (MPTT tree) physical location of a meter or load."""
    name = models.CharField(max_length=45)
    description = models.TextField(blank=True)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    # parent node in the location tree; NULL for a root location
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')

    def __unicode__(self):
        return self.name

    class MPTTMeta:
        # keep siblings alphabetically ordered on insertion
        order_insertion_by = ['name']
class LoadType(MPTTModel):
    """Hierarchical (MPTT tree) classification of an electrical load."""
    name = models.CharField(max_length=45)
    # short display label, e.g. for charts -- optional
    abbreviation = models.CharField(max_length=10, blank=True)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    # parent node in the load-type tree; NULL for a root category
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children')

    def __unicode__(self):
        return self.name

    class MPTTMeta:
        # keep siblings alphabetically ordered on insertion
        order_insertion_by = ['name']
class Eguage(models.Model):
    """An eGauge energy-meter device reachable over HTTP.

    NOTE(review): the class name is presumably a misspelling of 'Egauge';
    renaming would change the DB table and FK references, so it is kept.
    """
    URL = models.CharField(max_length=200,unique=True,default="http://",help_text="Must start with http:// and end with /. \n Example: http://egauge2880.egaug.es/")
    # HTTP credentials for the device, if it requires authentication
    # NOTE(review): password is stored in plain text
    username = models.CharField(max_length=50,null=True,blank=True)
    password = models.CharField(max_length=50,null=True,blank=True)
    name = models.CharField(max_length=100,unique=True)
    # last poll marker; presumably a unix timestamp -- TODO confirm units
    # (note the string default "0" on an IntegerField; Django coerces it)
    checked_at = models.IntegerField(blank=True,null=True,default="0",editable=False)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)

    def __unicode__(self):
        return self.name
class Eport(models.Model):
    """A monitored port (circuit) on an eGauge device, tied to a location
    and a load type."""
    name = models.CharField(max_length=45)
    description = models.CharField(max_length=1024, blank=True)
    eguage = models.ForeignKey(Eguage)
    location = models.ForeignKey(Location)
    load_type = models.ForeignKey(LoadType)
    # nominal electrical ratings of the circuit
    voltage = models.IntegerField(default=480,null=True,blank=True)
    phases = models.IntegerField(default=3,null=True,blank=True)
    amperage = models.IntegerField(default=600,null=True,blank=True)
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
class WeatherData(models.Model):
    """Snapshot of observed weather conditions, recorded at creation time."""
    created_at = models.DateTimeField(auto_now_add=True)
    condition = models.CharField(max_length=25)
    temp_f = models.DecimalField(max_digits=5, decimal_places=1)
    feelslike_f = models.DecimalField(max_digits=5, decimal_places=1, blank=True)
    relative_humidity = models.DecimalField(max_digits=5, decimal_places=1, blank=True)
    wind_mph = models.IntegerField(blank=True)
    wind_dir = models.CharField(max_length=10, blank=True)
    pressure_mb = models.IntegerField(blank=True)
    uv = models.FloatField(blank=True)
    solarradiation = models.DecimalField(max_digits=5, decimal_places=1, blank=True)
    precip_1hr_in = models.DecimalField(max_digits=5, decimal_places=2, blank=True)
    precip_today_in = models.DecimalField(max_digits=5, decimal_places=2, blank=True)

    def created_at_utc(self):
        """Return ``created_at`` as seconds since the Unix epoch.

        Bugfix: the previous implementation called
        ``self.created_at.mktime(...)``, but ``datetime`` objects have no
        ``mktime`` method, so this always raised AttributeError.  The
        intended call is ``time.mktime`` from the module-level import.
        """
        return time.mktime(self.created_at.timetuple())
class EnergyData(models.Model):
    """A single power reading from one eGauge port."""
    created_at = models.DateTimeField(auto_now_add = True)
    # timestamp reported by the meter (distinct from created_at)
    time_stamp = models.DateTimeField()
    # NOTE(review): naming ForeignKey fields *_id makes Django generate
    # 'eguage_id_id' / 'eport_id_id' DB columns; renaming needs a migration
    eguage_id = models.ForeignKey(Eguage)
    eport_id = models.ForeignKey(Eport)
    # instantaneous power in watts
    watts = models.BigIntegerField()
| holachek/ecosense | app/grapher/models.py | Python | mit | 3,408 |
import sys, os
import csv
import mysql.connector
from mysql.connector.constants import ClientFlag
import traceback
class SendDataToMysql:
    """Load accuracy test-case CSV files into the 'h2o' MySQL database and
    join the result tables.

    NOTE(review): connection credentials and host are hard-coded below;
    consider moving them to configuration or environment variables.
    """

    # shared connection parameters for the 'h2o' database
    _CONN_KWARGS = dict(user='root', password='0xdata',
                        host='172.16.2.178', database='h2o')

    def _connect(self, local_files=False):
        """Open a connection to the h2o database.

        :param local_files: enable LOAD DATA LOCAL INFILE on the connection.
        """
        kwargs = dict(self._CONN_KWARGS)
        if local_files:
            kwargs['client_flags'] = [ClientFlag.LOCAL_FILES]
        return mysql.connector.connect(**kwargs)

    def add_test_cases_to_h2o(self):
        """Bulk-load accuracyTestCases.csv into the TestCases table."""
        h2o = self._connect(local_files=True)
        cursor = h2o.cursor()
        try:
            cursor.execute("LOAD DATA LOCAL INFILE '../h2o-test-accuracy/src/test/resources/accuracyTestCases.csv' INTO "
                           "TABLE TestCases COLUMNS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 LINES;")
            h2o.commit()
        except:
            traceback.print_exc()
            h2o.rollback()
            assert False, "Failed to add accuracy test cases to h2o database!"

    def add_accuracy_data(self):
        """Bulk-load accuracyDataSets.csv into the AccuracyDatasets table."""
        h2o = self._connect(local_files=True)
        cursor = h2o.cursor()
        try:
            cursor.execute("LOAD DATA LOCAL INFILE '../h2o-test-accuracy/src/test/resources/accuracyDataSets.csv' INTO "
                           "TABLE AccuracyDatasets COLUMNS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 LINES;")
            h2o.commit()
        except:
            traceback.print_exc()
            h2o.rollback()
            # bugfix: the original message was copy-pasted from
            # add_test_cases_to_h2o and referred to test cases
            assert False, "Failed to add accuracy datasets to h2o database!"

    def drop_join_test_cases_tables(self):
        """Drop the TestCasesResults table if it exists."""
        h2o = self._connect()
        cursor = h2o.cursor()
        try:
            drop_join_test_cases_query = """
            DROP TABLES IF EXISTS TestCasesResults;
            """
            cursor.execute(drop_join_test_cases_query)
        except:
            traceback.print_exc()
            h2o.rollback()
            assert False, "Failed to drop TestCasesResults table!"

    def join_test_cases_results(self):
        """Recreate TestCasesResults by joining results with test cases and
        datasets."""
        h2o = self._connect(local_files=True)
        cursor = h2o.cursor()
        # Drop table if exists before re creating
        self.drop_join_test_cases_tables()
        try:
            join_query = """
            CREATE TABLE TestCasesResults AS(
            SELECT *
            FROM AccuracyTestCaseResults
            LEFT JOIN TestCases
            ON AccuracyTestCaseResults.testcase_id = TestCases.test_case_id
            LEFT JOIN AccuracyDatasets
            ON TestCases.training_data_set_id = AccuracyDatasets.data_set_id);
            """
            cursor.execute(join_query)
        except:
            traceback.print_exc()
            h2o.rollback()
            assert False, "Failed to join AccuracyTestCaseResults, TestCases, and AccuracyDatasets!"

        cursor.close()
        h2o.close()
if __name__ == '__main__':
    # The loading steps are run manually as needed before the join:
    #SendDataToMysql().add_test_cases_to_h2o()
    #SendDataToMysql().add_accuracy_data()
    SendDataToMysql().join_test_cases_results()
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
import collections
import functools
import sys
import uuid
from zope.interface import implements
from twisted.internet import reactor
from twisted.python import failure
from twisted.trial import unittest, util
from twisted.scripts import trial
from feat.database import emu as database
from feat.agencies import agency, journaler, message, recipient
from feat.agencies.messaging import emu, rabbitmq
from feat.agents.base import agent
from feat.common import log, defer, decorator, journal, time, signal
from feat.interface.generic import ITimeProvider
from feat.agencies.messaging.interface import ISink
from feat.agents.application import feat
from . import factories
from twisted.trial.unittest import FailTest
try:
    _getConfig = trial.getConfig
except AttributeError:
    # trial.getConfig() is only available when using flumotion-trial
    _getConfig = dict

# initialise the log file used by the whole test suite
log.init('test.log')
def delay(value, delay):
    """Return a deferred that fires its callback with *value* once
    *delay* seconds have elapsed."""
    result = defer.Deferred()
    time.callLater(delay, result.callback, value)
    return result
def break_chain(value):
    """Break a deferred callback chain, ensuring the remaining callbacks
    run asynchronously on the next reactor iteration."""
    return delay_callback(value, 0)
def delay_errback(failure, delay):
    """Return a deferred that fires its errback with *failure* once
    *delay* seconds have elapsed."""
    result = defer.Deferred()
    time.callLater(delay, result.errback, failure)
    return result
def break_errback_chain(failure):
    """Break a deferred errback chain, ensuring the remaining errbacks
    run asynchronously on the next reactor iteration."""
    return delay_errback(failure, 0)
# backward-compatible aliases for the helpers above
delay_callback = delay
break_callback_chain = break_chain
def attr(*args, **kwargs):
    """Decorator that adds attributes to objects.

    It can be used to set the 'slow', 'skip', or 'todo' flags in test cases.
    Positional names become True flags; keyword arguments are set verbatim.
    """

    def wrap(func):
        attributes = dict.fromkeys(args, True)
        attributes.update(kwargs)
        for key, value in attributes.items():
            setattr(func, key, value)
        return func

    return wrap
class TestCase(unittest.TestCase, log.LogProxy, log.Logger):
    """Base class for feat test cases.

    Combines Twisted's TestCase with feat logging, and acts as an
    ITimeProvider so tests use the (possibly scaled) simulated clock.
    """

    implements(ITimeProvider)

    log_category = "test"

    # define names of class variables here, whose values can be changed
    # with the @attr decorator
    configurable_attributes = []
    # when True, the test is skipped while running under a coverage tracer
    skip_coverage = False

    def __init__(self, methodName=' impossible-name '):
        log_keeper = log.get_default() or log.FluLogKeeper()
        log.LogProxy.__init__(self, log_keeper)
        log.Logger.__init__(self, self)

        # Twisted changed the TestCase.__init__ signature several
        # times.
        #
        # In versions older than 2.1.0 there was no __init__ method.
        #
        # In versions 2.1.0 up to 2.4.0 there is a __init__ method
        # with a methodName kwarg that has a default value of None.
        #
        # In version 2.5.0 the default value of the kwarg was changed
        # to "runTest".
        #
        # In versions above 2.5.0 God only knows what's the default
        # value, as we do not currently support them.
        import inspect
        if not inspect.ismethod(unittest.TestCase.__init__):
            # it's Twisted < 2.1.0
            unittest.TestCase.__init__(self)
        else:
            # it's Twisted >= 2.1.0
            if methodName == ' impossible-name ':
                # we've been called with no parameters, use the
                # default parameter value from the superclass
                defaults = inspect.getargspec(unittest.TestCase.__init__)[3]
                methodName = defaults[0]
            unittest.TestCase.__init__(self, methodName=methodName)

        self.log_name = self.id()

        # Skip slow tests if '--skip-slow' option is enabled
        if _getConfig().get('skip-slow'):
            if self.getSlow() and not self.getSkip():
                self.skip = 'slow test'

        # Handle configurable attributes
        for attr in self.configurable_attributes:
            value = util.acquireAttribute(self._parents, attr, None)
            if value is not None:
                setattr(self, attr, value)

    def assert_not_skipped(self):
        # sys.gettrace() is non-None while a coverage tracer is installed
        if self.skip_coverage and sys.gettrace():
            raise unittest.SkipTest("Test Skipped during coverage")

    def setUp(self):
        log.test_reset()
        self.assert_not_skipped()
        # Scale time if configured
        scale = util.acquireAttribute(self._parents, 'timescale', None)
        if scale is not None:
            time.scale(scale)
        else:
            time.reset()
        self.info("Test running with timescale: %r", time._get_scale())

    def getSlow(self):
        """
        Return whether this test has been marked as slow. Checks on the
        instance first, then the class, then the module, then packages. As
        soon as it finds something with a C{slow} attribute, returns that.
        Returns C{False} if it cannot find anything.
        """
        return util.acquireAttribute(self._parents, 'slow', False)

    def wait_for(self, check, timeout, freq=0.5, kwargs=dict()):
        # NOTE(review): mutable default for kwargs; harmless as long as it
        # is never mutated downstream -- confirm
        d = time.wait_for_ex(check, timeout, freq=freq, kwargs=kwargs,
                             logger=self)
        # convert a timeout failure into a test failure
        d.addErrback(lambda f: self.fail(f.value))
        return d

    def is_agency_idle(self, agency):
        # idle only when every agent in the agency is idle
        return all([agent.is_idle() for agent in agency.get_agents()])

    @defer.inlineCallbacks
    def wait_agency_for_idle(self, agency, timeout, freq=0.5):
        """Wait until the agency is idle, logging each agent's activity
        before re-raising on timeout."""
        try:
            check = lambda: self.is_agency_idle(agency)
            yield self.wait_for(check, timeout, freq)
        except unittest.FailTest:
            for agent in agency.get_agents():
                activity = agent.show_activity()
                if activity is None:
                    continue
                self.info(activity)
            raise

    def cb_after(self, arg, obj, method):
        '''
        Returns defered fired after the call of method on object.
        Can be used in defered chain like this:

        d.addCallback(doSomeStuff)
        d.addCallback(self._cb_after, obj=something, method=some_method)
        d.addCallback(jobAfterCallOfSomeMethod)

        This will fire last callback after something.some_method has been
        called.
        Parameter passed to the last callback is either return value of
        doSomeStuff, or, if this is None, the return value of stubbed method.
        '''
        old_method = obj.__getattribute__(method)
        d = defer.Deferred()

        def new_method(*args, **kwargs):
            # restore the original method: this stub only fires once
            obj.__setattr__(method, old_method)
            ret = old_method(*args, **kwargs)
            cb_arg = arg or (not isinstance(ret, defer.Deferred) and ret)
            reactor.callLater(0, d.callback, cb_arg)
            return ret

        obj.__setattr__(method, new_method)
        return d

    def assertCalled(self, obj, name, times=1, params=None):
        """Assert that the Mock *obj* recorded *times* calls of *name*,
        optionally checking the argument types against *params*."""
        assert isinstance(obj, Mock), "Got: %r" % obj
        calls = obj.find_calls(name)
        times_called = len(calls)
        template = "Expected %s method to be called %d time(s), "\
                   "was called %d time(s)"
        self.assertEqual(times, times_called,\
                         template % (name, times, times_called))
        if params:
            for call in calls:
                self.assertEqual(len(params), len(call.args))
                for param, arg in zip(params, call.args):
                    self.assertTrue(isinstance(arg, param))
        return obj

    def assertIsInstance(self, _, klass):
        """Assert *_* is an instance of *klass*; returns *_* for chaining."""
        self.assertTrue(isinstance(_, klass),
                        "Expected instance of %r, got %r instead" % (klass, _.__class__))
        return _

    def assertIs(self, expr1, expr2, msg=None):
        """Assert identity (same object), not mere equality."""
        self.assertEqual(id(expr1), id(expr2),
                         msg or ("Expected same instances and got %r and %r"
                                 % (expr1, expr2)))

    def assertIsNot(self, expr1, expr2, msg=None):
        """Assert the two expressions are different objects."""
        self.assertNotEqual(id(expr1), id(expr2),
                            msg or ("Expected different instances and got "
                                    "two %r" % (expr1, )))

    def assertAsyncEqual(self, chain, expected, value, *args, **kwargs):
        '''Adds an asynchronous assertion for equality to the specified
        deferred chain.
        If the chain is None, a new fired one will be created.
        The checks are serialized and done in order of declaration.
        If the value is a Deferred, the check wait for its result,
        if not it compare rightaway.
        If value is a callable, it is called with specified arguments
        and keyword WHEN THE PREVIOUS CALL HAS BEEN DONE.

        Used like this::

          d = defer.succeed(None)
          d = self.assertAsyncEqual(d, EXPECTED, FIRED_DEFERRED)
          d = self.assertAsyncEqual(d, EXPECTED, VALUE)
          d = self.assertAsyncEqual(d, 42, asyncDouble(21))
          d = self.assertAsyncEqual(d, 42, asyncDouble, 21)
          return d

        Or::

          return self.assertAsyncEqual(None, EXPECTED, FIRED_DEFERRED)
        '''

        def check(result):
            self.assertEqual(expected, result)
            return result

        if chain is None:
            chain = defer.succeed(None)
        return chain.addBoth(self._assertAsync, check, value, *args, **kwargs)

    def assertAsyncIterEqual(self, chain, expected, value, *args, **kwargs):
        """Like assertAsyncEqual but compares the result as a list."""

        def check(result):
            self.assertEqual(expected, list(result))
            return result

        if chain is None:
            chain = defer.succeed(None)
        return chain.addBoth(self._assertAsync, check, value, *args, **kwargs)

    def assertFails(self, exception_class, method, *args, **kwargs):
        """Call *method* and assert the returned deferred fails with
        *exception_class*."""
        d = method(*args, **kwargs)
        self.assertFailure(d, exception_class)
        return d

    @defer.inlineCallbacks
    def asyncEqual(self, expected, async_value):
        """Assert *async_value* is a Deferred whose result equals
        *expected*."""
        self.assertTrue(isinstance(async_value, defer.Deferred))
        value = yield async_value
        self.assertEqual(value, expected)

    @defer.inlineCallbacks
    def asyncIterEqual(self, expected, async_iter):
        """Assert *async_iter* is a Deferred yielding an iterable equal,
        as a list, to *expected*."""
        self.assertTrue(isinstance(async_iter, defer.Deferred))
        iterator = yield async_iter
        self.assertTrue(isinstance(iterator, collections.Iterable))
        self.assertEqual(list(iterator), expected)

    @defer.inlineCallbacks
    def asyncErrback(self, error_class, fun, *args, **kwargs):
        """Call *fun* and assert its Deferred fails with *error_class*."""
        result = fun(*args, **kwargs)
        self.assertTrue(isinstance(result, defer.Deferred))
        try:
            res = yield result
            self.fail("Expecting asynchronous error %s "
                      "and got result: %r" % (error_class.__name__, res))
        except Exception, e:
            # let genuine test failures (e.g. from self.fail above) escape
            if isinstance(e, FailTest):
                raise
            self.assertTrue(isinstance(e, error_class),
                            "Expecting asynchronous error %s "
                            "and got %s" % (error_class.__name__,
                                            type(e).__name__))

    def assertAsyncFailure(self, chain, errorKlasses, value, *args, **kwargs):
        '''Adds an asynchronous assertion for failure to the specified chain.
        If the chain is None, a new fired one will be created.
        The checks are serialized and done in order of declaration.
        If the value is a Deferred, the check wait for its result,
        if not it compare rightaway.
        If value is a callable, it is called with specified arguments
        and keyword WHEN THE PREVIOUS CALL HAS BEEN DONE.

        Used like this::

          d = defer.succeed(None)
          d = self.assertAsyncFailure(d, ERROR_CLASSES, FIRED_DEFERRED)
          d = self.assertAsyncFailure(d, ERROR_CLASSES, FUNCTION, ARG)
          d = self.assertAsyncFailure(d, [ValueError, TypeError], fun(21))
          d = self.assertAsyncFailure(d, [ValueError], fun, 21)
          return d
        '''

        def check(failure):
            if isinstance(errorKlasses, collections.Sequence):
                self.assertTrue(failure.check(*errorKlasses))
            else:
                self.assertTrue(failure.check(errorKlasses))
            return None # Resolve the error

        if chain is None:
            chain = defer.succeed(None)
        return chain.addBoth(self._assertAsync, check, value, *args, **kwargs)

    def assertAsyncRaises(self, chain, ErrorClass, fun, *args, **kwargs):
        """Queue a synchronous assertRaises check on *chain*.

        NOTE(review): unlike the other assertAsync* helpers this does not go
        through _assertAsync, and the incoming chain result is ignored.
        """

        def check(param):
            self.assertRaises(ErrorClass, fun, *args, **kwargs)
            return None # Resolve the error

        if chain is None:
            chain = defer.succeed(None)
        return chain.addBoth(check)

    def stub_method(self, obj, method, handler):
        """Replace *method* on *obj* with *handler* bound to *obj*."""
        handler = functools.partial(handler, obj)
        obj.__setattr__(method, handler)
        return obj

    def tearDown(self):
        # undo all global test state touched in setUp
        log.test_reset()
        time.reset()
        signal.reset()

    ### ITimeProvider Methods ###

    def get_time(self):
        return time.time()

    ### Private Methods ###

    def _assertAsync(self, param, check, value, *args, **kwargs):
        # propagate assertion failures from earlier checks in the chain
        if isinstance(param, failure.Failure):
            if param.check(AssertionError):
                param.raiseException()
        if isinstance(value, defer.Deferred):
            value.addBoth(check)
            return value
        # NOTE(review): args is always a tuple here, never None, so this
        # condition reduces to callable(value) -- confirm intent
        if args is not None and callable(value):
            return self._assertAsync(param, check, value(*args, **kwargs))
        return check(value)
class Mock(object):
    """Minimal mock base class: records calls made to methods decorated
    with Mock.stub() or Mock.record()."""

    def __init__(self):
        # MockCall instances, in call order
        self._called = []

    def find_calls(self, name):
        """Return all recorded calls whose method name equals *name*."""
        return filter(lambda x: x.name == name, self._called)

    @staticmethod
    @decorator.simple_function
    def stub(method):
        """Decorator: record the call and skip the original body."""

        def decorated(self, *args, **kwargs):
            call = MockCall(method.__name__, args, kwargs)
            self._called.append(call)

        return decorated

    @staticmethod
    @decorator.simple_function
    def record(method):
        """Decorator: record the call, then run the original method."""

        def decorated(self, *args, **kwargs):
            call = MockCall(method.__name__, args, kwargs)
            self._called.append(call)
            return method(self, *args, **kwargs)

        return decorated
class MockCall(object):
    """A single recorded invocation: the method name plus the positional
    and keyword arguments it was called with."""

    def __init__(self, name, args, kwargs):
        # stored verbatim so tests can inspect them later
        self.name = name
        self.args = args
        self.kwargs = kwargs
class AgencyTestHelper(object):
protocol_type = None
protocol_id = None
remote_id = None
def setUp(self):
    """Create an agency wired to emulated messaging, an in-memory database
    and an SQLite journal writer; returns a deferred firing when the
    agency has initiated."""
    self.agency = agency.Agency()
    # sender id used by the recv_* helpers; set by recv_announce
    self.guid = None
    self._messaging = emu.RabbitMQ()
    mesg = rabbitmq.Client(self._messaging, 'agency_queue')
    self._db = database.Database()
    writer = journaler.SqliteWriter(self)
    journal = journaler.Journaler()
    journal.configure_with(writer)
    d = writer.initiate()
    # start the agency only once the journal writer is ready
    d.addCallback(defer.drop_param, self.agency.initiate,
                  self._db, journal, mesg)
    return d
def setup_endpoint(self):
    '''
    Sets up the destination for tested component to send messages to.
    This returns:
     - endpoint: Recipient instance pointing to the queue above
       (use it for reply-to fields)
     - queue: Queue instance we use may call .get() on to get
       messages from components being tested

    @returns: tuple of endpoint, queue
    '''
    # a fresh unique key keeps endpoints from different tests apart
    endpoint = recipient.Agent(str(uuid.uuid1()), 'lobby')
    messaging = self._messaging
    queue = messaging.define_queue(endpoint.key)
    messaging.define_exchange(endpoint.route, 'direct')
    messaging.create_binding(
        endpoint.route, endpoint.key, endpoint.key)
    return endpoint, queue
def assert_queue_empty(self, queue, timeout=10):
    """Assert that *queue* delivers no message within *timeout* seconds."""
    pending = queue.get()

    def check_still_pending(_):
        self.assertFalse(pending.called)

    watchdog = delay(None, timeout)
    watchdog.addCallback(check_still_pending)
    # release the consumer we started with queue.get()
    watchdog.addCallback(pending.callback)
    return watchdog
# methods for handling documents
def doc_factory(self, doc_class, **options):
    '''
    Builds document of given class and saves it to the database.

    @returns: Document with id and revision set
    @rtype: subclass of feat.agents.document.Document
    '''
    document = factories.build(doc_class.type_name, **options)
    # save_document returns a deferred firing with the stored document
    return self.agency._database.get_connection().save_document(document)
# methods for sending and receiving custom messages
def send_announce(self, manager):
msg = message.Announcement()
manager._get_medium().announce(msg)
return manager
def send_bid(self, contractor, bid=1):
msg = message.Bid()
msg.bids = [bid]
contractor._get_medium().bid(msg)
return contractor
def send_refusal(self, contractor):
msg = message.Refusal()
contractor._get_medium().refuse(msg)
return contractor
def send_final_report(self, contractor):
msg = message.FinalReport()
contractor._get_medium().complete(msg)
return contractor
def send_cancel(self, contractor, reason=""):
msg = message.Cancellation()
msg.reason = reason
contractor._get_medium().defect(msg)
return contractor
def recv_announce(self, expiration_time=None, traversal_id=None):
msg = message.Announcement()
self.guid = str(uuid.uuid1())
msg.sender_id = self.guid
msg.traversal_id = traversal_id or str(uuid.uuid1())
return self.recv_msg(msg, expiration_time=expiration_time,
public=True)
def recv_grant(self, _, update_report=None):
msg = message.Grant()
msg.update_report = update_report
msg.sender_id = self.guid
return self.recv_msg(msg).addCallback(lambda ret: _)
def recv_rejection(self, _):
msg = message.Rejection()
msg.sender_id = self.guid
return self.recv_msg(msg).addCallback(lambda ret: _)
def recv_cancel(self, _, reason=""):
msg = message.Cancellation()
msg.reason = reason
msg.sender_id = self.guid
return self.recv_msg(msg).addCallback(lambda ret: _)
def recv_ack(self, _):
msg = message.Acknowledgement()
msg.sender_id = self.guid
return self.recv_msg(msg).addCallback(lambda ret: _)
def recv_notification(self, result=None, traversal_id=None):
msg = message.Notification()
msg.traversal_id = traversal_id or str(uuid.uuid1())
d = self.recv_msg(msg, key="dummy-notification")
d.addCallback(defer.override_result, result)
return d
def recv_msg(self, msg, reply_to=None, key=None,
expiration_time=None, public=False):
d = self.cb_after(arg=None, obj=self.agent._messaging,
method='on_message')
msg.reply_to = reply_to or self.endpoint
msg.expiration_time = expiration_time or (time.future(10))
msg.protocol_type = self.protocol_type
msg.protocol_id = self.protocol_id
msg.message_id = str(uuid.uuid1())
msg.receiver_id = self.remote_id
key = 'dummy-contract' if public else self.agent._descriptor.doc_id
shard = self.agent._descriptor.shard
factory = recipient.Broadcast if public else recipient.Agent
msg.recipient = factory(key, shard)
self.agency._messaging.dispatch(msg)
return d
def reply(self, msg, reply_to, original_msg):
d = self.cb_after(arg=None, obj=self.agent._messaging,
method='on_message')
dest = recipient.IRecipient(original_msg)
msg.reply_to = recipient.IRecipient(reply_to)
msg.message_id = str(uuid.uuid1())
msg.protocol_id = original_msg.protocol_id
msg.expiration_time = time.future(10)
msg.protocol_type = original_msg.protocol_type
msg.receiver_id = original_msg.sender_id
msg.recipient = dest
self.agency._messaging.dispatch(msg)
return d
class StubAgent(object):
    """Minimal ISink implementation that just collects received messages
    into ``self.messages`` for later inspection by tests."""

    implements(ISink)

    def __init__(self):
        # random queue name so concurrently created stubs never collide
        self.queue_name = str(uuid.uuid1())
        self.messages = []

    ### IChannelSink ###

    def get_agent_id(self):
        return self.queue_name

    def get_shard_id(self):
        return 'lobby'

    def on_message(self, msg):
        self.messages.append(msg)
@feat.register_agent('descriptor')
class DummyAgent(agent.BaseAgent, Mock):
    """Minimal agent whose lifecycle callbacks are recorded (initiate)
    or stubbed out (shutdown/startup/unregister) via the Mock helpers,
    so tests can assert which callbacks were invoked."""

    # We don't want a SetupMonitoring task in all the tests
    need_local_monitoring = False

    def __init__(self, medium):
        agent.BaseAgent.__init__(self, medium)
        Mock.__init__(self)

    @Mock.record
    def initiate(self):
        pass

    @Mock.stub
    def shutdown(self):
        pass

    @Mock.stub
    def startup(self):
        pass

    @Mock.stub
    def unregister(self):
        pass
class DummyRecorderNode(journal.DummyRecorderNode, log.LogProxy, log.Logger):
    """Journal recorder node that proxies its logging to *test_case*,
    so log output produced during a test is routed through the test."""

    def __init__(self, test_case):
        # explicit per-base initialization: the three bases take
        # different constructor arguments
        journal.DummyRecorderNode.__init__(self)
        log.LogProxy.__init__(self, test_case)
        log.Logger.__init__(self, test_case)
| f3at/feat | src/feat/test/common.py | Python | gpl-2.0 | 22,319 |
import happybase
from StringIO import StringIO
from PIL import Image
def decode_image_PIL(binary_data):
    """Return a PIL image decoded from the raw *binary_data* buffer.

    The bytes are wrapped in an in-memory file object because
    ``Image.open`` expects a file-like stream, not a plain string.
    """
    return Image.open(StringIO(binary_data))
if __name__=="__main__":
    # Fetch a few known rows from the HBase image cache and save the
    # binary column of each as a local JPEG named after the row key.
    tab_image = 'image_cache'
    col_image = dict()
    # column family:qualifier holding the raw image bytes
    col_image['image_cache'] = 'image:binary'
    conn = happybase.Connection(host='10.1.94.57')
    image_rows = dict()
    # sample row keys (SHA1-style image identifiers)
    image_rows['image_cache'] = ['0000007031E3FA80C97940017253BEAB542EA334', '000001EC5DD154E58B72326EFC26A41A4C8E9586',
                                 '0000081A1D6D1A2023DAE07547C242ED3106E7FE']
    table = conn.table(tab_image)
    # table.rows() yields (row_key, {column: value}) pairs
    for row in table.rows(image_rows[tab_image]):
        binary_data = row[1][col_image[tab_image]]
        img = decode_image_PIL(binary_data)
        print("Saving image to: {}".format(row[0]+'.jpeg'))
        img.save(row[0]+'.jpeg',"JPEG")
| svebk/DeepSentiBank_memex | scripts/tests/deprecated/read_image_from_hbase.py | Python | bsd-2-clause | 875 |
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: xbuf/skeletons.proto
#
# NOTE: this module is machine-generated; change xbuf/skeletons.proto and
# re-run protoc instead of editing anything below.

import sys
# _b(): identity on Python 2, latin1-encode on Python 3, so the embedded
# serialized descriptor below stays a byte string on both.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

from xbuf import primitives_pb2 as xbuf_dot_primitives__pb2
from xbuf import relations_pb2 as xbuf_dot_relations__pb2
from xbuf.primitives_pb2 import *
from xbuf.relations_pb2 import *

# File descriptor carrying the compiled skeletons.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='xbuf/skeletons.proto',
  package='xbuf',
  syntax='proto3',
  serialized_pb=_b('\n\x14xbuf/skeletons.proto\x12\x04xbuf\x1a\x15xbuf/primitives.proto\x1a\x14xbuf/relations.proto\"d\n\x08Skeleton\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x19\n\x05\x62ones\x18\x03 \x03(\x0b\x32\n.xbuf.Bone\x12#\n\x0b\x62ones_graph\x18\x04 \x03(\x0b\x32\x0e.xbuf.Relation\"D\n\x04\x42one\x12\n\n\x02id\x18\x01 \x01(\t\x12\"\n\ttransform\x18\x02 \x01(\x0b\x32\x0f.xbuf.Transform\x12\x0c\n\x04name\x18\x04 \x01(\tP\x00P\x01\x62\x06proto3')
  ,
  dependencies=[xbuf_dot_primitives__pb2.DESCRIPTOR,xbuf_dot_relations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Descriptor for the Skeleton message (fields: id, name, bones, bones_graph).
_SKELETON = _descriptor.Descriptor(
  name='Skeleton',
  full_name='xbuf.Skeleton',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='xbuf.Skeleton.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='name', full_name='xbuf.Skeleton.name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bones', full_name='xbuf.Skeleton.bones', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bones_graph', full_name='xbuf.Skeleton.bones_graph', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=75,
  serialized_end=175,
)

# Descriptor for the Bone message (fields: id, transform, name).
_BONE = _descriptor.Descriptor(
  name='Bone',
  full_name='xbuf.Bone',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='xbuf.Bone.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='transform', full_name='xbuf.Bone.transform', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='name', full_name='xbuf.Bone.name', index=2,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=177,
  serialized_end=245,
)

# Wire up cross-message/cross-file field types.
_SKELETON.fields_by_name['bones'].message_type = _BONE
_SKELETON.fields_by_name['bones_graph'].message_type = xbuf_dot_relations__pb2._RELATION
_BONE.fields_by_name['transform'].message_type = xbuf_dot_primitives__pb2._TRANSFORM
DESCRIPTOR.message_types_by_name['Skeleton'] = _SKELETON
DESCRIPTOR.message_types_by_name['Bone'] = _BONE

# Concrete message classes generated from the descriptors above.
Skeleton = _reflection.GeneratedProtocolMessageType('Skeleton', (_message.Message,), dict(
  DESCRIPTOR = _SKELETON,
  __module__ = 'xbuf.skeletons_pb2'
  # @@protoc_insertion_point(class_scope:xbuf.Skeleton)
  ))
_sym_db.RegisterMessage(Skeleton)

Bone = _reflection.GeneratedProtocolMessageType('Bone', (_message.Message,), dict(
  DESCRIPTOR = _BONE,
  __module__ = 'xbuf.skeletons_pb2'
  # @@protoc_insertion_point(class_scope:xbuf.Bone)
  ))
_sym_db.RegisterMessage(Bone)

# @@protoc_insertion_point(module_scope)
| xbuf/blender_io_xbuf | modules/xbuf/skeletons_pb2.py | Python | gpl-3.0 | 5,338 |
#!/usr/bin/python
#
version = 'version 0.2'
author = 'duncan@linuxbandwagon.com'
#
# $Id: ringd.py,v 1.17 2002/05/06 14:46:24 master Exp $
#
# Simple re-implementation of the old perl ringd program in python.
#
# Does not do as much logging.
#
# duncan@linuxbandwagon.com
#
# THIS SOFTWARE IS COPYRIGHT 2001,2002 Linuxbandwagon Pty. Ltd. Australia
# and is released under the GPL. NO WARRANTY!!!
# see http://www.gnu.org/copyleft/gpl.html for further info.
#
#
# developed on python 1.5.2 on redhat 7
#
# This is an early python effort from me, and written rapidly to fill a
# need.
#
# 30/4/2002 - 0.2 -- added "secret knock" for 2 single rings spaced
# a certain time apart
# as the dialout signature.
#
cvsid = '$Id: ringd.py,v 1.17 2002/05/06 14:46:24 master Exp $'
debugon = 0            # set to 1 by --debug for verbose tracing
inifile = 'ringd.ini'  # default config file, overridden by --config
import sys, commands, os, time, glob, pty, getopt, ConfigParser, string, signal
# Global state shared between the main loop and the SIGALRM handlers below.
ringcount = 0
readfrommodem = ''     # last buffer read from the modem device
def nulltimer(signum, frame):
    """SIGALRM handler that does nothing beyond tracing.

    Fixes the parameter typo ``signun`` -> ``signum`` for consistency
    with every other handler in this file; the signal module calls
    handlers positionally, so the rename is safe.
    """
    debug('nulltimer called')
def opentimer(signum, frame):
    """SIGALRM handler: abort a hanging os.open() on the modem device.

    Raising here interrupts the blocked open() call in the main loop.
    The call-style raise (``raise IOError("...")``) replaces the old
    comma form, which is valid only on Python 2; this form behaves
    identically on Python 2 and 3.
    """
    debug('opentimer called')
    raise IOError("open() hanging")
def ringingtimer(signum, frame):
    """SIGALRM handler: the ring-counting window expired.

    Clearing the buffer makes the main read loop see a zero-length
    read after its os.read() is interrupted by this signal.
    """
    global readfrommodem
    debug('ringingtimer triggered')
    readfrommodem = ''
def ringingtimer2(signum, frame):
    """SIGALRM handler for pattern mode: window expired.

    Besides clearing the read buffer it raises the ``timeover`` flag,
    which patternmoderingdetect() uses to decide a single-ring burst
    has completed.
    """
    global readfrommodem, timeover
    debug('ringingtimer triggered')
    readfrommodem = ''
    timeover = 1
def netdowntimer(signum, frame):
    """SIGALRM handler: the allotted net-up time has elapsed.

    If the 'keep the net up' marker file exists, postpone shutdown by
    re-arming this alarm for another pollingperiod; otherwise run the
    configured pppstop command to tear the connection down.
    """
    global pollingperiod, netupfile, systemname
    debug('netdowntimer tiggered')
    #
    if os.path.isfile(netupfile):
        debug('found file '+netupfile+' so we dont want to kill the net yet')
        #
        # set an allarm for pollingperiod seconds.
        debug('setting alarm to trigger in '+str(pollingperiod)+' seconds')
        signal.signal(signal.SIGALRM, netdowntimer)
        signal.alarm(pollingperiod)
    else:
        debug('running pppstop command '+pppstop)
        returncode = os.system(pppstop)
        if returncode != 0:
            print systemname+': error running '+pppstop
            # sys.exit(1)
def debug(debuginfo):
    """Print *debuginfo* when the module-level ``debugon`` flag is set.

    Uses the function-call print form: with a single argument it is
    equivalent on Python 2 (parenthesized expression) and Python 3.
    """
    if debugon == 1:
        print(debuginfo)
#
# sanity checking on config files (or other files that need to be there)
#
def checkexists(filename):
    """Exit the process with status 1 unless *filename* is an existing
    regular file; returns None when the file is present.

    Uses the single-argument print call form, which behaves identically
    on Python 2 and Python 3 (the original bare print statement is a
    syntax error on Python 3).
    """
    if not os.path.isfile(filename):
        print('Error: '+filename+' is not a file')
        sys.exit(1)
def make_lock_file(lockfilename):
    """Create a pid lock file, refusing to run if a live instance exists.

    If the lock file is readable, the pid inside it is probed with
    ``kill -0``; a live process means another ringd is running, so we
    bail out (exit 3).  Otherwise the file is (re)written with our pid.
    """
    global systemname
    #
    # first open the lock file
    #
    if os.access(lockfilename,os.R_OK):
        try:
            fo = open(lockfilename, 'r')
        except IOError:
            print systemname+': Can\'t open lock file for reading.'
            sys.exit(3)
        pidstring = fo.read()
        debug('pid found in lockfile is ' + pidstring)
        # kill -0 only checks existence of the process, it sends no signal
        res = commands.getstatusoutput('kill -0 ' + pidstring)
        if res[0] != 0:
            print systemname+': process '+pidstring+' found in lock file seems dead'
        else:
            debug('Existing process detected, bailing out..')
            fo.close()
            sys.exit(3)
    try:
        fi = open(lockfilename, 'w')
    except IOError:
        print systemname+': Can\'t open lock file for writing.'
        sys.exit(0)
    # backticks == repr() on Python 2; writes our pid as text
    fi.write(`os.getpid()`)
    fi.close();
def usage(progname):
    """Print version/author banner and the command line help text."""
    print progname
    print version
    print author
    print
    print 'Usage:'
    print progname+' <flags>'
    print
    print 'where <flags> can be any of:'
    print ' --debug (debug output on)'
    print ' --help (print this message and quit)'
    print ' --config <filename> config file to use, default is '+inifile
    print
#
# fork off and run the ppp command
#
def forknet(pppstart):
    """Fork; the child runs the ppp start command and then exits.

    The parent returns immediately so the main loop keeps running.
    The child exits 1 when the command fails, 0 otherwise.
    """
    debug('forking and running pppstart command '+pppstart)
    if os.fork() == 0:
        # child process
        res = commands.getstatusoutput(pppstart)
        if res[0] != 0:
            print systemname+': error running '+pppstart
            print res[1]
            sys.exit(1)
        else:
            debug('output from command was')
            debug(res[1])
        sys.exit(0)
#
# implements the simpler ring pattern mode - just a certain amount
# of rings within a certain timeframe.
# return 1 if net connection attempted, 0 otherwise
#
def nonpatternmoderingdetect(sfile,ringstring,timewindowtoinitiatenet,counttostartnet,counttonotstartnet,pppstart):
    """Count RING indications on the modem fd *sfile*; when the count
    reaches [counttostartnet, counttonotstartnet) before the window
    alarm fires, dial out via forknet() and return 1."""
    global readfrommodem
    ringcount = 0
    while 1:
        try:
            # blocks until modem data arrives; SIGALRM (ringingtimer)
            # interrupts the read with OSError and clears readfrommodem
            readfrommodem = os.read(sfile,100)
        except OSError:
            debug('interupted, time is probably up')
        if len(readfrommodem) == 0:
            # window expired without enough rings - give up
            debug('0 length read from modem')
            os.close(sfile)
            break
        debug('read from modem')
        debug('['+readfrommodem+']')
        if string.find(readfrommodem,ringstring) != -1:
            ringcount = ringcount + 1
            debug('ring detected '+str(ringcount))
            if ringcount == 1:
                # first ring opens the counting window
                debug('setting alarm to trigger in '+str(timewindowtoinitiatenet)+' seconds')
                signal.signal(signal.SIGALRM, ringingtimer)
                signal.alarm(timewindowtoinitiatenet)
            if ringcount >= counttostartnet and ringcount < counttonotstartnet:
                try:
                    os.close(sfile)
                except OSError:
                    debug('error closing modem, prolly closed already, thats ok')
                debug(str(counttostartnet)+'or more rings, but less than '+str(counttonotstartnet))
                debug(str(ringcount)+' rings detected, waiting to dial net')
                debug('sleeping '+str(delaybeforestartingnet)+\
                      ' seconds to before starting net')
                time.sleep(delaybeforestartingnet)
                forknet(pppstart)
                return 1
    else:
        # NOTE(review): this is a while-else clause; with ``while 1`` the
        # loop only exits via break/return, so this branch never runs and
        # the break path actually returns None (falsy) instead of the
        # documented 0 - harmless for the ``== 1`` caller, but confirm.
        return 0
#
# implements a more careful ring pattern ... X rings, then ignoring
# any rings for Y seconds.. then X rings. This is so hopefully we can
# give the system 2 single rings spaced X seconds apart as the "secret
# knock" so that the system knows we want it to dial out.
#
# this way should be less prone to restart problems, etc, as we keep
# a list of the times when we detect a single ring, and sort through
# them looking for the right gap, which is a way nicer way to do
# it.....
#
def patternmoderingdetect(sfile,ringstring,timewindowtoinitiatenet,counttostartnet,counttonotstartnet,pppstart,configs):
    """Wait for the 'secret knock': two single rings spaced roughly
    patterndelay seconds apart (tolerance patternwindow/2), then dial
    out via forknet() and return 1."""
    global readfrommodem, timeover
    singlerings = [] # list of when single rings have occurred
    # not used yet - for now just 1 ring is da signal
    # patternrings = configs.getint('ringd','patternrings')
    patterndelay = configs.getint('ringd','patterndelay')
    patternwindow = configs.getint('ringd','patternwindow')
    nowindow = 1
    ringcount = 0
    timeover = 0
    while nowindow:
        try:
            # SIGALRM (ringingtimer2) interrupts the read, clears the
            # buffer and raises the timeover flag
            readfrommodem = os.read(sfile,100)
        except OSError:
            debug('interupted, time is probably up')
        if len(readfrommodem) == 0:
            debug('0 length read from modem')
        debug('read from modem')
        debug('['+readfrommodem+']')
        if string.find(readfrommodem,ringstring) != -1:
            ringcount = ringcount + 1
            debug('ring detected '+str(ringcount))
            if ringcount == 1:
                # timestamp the burst and arm the window alarm
                ringtime = int(time.time())
                debug('setting alarm to trigger in '+str(patternwindow)+' seconds')
                signal.signal(signal.SIGALRM, ringingtimer2)
                signal.alarm(patternwindow)
        if ringcount != 1:
            debug('not just one ring detected, not the start of a pattern')
        else:
            if timeover == 1:
                # the window closed after exactly one ring: record it
                debug('single ring detected, adding to list')
                singlerings.append(ringtime)
                debug(singlerings)
                # now check the singlerings list to see if any patterndelay
                # plus or minus patternwindow seconds apart....
                for ringtime2 in singlerings:
                    diff = ringtime - ringtime2
                    debug('seconds apart '+str(diff))
                    if (diff > (patterndelay - (patternwindow / 2))) and \
                       (diff < (patterndelay + (patternwindow / 2))):
                        debug('CORRECT WINDOW FOUND!')
                        nowindow = 0
                        signal.alarm(0)
                    if (ringtime - ringtime2) > (patterndelay + patternwindow):
                        # NOTE(review): removing from singlerings while
                        # iterating over it can skip elements - the pruning
                        # of stale entries may be incomplete; confirm.
                        debug('old ring removed '+str(diff))
                        singlerings.remove(ringtime2)
        if timeover == 1:
            # reset per-burst state for the next potential single ring
            timeover = 0
            ringcount = 0
    try:
        os.close(sfile)
    except OSError:
        debug('error closing modem, prolly closed already, thats ok')
    debug('wating '+str(delaybeforestartingnet)+' seconds before starting the net')
    time.sleep(delaybeforestartingnet)
    forknet(pppstart)
    return 1
#
# main() bit
#
# Parse the command line.  Fixes three defects in the original:
#  - ``print CVSID`` raised NameError (the variable is lowercase cvsid)
#  - ``sys.fexit(0)`` is not a function (typo for sys.exit)
#  - ``verbose(...)`` was never defined; route the message through debug()
try:
    options, xarguments = getopt.getopt(sys.argv[1:],
                                        'dvc',['debug','verbose','config='])
except getopt.error:
    usage(sys.argv[0])
    sys.exit(1)

# iterate over a copy because --config removes its entry from options
for a in options[:]:
    if a[0] == '--config' and a[1] != '':
        inifile = a[1]
        debug('using config file '+a[1])
        options.remove(a)
    if a[0] == '--help':
        print(cvsid)
        usage(sys.argv[0])
        sys.exit(0)
    if a[0] == '--debug':
        debugon = 1
        debug('setting debug output on')
    if a[0] == '--verbose':
        verboseon = 1
        debug('setting verbose output')
debug(version)
debug(author)
# NOTE(review): the short options 'dvc' are declared but only the long
# forms are handled above - confirm whether -d/-v/-c should work too.
if len(xarguments) != 0:
    usage(sys.argv[0])
    sys.exit(1)
#
# read in the config settings
#
configs = ConfigParser.ConfigParser()
checkexists(inifile)
configs.read(inifile)
lockfile = configs.get('ringd','lockfile')
systemname = configs.get('system','name')
# refuses to continue (exit 3) if another live instance holds the lock
make_lock_file(lockfile)
#
# get the rest of the parameters from the config file
#
modem = configs.get('ringd','modem')
stty = configs.get('ringd','stty')
pppstart = configs.get('ringd','pppstart')
resetdevice = configs.get('ringd','resetdevice')
netupfile = configs.get('ringd','netupfile')
pppstop = configs.get('ringd','pppstop')
checknet = configs.get('ringd','checknet')
counttostartnet = configs.getint('ringd','counttostartnet')
#
# if this option doesnt exist we set it to 2 more than
# the minimum number of rings.
#
if 'counttonotstartnet' in configs.options('ringd'):
    counttonotstartnet = configs.getint('ringd','counttonotstartnet')
else:
    counttonotstartnet = counttostartnet + 2
# patternmode selects the 'secret knock' detector instead of plain counting
if 'patternmode' in configs.options('ringd'):
    patternmode = configs.getint('ringd','patternmode')
else:
    patternmode = 0
timewindowtoinitiatenet = configs.getint('ringd','timewindowtoinitiatenet')
delaybeforestartingnet = configs.getint('ringd','delaybeforestartingnet')
netuptime = configs.getint('ringd','netuptime')
delayafterkillingnet = configs.getint('ringd','delayafterkillingnet')
pollingperiod = configs.getint('ringd','pollingperiod')
initstring = configs.get('ringd','initstring')
ringstring = configs.get('ringd','ringstring')
running = 1
netup = 0
# Main daemon loop: while the net is up just poll; when it is down, open
# the modem, arm a ring detector and dial out when the pattern matches.
while running:
    #
    # test if the net is up, if not kill any ppp crap around
    #
    debug('running checknet command '+checknet)
    res = commands.getstatusoutput(checknet)
    debug('checknet command gave output was '+res[1])
    if res[0] == 0:
        netup = 1
    else:
        debug('running pppstop command '+pppstop)
        #
        # maybe check for /tmp/keepnetup
        # to postpone net shutdown?
        #
        # get rid of any net down signals hanging about (or others)
        signal.alarm(0)
        returncode = os.system(pppstop)
        if returncode != 0:
            print 'error running '+pppstop
            # sys.exit(1)
        netup = 0
    if netup:
        debug('net still seems to be up, sleeping')
        try:
            time.sleep(pollingperiod)
        except IOError:
            debug('interupted, net has probably been killed')
    else:
        # guard the open() with an alarm because a wedged modem driver
        # can make it hang; opentimer raises IOError to break out
        debug('setting alarm to trigger in '+str(5)+' seconds')
        signal.signal(signal.SIGALRM, opentimer)
        signal.alarm(5)
        try:
            sfile = os.open(modem,os.O_RDWR)
        except:
            debug('problem opening modem, lets run the modem reset command')
            #
            # modem is hanging, maybe driver jammed or who knows what,
            # reloading the modules seems to work for Lucent LinModems
            res = commands.getstatusoutput(resetdevice)
            if res[0] != 0:
                print systemname +': error running '+resetdevice
                print res[1]
                sys.exit(1)
            else:
                debug('output from command '+resetdevice+' was')
                debug(res[1])
                # NOTE(review): exiting after a successful reset means the
                # daemon relies on something external to restart it, and
                # the continue below is unreachable - confirm intended.
                sys.exit(0)
            continue
        signal.alarm(0)
        command = stty+' < '+modem
        debug('stty command is')
        debug(command)
        res = commands.getstatusoutput(command)
        if res[0] != 0:
            print 'Error Couldnt set serial port parameters for '+modem
            print res[1]
            # sys.exit(1)
            os.close(sfile)
            debug('sleeping '+ str(pollingperiod))
            try:
                time.sleep(pollingperiod)
            except IOError:
                debug('interupted, net has probably been killed')
            continue
        debug('initialising modem with initstring')
        debug(initstring)
        os.write(sfile,initstring+'\r')
        #
        # wait around for the phone to ring, in a pattern we want...
        #
        if patternmode == 0:
            netcalled = nonpatternmoderingdetect(sfile,ringstring,timewindowtoinitiatenet,counttostartnet,counttonotstartnet,pppstart)
        if patternmode == 1:
            netcalled = patternmoderingdetect(sfile,ringstring,timewindowtoinitiatenet,counttostartnet,counttonotstartnet,pppstart,configs)
        if netcalled == 1:
            # we have
            # found a ring pattern that is agreeable to us, and tried to
            # start the net, so now we just wait and see if things are OK.
            #
            # set alarm to kill net connection in X seconds
            #
            debug('setting alarm to trigger in '+str(netuptime)+' seconds')
            signal.signal(signal.SIGALRM, netdowntimer)
            signal.alarm(netuptime)
            #
            # loop around and check the net, after a delay
            #
            debug('sleeping '+str(pollingperiod)+' before checking net')
            try:
                time.sleep(pollingperiod)
            except IOError:
                debug('interupted, net has probably been killed')
| DuncanRobertson/ringd | ringd.py | Python | gpl-2.0 | 13,907 |
# coding: utf-8
"""
Compatibility functions for unified behavior between Python 2.x and 3.x.
:author: Alex Grönholm
"""
from __future__ import unicode_literals, absolute_import
import inspect
import sys
from threading import Thread
# Pick the Python-2 or Python-3 spelling for each compatibility shim so the
# rest of py4j can use one set of names on both interpreters.
if sys.version_info[0] < 3:
    def items(d):
        return d.items()

    def iteritems(d):
        return d.iteritems()

    def next(x):
        return x.next()

    range = xrange  # noqa
    long = long  # noqa
    basestring = basestring  # noqa
    unicode = unicode  # noqa
    bytearray2 = bytearray
    unichr = unichr  # noqa
    bytestr = str
    tobytestr = str

    def isbytestr(s):
        return isinstance(s, str)

    def ispython3bytestr(s):
        # there is no Python-3 bytes type on Python 2
        return False

    def isbytearray(s):
        return isinstance(s, bytearray)

    def bytetoint(b):
        # indexing a Py2 str yields a 1-char str, hence ord()
        return ord(b)

    def bytetostr(b):
        return b

    def strtobyte(b):
        return b

    import Queue
    Queue = Queue.Queue
else:
    def items(d):
        return list(d.items())

    def iteritems(d):
        return d.items()

    next = next
    range = range
    long = int
    basestring = str
    unicode = str
    bytearray2 = bytes
    unichr = chr
    bytestr = bytes

    def tobytestr(s):
        return bytes(s, "ascii")

    def isbytestr(s):
        return isinstance(s, bytes)

    def ispython3bytestr(s):
        return isinstance(s, bytes)

    def isbytearray(s):
        return isinstance(s, bytearray)

    def bytetoint(b):
        # indexing Py3 bytes already yields an int
        return b

    def bytetostr(b):
        return str(b, encoding="ascii")

    def strtobyte(s):
        return bytes(s, encoding="ascii")

    import queue
    Queue = queue.Queue
# inspect.getattr_static looks attributes up without triggering
# descriptors/__getattr__; fall back to plain hasattr where unavailable.
if hasattr(inspect, "getattr_static"):
    def hasattr2(obj, attr):
        return bool(inspect.getattr_static(obj, attr, False))
else:
    hasattr2 = hasattr
class CompatThread(Thread):
    """Compatibility Thread class.

    Allows Python 2 Thread class to accept daemon kwarg in init.
    """

    def __init__(self, *args, **kwargs):
        # Remove "daemon" before delegating: Python 2's Thread.__init__
        # rejects it as a keyword argument.
        daemon = kwargs.pop("daemon", None)
        super(CompatThread, self).__init__(*args, **kwargs)
        if daemon is not None:
            # Honor an explicit daemon=False too: the original
            # ``if daemon:`` silently ignored a falsy value, leaving
            # the daemon flag inherited from the creating thread.
            self.daemon = daemon
| fouzelddin/py4j | py4j-python/src/py4j/compat.py | Python | bsd-3-clause | 2,249 |
'''
Copyright 2015 Serendio Inc.
Author - Satish Palaniappan
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
__author__ = "Satish Palaniappan"
import pickle
import config
import sys
sys.path.append(config.basePath+config.SocialFilter)
from SocialFilter.TextFilter import Filter
from SocialFilter.Twokenize.twokenize import *
import re
class extractor(object):
    """Sentiment scorer for microblog text.

    Loads a pickled model, feature selector and vectorizer from disk and
    blends a plain-text model prediction with emoticon scores produced
    by the social-media text filter.
    """

    def __init__(self):
        self.path = config.basePath +config.microblog
        self.SentiModel = self.load_obj("_model")
        # feature selector (name suggests chi-squared - TODO confirm)
        self.ch2 = self.load_obj("_feature_selector")
        self.vectorizer = self.load_obj("_vectorizer")
        self.filter = Filter()

    def load_obj(self,name):
        # Unpickle <path><name>.pkl.  NOTE(review): pickle.load executes
        # arbitrary code from the file - these model files must be trusted.
        with open( self.path + name + '.pkl', 'rb') as f:
            return pickle.load(f)

    def simpleProcess(self,text):
        """Lowercase, strip URLs and @/# sigils, then re-join the
        Twokenize tokens with single spaces."""
        text = text.lower().strip()
        line = re.sub(Url_RE,"",text)
        line = re.sub(r"[@#]","",line)
        line =u" ".join(tokenize(line))
        return line

    def getSentimentScore(self,message):
        """Return a float sentiment score for *message*.

        Combines: (a) the model score of the simply-processed text,
        (b) the model score of the filter-processed text, and (c) the
        sum of emoticon scores embedded by the filter as
        ``word#(score)#`` annotations, with weights depending on
        whether emoticons were present.
        """
        # (a) score of the lightly normalized raw text
        vec = self.vectorizer.transform([self.simpleProcess(message)])
        Tvec = self.ch2.transform(vec)
        plainScore = self.SentiModel.predict(Tvec)
        #####
        # (c) pull out the emoticon annotations added by the filter
        message = self.filter.process(message)
        emo = re.findall(r"(\w+)#\(([+-]*\d+.\d+)\)#",message.decode("utf-8"))
        emoScore = 0.0
        for e in emo:
            emoScore += float(e[1])
        # remove emo info
        onlyEMO = 0
        message = re.sub(r"\w+#\([+-]*\d+.\d+\)#"," ",message.decode("utf-8"))
        if len(message.strip()) == 0:
            onlyEMO = 1
        # (b) score of the filtered text with annotations stripped
        vec = self.vectorizer.transform([message])
        Tvec = self.ch2.transform(vec)
        predScore = self.SentiModel.predict(Tvec)
        # Vary weights if wanted
        if onlyEMO == 1:
            predScore = emoScore
        elif len(emo) >= 1:
            predScore = 0.20 * predScore + 0.30 * plainScore + 0.50 * emoScore
        else:
            predScore = 0.35 * predScore + 0.65 * plainScore
        return float(predScore)
# #### TEST
# S = extractor()
# message = ":("
# print(S.getSentimentScore(message))
| serendio-labs-stage/diskoveror-ml-server | SentimentThrift/SentiHandlers/comments.py | Python | apache-2.0 | 2,344 |
# -*- coding: utf-8 -*-
import recommonmark
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'MIT-Tab'
copyright = '2019, Joey Lynch, Julia Boortz, and Ben Muschol'
author = 'Joey Lynch, Julia Boortz, and Ben Muschol'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# Map each source-file extension to the parser that handles it.
source_suffix = {
    '.md': 'markdown'
}

# The master toctree document.
master_doc = 'index'

# recommonmark option: turn markdown toctrees into real toc trees.
enable_auto_toc_tree = True

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'MIT-Tabdoc'


# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'MIT-Tab.tex', 'MIT-Tab Documentation',
     'Joey Lynch, Julia Boortz, and Ben Muschol', 'manual'),
]


# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'mit-tab', 'MIT-Tab Documentation',
     [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'MIT-Tab', 'MIT-Tab Documentation',
     author, 'MIT-Tab', 'One line description of project.',
     'Miscellaneous'),
]


# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

github_doc_root = 'https://github.com/mit-tab/mit-tab/tree/master/doc/'
def setup(app):
    """Sphinx extension hook: register recommonmark's AutoStructify
    transform so markdown sources gain auto toc-tree support."""
    app.add_config_value('recommonmark_config', {
        'auto_toc_tree_section': 'Contents',
    }, True)
    app.add_transform(AutoStructify)
| jolynch/mit-tab | docs/conf.py | Python | mit | 5,647 |
"""
WSGI config for simple_web_scraper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simple_web_scraper.settings")

application = get_wsgi_application()

# see about serving static files with Whitenoise at Heroku's doc page
# https://devcenter.heroku.com/articles/django-assets
# Static-file serving is opt-in via the WHITENOISE_ENABLED env var.
if os.environ.get('WHITENOISE_ENABLED', '').lower() in {'true', '1'}:
    from whitenoise.django import DjangoWhiteNoise
    application = DjangoWhiteNoise(application)
| fcv/simple-web-scraper | simple_web_scraper/wsgi.py | Python | mit | 709 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from itertools import groupby
from compas.geometry import Point
from compas.geometry import NurbsCurve
from compas_rhino.conversions import point_to_rhino
from compas_rhino.conversions import point_to_compas
# from compas_rhino.conversions import circle_to_rhino
# from compas_rhino.conversions import ellipse_to_rhino
from compas_rhino.conversions import line_to_rhino
from .curve import RhinoCurve
import Rhino.Geometry
def rhino_curve_from_parameters(points, weights, knots, multiplicities, degree):
    """Build a ``Rhino.Geometry.NurbsCurve`` from explicit NURBS parameters.

    Parameters
    ----------
    points : list[:class:`~compas.geometry.Point`]
        The control points.
    weights : list[float]
        One weight per control point.
    knots : list[float]
        The knot values, without duplicates.
    multiplicities : list[int]
        The multiplicity of each entry of ``knots``.
    degree : int
        The degree of the curve.

    Returns
    -------
    :class:`Rhino.Geometry.NurbsCurve`
    """
    # Constructor args: dimension=3, rational=True, order=degree+1, point count.
    rhino_curve = Rhino.Geometry.NurbsCurve(3, True, degree + 1, len(points))
    for index, (point, weight) in enumerate(zip(points, weights)):
        rhino_curve.Points.SetPoint(index, point_to_rhino(point), weight)
    # Expand the (knot, multiplicity) pairs into a full knot sequence.
    knotvector = [knot for knot, mult in zip(knots, multiplicities) for _ in range(mult)]
    # account for superfluous knots
    # https://developer.rhino3d.com/guides/opennurbs/superfluous-knots/
    # Rhino stores pointcount + degree - 1 knots; a "full" knot vector of
    # length pointcount + degree + 1 carries one superfluous knot at each
    # end, which is stripped here.
    p = len(points)
    o = degree + 1
    k = p + o
    if len(knotvector) == k:
        knotvector[:] = knotvector[1:-1]
    for index, knot in enumerate(knotvector):
        rhino_curve.Knots[index] = knot
    return rhino_curve
class RhinoNurbsCurve(NurbsCurve, RhinoCurve):
    """Class representing a NURBS curve based on the NurbsCurve of Rhino.Geometry.

    Parameters
    ----------
    name : str, optional
        Name of the curve.

    Attributes
    ----------
    points : list[:class:`~compas.geometry.Point`], read-only
        The control points of the curve.
    weights : list[float], read-only
        The weights of the control points.
    knots : list[float], read-only
        The knot vector, without duplicates.
    multiplicities : list[int], read-only
        The multiplicities of the knots in the knot vector.
    knotsequence : list[float], read-only
        The knot vector, with repeating values according to the multiplicities.
    continuity : int, read-only
        The degree of continuity of the curve.
    degree : int, read-only
        The degree of the curve.
    order : int, read-only
        The order of the curve (degree + 1).
    is_rational : bool, read-only
        True if the curve is rational.

    References
    ----------
    * https://developer.rhino3d.com/api/RhinoCommon/html/T_Rhino_Geometry_NurbsCurve.htm
    * https://en.wikipedia.org/wiki/Non-uniform_rational_B-spline
    * https://developer.rhino3d.com/guides/opennurbs/nurbs-geometry-overview/

    """

    def __init__(self, name=None):
        super(RhinoNurbsCurve, self).__init__(name=name)
        # The wrapped Rhino.Geometry.NurbsCurve; None until constructed/assigned.
        self.rhino_curve = None

    # ==============================================================================
    # Data
    # ==============================================================================

    @property
    def data(self):
        """dict: The curve serialized as a dict of constructor parameters."""
        # add superfluous knots
        # for compatibility with all/most other NURBS implementations
        # https://developer.rhino3d.com/guides/opennurbs/superfluous-knots/
        multiplicities = self.multiplicities[:]
        multiplicities[0] += 1
        multiplicities[-1] += 1
        return {
            'points': [point.data for point in self.points],
            'weights': self.weights,
            'knots': self.knots,
            'multiplicities': multiplicities,
            'degree': self.degree,
            'is_periodic': self.is_periodic,
        }

    @data.setter
    def data(self, data):
        points = [Point.from_data(point) for point in data['points']]
        weights = data['weights']
        knots = data['knots']
        multiplicities = data['multiplicities']
        degree = data['degree']
        # is_periodic = data['is_periodic']
        # have not found a way to actually set this
        # not sure if that is actually possible...
        self.rhino_curve = rhino_curve_from_parameters(points, weights, knots, multiplicities, degree)

    # ==============================================================================
    # Rhino Properties
    # ==============================================================================

    # ==============================================================================
    # Properties
    # ==============================================================================
    # NOTE: all properties return None when no Rhino curve has been set yet.

    @property
    def points(self):
        """list[:class:`~compas.geometry.Point`]: The control points."""
        if self.rhino_curve:
            return [point_to_compas(point) for point in self.rhino_curve.Points]

    @property
    def weights(self):
        """list[float]: The weights of the control points."""
        if self.rhino_curve:
            return [point.Weight for point in self.rhino_curve.Points]

    @property
    def knots(self):
        """list[float]: The knot values, without duplicates."""
        if self.rhino_curve:
            return [key for key, _ in groupby(self.rhino_curve.Knots)]

    @property
    def knotsequence(self):
        """list[float]: The full knot vector, with repetitions."""
        if self.rhino_curve:
            return list(self.rhino_curve.Knots)

    @property
    def multiplicities(self):
        """list[int]: The multiplicity of each distinct knot."""
        if self.rhino_curve:
            return [len(list(group)) for _, group in groupby(self.rhino_curve.Knots)]

    @property
    def degree(self):
        """int: The degree of the curve."""
        if self.rhino_curve:
            return self.rhino_curve.Degree

    @property
    def order(self):
        """int: The order of the curve (degree + 1)."""
        if self.rhino_curve:
            return self.rhino_curve.Order

    @property
    def is_rational(self):
        """bool: True if the curve is rational."""
        if self.rhino_curve:
            return self.rhino_curve.IsRational

    # ==============================================================================
    # Constructors
    # ==============================================================================

    @classmethod
    def from_parameters(cls, points, weights, knots, multiplicities, degree, is_periodic=False):
        """Construct a NURBS curve from explicit curve parameters.

        Parameters
        ----------
        points : list[:class:`~compas.geometry.Point`]
            The control points.
        weights : list[float]
            The control point weights.
        knots : list[float]
            The curve knots, without duplicates.
        multiplicities : list[int]
            The multiplicities of the knots.
        degree : int
            The degree of the curve.
        is_periodic : bool, optional
            Flag indicating whether the curve is periodic or not.
            Note that this parameter is currently not supported.

        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsCurve`

        """
        curve = cls()
        curve.rhino_curve = rhino_curve_from_parameters(points, weights, knots, multiplicities, degree)
        return curve

    @classmethod
    def from_points(cls, points, degree=3, is_periodic=False):
        """Construct a NURBS curve from control points.

        Parameters
        ----------
        points : list[:class:`~compas.geometry.Point`]
            The control points.
        degree : int, optional
            The degree of the curve.
        is_periodic : bool, optional
            Flag indicating whether the curve is periodic or not.

        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsCurve`

        """
        # Build a new list instead of assigning to ``points[:]``: the previous
        # in-place assignment mutated the caller's list, silently replacing
        # their COMPAS points with Rhino points.
        rhino_points = [point_to_rhino(point) for point in points]
        curve = cls()
        curve.rhino_curve = Rhino.Geometry.NurbsCurve.Create(is_periodic, degree, rhino_points)
        return curve

    @classmethod
    def from_interpolation(cls, points, precision=1e-3):
        """Construct a NURBS curve by interpolating a set of points.

        Parameters
        ----------
        points : list[:class:`~compas.geometry.Point`]
            The control points.
        precision : float, optional
            The required precision of the interpolation.
            This parameter is currently not supported.

        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsCurve`

        """
        curve = cls()
        curve.rhino_curve = Rhino.Geometry.NurbsCurve.CreateHSpline([point_to_rhino(point) for point in points])
        return curve

    # @classmethod
    # def from_circle(cls, circle):
    #     """Construct a NURBS curve from a circle.
    #     Parameters
    #     ----------
    #     circle : :class:`~compas.geometry.Circle`
    #         A circle geometry.
    #     Returns
    #     -------
    #     :class:`~compas_rhino.geometry.RhinoNurbsCurve`
    #     """
    #     curve = cls()
    #     curve.rhino_curve = Rhino.Geometry.NurbsCurve.CreateFromCircle(circle_to_rhino(circle))
    #     return curve

    # @classmethod
    # def from_ellipse(cls, ellipse):
    #     """Construct a NURBS curve from an ellipse.
    #     Parameters
    #     ----------
    #     ellipse : :class:`~compas.geometry.Ellipse`
    #         An ellipse geometry.
    #     Returns
    #     -------
    #     :class:`~compas_rhino.geometry.RhinoNurbsCurve`
    #     """
    #     curve = cls()
    #     curve.rhino_curve = Rhino.Geometry.NurbsCurve.CreateFromEllipse(ellipse_to_rhino(ellipse))
    #     return curve

    @classmethod
    def from_line(cls, line):
        """Construct a NURBS curve from a line.

        Parameters
        ----------
        line : :class:`~compas.geometry.Line`
            A line geometry.

        Returns
        -------
        :class:`~compas_rhino.geometry.RhinoNurbsCurve`

        """
        curve = cls()
        curve.rhino_curve = Rhino.Geometry.NurbsCurve.CreateFromLine(line_to_rhino(line))
        return curve

    # ==============================================================================
    # Conversions
    # ==============================================================================

    # ==============================================================================
    # Methods
    # ==============================================================================
| compas-dev/compas | src/compas_rhino/geometry/curves/nurbs.py | Python | mit | 9,820 |
'''
Postprocess teensy ouput
~~~~~~~~~~~~~~~~~~~~~~~~
'''
import argparse
import csv
import time
import os
import sys
def create_filename(Kp, Ki, Kd, setpoint):
    """Build a timestamped output file name encoding the PID parameters.

    Fix: the *setpoint* argument was accepted but never used; it is now
    encoded in the file name alongside the PID gains so runs with different
    setpoints no longer collide.

    Returns a name of the form
    ``teensy-output_Kp=..._Ki=..._Kd=..._setpoint=..._YYYY-MM-DD_HH,MM.log``.
    """
    parts = [
        'teensy-output',
        'Kp={}'.format(Kp),
        'Ki={}'.format(Ki),
        'Kd={}'.format(Kd),
        'setpoint={}'.format(setpoint),
        time.strftime('%Y-%m-%d_%H,%M')
    ]
    return '{}.log'.format('_'.join(parts))
def log_lines(log):
    """Yield the numeric fields of each data line in *log*.

    Blank lines and comment lines (those starting with ``:``) are skipped;
    every other line is split on tabs and each field converted to float.
    """
    for raw in log:
        stripped = raw.strip()
        if not stripped:
            continue
        if stripped.startswith(':'):
            continue
        yield [float(field) for field in stripped.split('\t')]
def main ():
    """Parse CLI args, validate paths, and rewrite the teensy log as TSV.

    Positional args: logfile, kp, then optional ki, kd, setpoint, cycle.
    Returns -1 if the input log is missing, -2 if the output file already
    exists; otherwise writes a header row plus the parsed samples.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('logfile', help='Log file from the teensy.')
    parser.add_argument('kp', type=float)
    parser.add_argument('ki', default=0, nargs='?', type=float)
    parser.add_argument('kd', default=0, nargs='?', type=float)
    parser.add_argument('setpoint', default=100, nargs='?', type=float)
    parser.add_argument('cycle', default=500, nargs='?', type=int)
    args = parser.parse_args()
    if not os.path.isfile(args.logfile):
        print('{}: no such file'.format(args.logfile))
        return -1
    Kp = args.kp
    Ki = args.ki
    Kd = args.kd
    setpoint = args.setpoint
    filename = create_filename(Kp, Ki, Kd, setpoint)
    # Refuse to clobber an existing output file.
    if os.path.isfile(filename):
        print('Output file {} already exists.'.format(filename))
        return -2
    with open(args.logfile) as log, open(filename, 'w') as out:
        vals = log_lines(log)
        writer = csv.writer(out, delimiter='\t')
        # First row records the run metadata (timestamp + parameters).
        writer.writerow([
            time.strftime('%Y-%m-%d %H:%M:%S'),
            'Kp={}'.format(Kp),
            'Ki={}'.format(Ki),
            'Kd={}'.format(Kd),
            'setpoint={}'.format(setpoint),
            'cycle={}'.format(args.cycle),
        ])
        # Column headers for the data rows that follow.
        writer.writerow(['On','T','Output', 'dt', 'cycle', 't'])
        writer.writerows(vals)
if __name__ == '__main__':
    sys.exit(main())
| blubber/silvia | util/postprocess.py | Python | apache-2.0 | 1,962 |
# -*- coding: cp1251 -*-
# Object umbenennen
# (German: "rename object") -- binds to Active Directory via ADSI/LDAP and
# opens a directory object; the actual MoveHere rename call is commented out.
# SECURITY NOTE(review): domain credentials are hard-coded below; they should
# be moved to a secure store or prompted for rather than committed to source.
import win32com.client
dso = win32com.client.GetObject("LDAP:")
obj = dso.OpenDSObject("LDAP://DC=funkegrp/DC=de", "funkegrp\\p0532", "geheim#15", 0)
print(obj)
#obj.MoveHere("LDAP://DC=ru/DC=domen/OU=podrazdelenie/CN=_TestAdmin", "CN=_CoolAdmin")
| AlexFortLabs/MyPythonLabs | Sammelsurium/AD-Rename-LDAP.py | Python | gpl-3.0 | 299 |
from pylons_common.lib.exceptions import *
from pylons_common.lib.date import convert_date
from pylons_common.lib.log import create_logger
logger = create_logger('pylons_common.lib.decorators')
__all__ = ['zipargs', 'stackable', 'enforce']
def zipargs(decorated_fn):
    """
    This will zip up the positional args into kwargs. This makes handling them in
    decorators easier. Call on the inner decorator function, and pass in the outer's
    arg. Outer function must be @stackable. Convolution. Apologies.

    NOTE: Python 2 only (relies on ``func_code``; on Python 3 this attribute
    is ``__code__``).
    """
    def decorator(fn):
        def new(*args, **kwargs):
            # Get the original func's param names. If this is the outer decorator, decorated_fn.func_code.co_varnames
            # will have the inner decorator's names ('args', 'kwargs'). The inner decorator should
            # attach original_varnames to the function
            # NOTE(review): the "hasattr(...) and X or Y" idiom falls through to
            # Y when original_varnames is falsy (e.g. empty tuple) -- confirm
            # that can never happen here.
            varnames = hasattr(decorated_fn, 'original_varnames') and decorated_fn.original_varnames or decorated_fn.func_code.co_varnames
            # Pair positional args with the original parameter names, then let
            # explicit keyword args override.
            dargs = dict(zip(varnames, args))
            dargs.update(kwargs)
            #if 'self' in dargs:
            # del dargs['self']
            return fn(**dargs)
        return new
    return decorator
def stackable(fn):
    """
    This will make a decorator 'stackable' in that we can get the original function's params.

    Copies the decorated function's name and its original parameter names
    (``original_varnames``) onto the wrapper so outer decorators and the
    dispatcher can still see them.  NOTE: Python 2 only (``func_name`` /
    ``func_code``).
    """
    def new(*args, **kwargs):
        # By convention the first positional arg of a decorator is the
        # function being decorated.
        decorated_fn = args[0]
        newfn = fn(*args, **kwargs)
        if decorated_fn:
            # We need to pass the original_varnames into every fn we return in these decorators so
            # the dispatch controller has access to the original function's arg names.
            # Do this in @auth due to decorator stacking.
            newfn.func_name = decorated_fn.func_name
            newfn.original_varnames = hasattr(decorated_fn, 'original_varnames') and decorated_fn.original_varnames or decorated_fn.func_code.co_varnames
        return newfn
    return new
##
### Decorators for api functions
##
def enforce(Session, **types):
    """
    Assumes all arguments are unicode strings, and converts or resolves them to more complex objects.
    If a type of the form [Type] is specified, the arguments will be interpreted as a comma-delimited
    list of strings that will be converted to a list of complex objects.

    :param Session: SQLAlchemy session used to resolve declarative model
        arguments (looked up by ``eid`` or integer ``id``).
    :param types: mapping of argument name -> target type.  Supported targets:
        ``file``, the string ``'filedict'``, ``int``, ``float``,
        ``datetime``, declarative model classes, ``str``, ``unicode``,
        ``bool``, or a one-element list of any of these.
    :raises ApiValueException: if any argument fails conversion; all failures
        are collected and reported together.

    NOTE: Python 2 only (``file``/``unicode`` builtins, ``except X, e``
    syntax, ``iteritems``).
    """
    from datetime import datetime
    @stackable
    def decorator(fn):
        @zipargs(fn)
        def new(**kwargs):
            from sqlalchemy.ext import declarative
            # Conversion failures are accumulated here and raised at the end
            # so the caller sees every invalid field at once.
            errors = []
            def convert(arg_name, arg_type, arg_value):
                # Convert a single value; on failure, record the error and
                # return the value unchanged.
                converted_value = arg_value
                try:
                    if arg_type is file:
                        if type(arg_value) is not file:
                            # cgi.FieldStorage-style uploads expose .file
                            if hasattr(arg_value, 'file'):
                                converted_value = arg_value.file
                            else:
                                raise ValueError("Value must be an open file object, or uploaded file.")
                    if arg_type == 'filedict':
                        # Normalize an upload into {'file': ..., 'filename': ...}
                        if type(arg_value) is not dict:
                            if hasattr(arg_value, 'file'):
                                converted_value = {'file': arg_value.file}
                            else:
                                raise ValueError("Value must be an open file object, or uploaded file.")
                            if hasattr(arg_value, 'filename'):
                                converted_value['filename'] = arg_value.filename
                    elif arg_type is int:
                        converted_value = int(arg_value)
                    elif arg_type is float:
                        converted_value = float(arg_value)
                    elif arg_type is datetime:
                        converted_value = convert_date(arg_value)
                    elif type(arg_type) is declarative.DeclarativeMeta:
                        # Target is a SQLAlchemy declarative model: resolve the
                        # value to a DB row unless it already is a model instance.
                        if type(type(arg_value)) is not declarative.DeclarativeMeta:
                            is_int = True
                            try:
                                arg_value = int(arg_value)
                            except ValueError, e:
                                is_int = False
                            if not is_int and hasattr(arg_type, 'eid'):
                                # Non-numeric value: look up by external id (eid).
                                field = arg_type.eid
                                # NOTE(review): "arg_value is str" is always False
                                # for string *instances*; this was likely meant to
                                # be "type(arg_value) is str" -- confirm.
                                if arg_value is str:
                                    arg_value = arg_value.decode('utf-8')
                                else:
                                    arg_value = unicode(arg_value)
                            else:
                                field = arg_type.id
                                arg_value = int(arg_value)
                            converted_value = Session.query(arg_type).filter(field == arg_value).first()
                    elif arg_type is str:
                        if type(arg_value) is unicode:
                            converted_value = arg_value.encode('utf-8')
                        else:
                            converted_value = str(arg_value)
                    elif arg_type is unicode:
                        if type(arg_value) is str:
                            converted_value = arg_value.decode('utf-8')
                        else:
                            converted_value = unicode(arg_value)
                    elif arg_type is bool:
                        # Accept common textual spellings of true/false.
                        if type(arg_value) is not bool:
                            arg_value = arg_value.encode('utf-8').lower()
                            if arg_value in ['t','true','1','y','yes','on']:
                                converted_value = True
                            elif arg_value in ['f','false','0','n','no']:
                                converted_value = False
                            else:
                                raise ValueError('Value must be true or false')
                except (ValueError, TypeError), e:
                    errors.append((e, arg_name, arg_value))
                return converted_value
            for name, value in kwargs.iteritems():
                if name in types and value is not None:
                    t = types[name]
                    if type(type(value)) is declarative.DeclarativeMeta or isinstance(value, list):
                        kwargs[name] = convert(name, t, value)
                    # If the type is a list, this means that we want to
                    # return a list of objects of the type at index 0 in the list
                    elif isinstance(t, list):
                        # NOTE(review): list *values* are consumed by the branch
                        # above, so the isinstance(value, list) guard below is
                        # effectively always True here -- confirm intent.
                        if not isinstance(value, list):
                            list_of_values = [s for s in value.split(',') if s]
                            converted_values = []
                            t = t[0]
                            for v in list_of_values:
                                converted_values.append(convert(name, t, v))
                        # If the value was already a list, then it must have
                        # been a list of DB objects, so we didn't need to touch it
                        kwargs[name] = converted_values
                    else:
                        kwargs[name] = convert(name, t, value)
            if errors:
                raise ApiValueException([{'value': str(e[2]), 'message':str(e[0]), 'field': e[1]} for e in errors], INVALID)
            else:
                return fn(**kwargs)
        return new
    return decorator
# -*- coding: utf-8 -*-
"""
Django settings for project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os.path
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY NOTE(review): this key is committed to source control; override it
# via settings_local.py (loaded at the bottom of this file) in production.
SECRET_KEY = 'lj-35m*+49jg+pji^sw)bw%-8z81og(=jb_6oh=ph3(5*qs70g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    ('Marcelo Pessanha', 'mpessanha1975@gmail.com'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'project.urls'
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
MEDIA_ROOT = os.path.join(PROJECT_PATH,'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_PATH,'static')
STATIC_URL = '/static/'
# Additional locations traversed by the staticfiles app, as (prefix, path)
# pairs.  BUG FIX: the trailing comma is required -- without it the outer
# parentheses are just grouping, so STATICFILES_DIRS was a single 2-tuple
# ('site', path) and Django would treat 'site' and the path as two separate
# directory entries.
STATICFILES_DIRS = (
    ('site', os.path.join(PROJECT_PATH, 'sitestatic')),
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(PROJECT_PATH,'templates'),
)
# =======================================================================================
# Loads settings_local.py if it exists (local overrides for secrets/DEBUG etc.)
# =======================================================================================
# NOTE: execfile() is Python 2 only; a missing file raises IOError, which is
# deliberately ignored so the overrides stay optional.
try:
    execfile(os.path.join(PROJECT_PATH,'settings_local.py'), globals(), locals())
except IOError:
    pass
# coding: utf8
# Copyright 2015 Vincent Jacques <vincent@vincent-jacques.net>
import unittest
import MockMockMock
def compute_checksum(payload):
    """Return the Dynamixel packet checksum for *payload*.

    The checksum is the low byte of the bitwise complement of the sum of
    all payload bytes.
    """
    total = sum(payload)
    return (~total) & 0xFF
# @todo Extract in exceptions.py (Easier to document)
class CommunicationError(Exception):
    """
    Raised when a packet received from the hardware is malformed: wrong
    number of bytes, missing 0xFF 0xFF header, or a bad checksum.
    """
    pass
class Bus(object):
    """
    Implements the Dynamixel packet protocol on top of a hardware link
    (any object providing ``send(bytes)`` and ``receive(count)``).
    """
    def __init__(self, hardware):
        self.__hardware = hardware
    def send(self, ident, instruction):
        """
        Send *instruction* to the device with id *ident* and return the
        parsed status packet as an ``(ident, error, response)`` tuple,
        where ``response`` is built via ``instruction.response_class``.
        """
        self.__send(ident, instruction)
        return self.__receive(instruction.response_class)
    def broadcast(self, instruction):
        """
        Send *instruction* to all devices using the broadcast id (0xFE).
        No status packet is read.
        """
        self.__send(0xFE, instruction)
    def __send(self, ident, instruction):
        # Packet layout: 0xFF 0xFF, ident, length, code, params..., checksum,
        # where length covers code + params + checksum.
        length = len(instruction.parameters) + 2
        payload = [ident, length, instruction.code] + instruction.parameters
        checksum = compute_checksum(payload)
        packet = [0xFF, 0xFF] + payload + [checksum]
        self.__hardware.send(packet)
    def __receive(self, response_class):
        # @todo Catch and translate exceptions raised by hardware
        ff1, ff2, ident, length = self.__receive_from_hardware(4)
        if ff1 != 0xFF or ff2 != 0xFF:
            raise CommunicationError
        payload = self.__receive_from_hardware(length)
        error = payload[0]
        parameters = payload[1:-1]
        checksum = payload[-1]
        # Recompute the checksum over ident/length/error/params and compare.
        payload = [ident, length, error] + parameters
        if checksum != compute_checksum(payload):
            raise CommunicationError
        return ident, error, response_class(parameters)
    def __receive_from_hardware(self, count):
        # Read exactly *count* bytes; a short read is a protocol error.
        payload = self.__hardware.receive(count)
        if len(payload) != count:
            raise CommunicationError
        return payload
class ComputeChecksumTestCase(unittest.TestCase):
    """Checks compute_checksum against the example from the Robotis docs."""
    def test(self):
        # From http://support.robotis.com/en/product/dynamixel/communication/dxl_packet.htm
        self.assertEqual(compute_checksum([0x01, 0x05, 0x03, 0x0C, 0x64, 0xAA]), 0xDC)
class BusTestCase(unittest.TestCase):
    """Exercises Bus.send/broadcast and its error paths against a mocked
    hardware link (MockMockMock)."""
    class TestInstruction:
        # Minimal stand-in for an instruction object: the response class is
        # the identity, so parsed parameters come back unchanged.
        def __init__(self, code, parameters):
            self.code = code
            self.parameters = parameters
            self.response_class = lambda parameters: parameters
    def setUp(self):
        super(BusTestCase, self).setUp()
        self.mocks = MockMockMock.Engine()
        self.hardware = self.mocks.create("hardware")
        self.bus = Bus(self.hardware.object)
    def tearDown(self):
        self.mocks.tearDown()
        super(BusTestCase, self).tearDown()
    def test_send(self):
        # From http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 1)
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x03])
        self.hardware.expect.receive(3).and_return([0x00, 0x20, 0xDB])
        ident, error, parameters = self.bus.send(0x01, self.TestInstruction(0x02, [0x2B, 0x01]))
        self.assertEqual(ident, 0x01)
        self.assertEqual(error, 0x00)
        self.assertEqual(parameters, [0x20])
    def test_broadcast(self):
        # From http://support.robotis.com/en/product/dynamixel/communication/dxl_instruction.htm (example 2)
        self.hardware.expect.send([0xFF, 0xFF, 0xFE, 0x04, 0x03, 0x03, 0x01, 0xF6])
        self.bus.broadcast(self.TestInstruction(0x03, [0x03, 0x01]))
    def test_hardware_returns_wrong_number_of_bytes(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x01])
        with self.assertRaises(CommunicationError):
            self.bus.send(0x01, self.TestInstruction(0x02, [0x2B, 0x01]))
    def test_hardware_returns_not_ffff(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
        self.hardware.expect.receive(4).and_return([0xFE, 0xFF, 0x01, 0x00])
        with self.assertRaises(CommunicationError):
            self.bus.send(0x01, self.TestInstruction(0x02, [0x2B, 0x01]))
    def test_wrong_checksum(self):
        self.hardware.expect.send([0xFF, 0xFF, 0x01, 0x04, 0x02, 0x2B, 0x01, 0xCC])
        self.hardware.expect.receive(4).and_return([0xFF, 0xFF, 0x01, 0x03])
        self.hardware.expect.receive(3).and_return([0x00, 0x20, 0xDA])
        with self.assertRaises(CommunicationError):
            self.bus.send(0x01, self.TestInstruction(0x02, [0x2B, 0x01]))
| jacquev6/Pynamixel | Pynamixel/bus.py | Python | mit | 4,563 |
import NavigationInstance
from time import localtime, mktime, gmtime
from ServiceReference import ServiceReference
from enigma import iServiceInformation, eServiceCenter, eServiceReference, getBestPlayableServiceReference
from timer import TimerEntry
class TimerSanityCheck:
	"""Checks an enigma2 timer list for recording conflicts.

	Builds begin/end events for all enabled timers (expanding repeating
	timers over the relevant weeks), then simulates tuning each overlapping
	timer via fake record services to detect tuner conflicts.  Python 2
	code (print statement, integer division).
	"""
	def __init__(self, timerlist, newtimer=None):
		# Offset between local time and UTC, sampled away from the epoch
		# boundary to avoid negative timestamps.
		self.localtimediff = 25*3600 - mktime(gmtime(25*3600))
		self.timerlist = timerlist
		self.newtimer = newtimer
		self.simultimer = []
		self.rep_eventlist = []
		self.nrep_eventlist = []
		# NOTE(review): begin events are tagged with bflag == -1 and end
		# events with eflag == 1, which is the opposite of the comment in
		# checkTimerlist below ("1 for begin, -1 for end") -- confirm.
		self.bflag = -1
		self.eflag = 1
	def check(self, ext_timer=1):
		"""Run the sanity check; returns True when no conflict is found.

		An external plugin may pass its own timer via *ext_timer*.
		"""
		if ext_timer != 1:
			self.newtimer = ext_timer
		if self.newtimer is None:
			self.simultimer = []
		else:
			self.simultimer = [ self.newtimer ]
		return self.checkTimerlist()
	def getSimulTimerList(self):
		# List of timers involved in the last detected conflict.
		return self.simultimer
	def doubleCheck(self):
		"""Return True if the new timer duplicates (is fully contained in,
		on the same service as) an existing timer."""
		if self.newtimer is not None and self.newtimer.service_ref.ref.valid():
			self.simultimer = [ self.newtimer ]
			for timer in self.timerlist:
				if timer == self.newtimer:
					return True
				else:
					if self.newtimer.begin >= timer.begin and self.newtimer.end <= timer.end:
						fl1 = timer.service_ref.ref.flags & eServiceReference.isGroup
						fl2 = self.newtimer.service_ref.ref.flags & eServiceReference.isGroup
						if fl1 != fl2:
							return False
						if fl1: #is group
							return timer.service_ref.ref.getPath() == self.newtimer.service_ref.ref.getPath()
						getUnsignedDataRef1 = timer.service_ref.ref.getUnsignedData
						getUnsignedDataRef2 = self.newtimer.service_ref.ref.getUnsignedData
						# Compare the service reference data words (SID, TSID,
						# ONID, namespace); all equal means the same service.
						for x in (1, 2, 3, 4):
							if getUnsignedDataRef1(x) != getUnsignedDataRef2(x):
								break;
						else:
							return True
		return False
	def checkTimerlist(self, ext_timer=1):
		"""Scan all enabled timers (plus the new one) for tuner conflicts.

		Returns True when no conflict is found; on conflict returns False
		and stores the involved timers in ``self.simultimer``.
		"""
		#with special service for external plugins
		# Entries in eventlist
		# timeindex
		# BeginEndFlag 1 for begin, -1 for end
		# index -1 for the new Timer, 0..n index of the existing timers
		# count of running timers
		serviceHandler = eServiceCenter.getInstance()
		# create a list with all start and end times
		# split it into recurring and singleshot timers
		##################################################################################
		# process the new timer
		self.rep_eventlist = []
		self.nrep_eventlist = []
		if ext_timer != 1:
			self.newtimer = ext_timer
		if (self.newtimer is not None) and (not self.newtimer.disabled):
			if not self.newtimer.service_ref.ref.valid():
				return False
			# The repeat bitmask is rotated so bit 0 corresponds to Thursday
			# (the epoch weekday) rather than Monday.
			rflags = self.newtimer.repeated
			rflags = ((rflags & 0x7F)>> 3)|((rflags & 0x07)<<4)
			if rflags:
				begin = self.newtimer.begin % 86400 # map to first day
				# Shift the day bitmask when the local-time day differs from UTC.
				if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
					rflags = ((rflags >> 1)& 0x3F)|((rflags << 6)& 0x40)
				elif (self.localtimediff < 0) and (begin < self.localtimediff):
					rflags = ((rflags << 1)& 0x7E)|((rflags >> 6)& 0x01)
				while rflags: # then arrange on the week
					if rflags & 1:
						self.rep_eventlist.append((begin, -1))
					begin += 86400
					rflags >>= 1
			else:
				self.nrep_eventlist.extend([(self.newtimer.begin,self.bflag,-1),(self.newtimer.end,self.eflag,-1)])
		##################################################################################
		# now process existing timers
		idx = 0
		for timer in self.timerlist:
			if (timer != self.newtimer) and (not timer.disabled):
				if timer.repeated:
					rflags = timer.repeated
					rflags = ((rflags & 0x7F)>> 3)|((rflags & 0x07)<<4)
					begin = timer.begin % 86400 # map all to first day
					if (self.localtimediff > 0) and ((begin + self.localtimediff) > 86400):
						rflags = ((rflags >> 1)& 0x3F)|((rflags << 6)& 0x40)
					elif (self.localtimediff < 0) and (begin < self.localtimediff):
						rflags = ((rflags << 1)& 0x7E)|((rflags >> 6)& 0x01)
					while rflags:
						if rflags & 1:
							self.rep_eventlist.append((begin, idx))
						begin += 86400
						rflags >>= 1
				elif timer.state < TimerEntry.StateEnded:
					self.nrep_eventlist.extend([(timer.begin,self.bflag,idx),(timer.end,self.eflag,idx)])
			idx += 1
		################################################################################
		# journalize timer repeations
		# Expand repeating timers into concrete begin/end events covering the
		# time span of the one-shot events (or two sample weeks when there
		# are no one-shot events at all).
		if self.nrep_eventlist:
			interval_begin = min(self.nrep_eventlist)[0]
			interval_end = max(self.nrep_eventlist)[0]
			offset_0 = interval_begin - (interval_begin % 604800)
			weeks = (interval_end - offset_0) / 604800
			if ((interval_end - offset_0) % 604800):
				weeks += 1
			for cnt in range(weeks):
				for event in self.rep_eventlist:
					if event[1] == -1: # -1 is the identifier of the changed timer
						event_begin = self.newtimer.begin
						event_end = self.newtimer.end
					else:
						event_begin = self.timerlist[event[1]].begin
						event_end = self.timerlist[event[1]].end
					new_event_begin = event[0] + offset_0 + (cnt * 604800)
					# summertime correction
					new_lth = localtime(new_event_begin).tm_hour
					new_event_begin += 3600 * (localtime(event_begin).tm_hour - new_lth)
					new_event_end = new_event_begin + (event_end - event_begin)
					if event[1] == -1:
						if new_event_begin >= self.newtimer.begin: # is the soap already running?
							self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]),(new_event_end, self.eflag, event[1])])
					else:
						if new_event_begin >= self.timerlist[event[1]].begin: # is the soap already running?
							self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]),(new_event_end, self.eflag, event[1])])
		else:
			offset_0 = 345600 # the Epoch begins on Thursday
			for cnt in (0, 1): # test two weeks to take care of Sunday-Monday transitions
				for event in self.rep_eventlist:
					if event[1] == -1: # -1 is the identifier of the changed timer
						event_begin = self.newtimer.begin
						event_end = self.newtimer.end
					else:
						event_begin = self.timerlist[event[1]].begin
						event_end = self.timerlist[event[1]].end
					new_event_begin = event[0] + offset_0 + (cnt * 604800)
					new_event_end = new_event_begin + (event_end - event_begin)
					self.nrep_eventlist.extend([(new_event_begin, self.bflag, event[1]),(new_event_end, self.eflag, event[1])])
		################################################################################
		# order list chronological
		self.nrep_eventlist.sort()
		##################################################################################
		# detect overlapping timers and overlapping times
		# Sweep over the sorted events, starting a fake (simulated) record
		# service at each begin event and stopping it at the matching end.
		# A failed fake-record start means no free tuner -> conflict.
		fakeRecList = []
		ConflictTimer = None
		ConflictTunerType = None
		newTimerTunerType = None
		cnt = 0
		idx = 0
		overlaplist = []
		for event in self.nrep_eventlist:
			cnt += event[1]
			if event[2] == -1: # new timer
				timer = self.newtimer
			else:
				timer = self.timerlist[event[2]]
			if event[1] == self.bflag:
				tunerType = [ ]
				if timer.service_ref.ref and timer.service_ref.ref.flags & eServiceReference.isGroup:
					fakeRecService = NavigationInstance.instance.recordService(getBestPlayableServiceReference(timer.service_ref.ref, eServiceReference()), True)
				else:
					fakeRecService = NavigationInstance.instance.recordService(timer.service_ref, True)
				if fakeRecService:
					fakeRecResult = fakeRecService.start(True)
				else:
					fakeRecResult = -1
				if not fakeRecResult: # tune okay
					feinfo = fakeRecService.frontendInfo()
					if feinfo:
						tunerType.append(feinfo.getFrontendData().get("tuner_type"))
				else: # tune failed.. so we must go another way to get service type (DVB-S, DVB-T, DVB-C)
					def getServiceType(ref): # helper function to get a service type of a service reference
						serviceInfo = serviceHandler.info(ref)
						serviceInfo = serviceInfo and serviceInfo.getInfoObject(ref, iServiceInformation.sTransponderData)
						return serviceInfo and serviceInfo["tuner_type"] or ""
					ref = timer.service_ref.ref
					if ref.flags & eServiceReference.isGroup: # service group ?
						serviceList = serviceHandler.list(ref) # get all alternative services
						if serviceList:
							for ref in serviceList.getContent("R"): # iterate over all group service references
								type = getServiceType(ref)
								if not type in tunerType: # just add single time
									tunerType.append(type)
					else:
						tunerType.append(getServiceType(ref))
				if event[2] == -1: # new timer
					newTimerTunerType = tunerType
				overlaplist.append((fakeRecResult, timer, tunerType))
				fakeRecList.append((timer, fakeRecService))
				if fakeRecResult:
					if ConflictTimer is None: # just take care of the first conflict
						ConflictTimer = timer
						ConflictTunerType = tunerType
			elif event[1] == self.eflag:
				# End event: release this timer's fake record service and drop
				# it from the current overlap set.
				for fakeRec in fakeRecList:
					if timer == fakeRec[0] and fakeRec[1]:
						NavigationInstance.instance.stopRecordService(fakeRec[1])
						fakeRecList.remove(fakeRec)
				fakeRec = None
				for entry in overlaplist:
					if entry[1] == timer:
						overlaplist.remove(entry)
			else:
				print "Bug: unknown flag!"
			self.nrep_eventlist[idx] = (event[0],event[1],event[2],cnt,overlaplist[:]) # insert a duplicate into current overlaplist
			idx += 1
		if ConflictTimer is None: # no conflict found :)
			return True
		##################################################################################
		# we have detected a conflict, now we must figure out the involved timers
		if self.newtimer is not None: # new timer?
			if self.newtimer is not ConflictTimer: # the new timer is not the conflicting timer?
				# Prefer reporting the new timer as the conflicting one when it
				# overlaps the detected conflict.
				for event in self.nrep_eventlist:
					if len(event[4]) > 1: # entry in overlaplist of this event??
						kt = False
						nt = False
						for entry in event[4]:
							if entry[1] is ConflictTimer:
								kt = True
							if entry[1] is self.newtimer:
								nt = True
						if nt and kt:
							ConflictTimer = self.newtimer
							ConflictTunerType = newTimerTunerType
							break
		# Collect every timer that overlaps the conflict timer on the same
		# tuner type into self.simultimer.
		self.simultimer = [ ConflictTimer ]
		for event in self.nrep_eventlist:
			if len(event[4]) > 1: # entry in overlaplist of this event??
				for entry in event[4]:
					if entry[1] is ConflictTimer:
						break
				else:
					continue
				for entry in event[4]:
					if not entry[1] in self.simultimer:
						for x in entry[2]:
							if x in ConflictTunerType:
								self.simultimer.append(entry[1])
								break
		if len(self.simultimer) < 2:
			print "Possible Bug: unknown Conflict!"
			return True
		return False # conflict detected!
| XTAv2/Enigma2 | lib/python/Components/TimerSanityCheck.py | Python | gpl-2.0 | 10,362 |
# Unidecode transliteration table for the Unicode block U+21xx
# (Letterlike Symbols, Number Forms, Arrows): entry 0xNN of this tuple is
# the ASCII replacement for code point U+21NN.
# '[?]' marks code points with no known ASCII transliteration; '' means
# the character is dropped entirely.
data = (
' a/c ', # 0x00
' a/s ', # 0x01
'C', # 0x02
'', # 0x03
'', # 0x04
' c/o ', # 0x05
' c/u ', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'g', # 0x0a
'H', # 0x0b
'H', # 0x0c
'H', # 0x0d
'h', # 0x0e
'', # 0x0f
'I', # 0x10
'I', # 0x11
'L', # 0x12
'l', # 0x13
'', # 0x14
'N', # 0x15
'No. ', # 0x16
'', # 0x17
'', # 0x18
'P', # 0x19
'Q', # 0x1a
'R', # 0x1b
'R', # 0x1c
'R', # 0x1d
'', # 0x1e
'', # 0x1f
'(sm)', # 0x20
'TEL', # 0x21
'(tm)', # 0x22
'', # 0x23
'Z', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'Z', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'B', # 0x2c
'C', # 0x2d
'e', # 0x2e
'e', # 0x2f
'E', # 0x30
'F', # 0x31
'F', # 0x32
'M', # 0x33
'o', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'i', # 0x39
'', # 0x3a
'FAX', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'D', # 0x45
'd', # 0x46
'e', # 0x47
'i', # 0x48
'j', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
# vulgar fractions (U+2150 ff.)
' 1/7 ', # 0x50
' 1/9 ', # 0x51
' 1/10 ', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
# Roman numerals, upper case (U+2160 ff.)
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
# Roman numerals, lower case (U+2170 ff.)
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
' 0/3 ', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
# arrows and arrow-like symbols (U+2190 ff.), approximated with ASCII strokes
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| wilsonrivera/scalider-v2 | tools/UnidecodeDataCompiler/data/x021.py | Python | apache-2.0 | 4,012 |
from django.conf.urls import include, url
from django.contrib import admin
# Project URL routes: Django admin, the per-app URLconfs, and the core
# authentication views.
# NOTE(review): string view references ('dashboard.views.index') were
# deprecated in Django 1.8 and removed in 1.10 -- fine for the Django
# version this project targets, but they must become imported callables
# on upgrade.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^dashboard/', include('dashboard.urls')),
    url(r'^manager/', include('manager.urls')),
    # Core URLS
    url(r'^$', 'dashboard.views.index', name='index'),
    url(r'^login/$', 'core.views.login', name='login'),
    url(r'^logout/$', 'core.views.logout', name='logout'),
]
| ajrbyers/statpage | src/core/urls.py | Python | gpl-2.0 | 429 |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import tempfile
import shutil
import itertools
import platform
from nose.tools import raises, assert_raises
import mock
import numpy as np
import PIL.Image
from . import parse_folder as _
class TestUnescape():
    """Tests for parse_folder.unescape (percent-decoding of path names)."""
    def test_hello(self):
        # plain text passes through unchanged
        assert _.unescape('hello') == 'hello'
    def test_space(self):
        # '%20' decodes to a single space
        assert _.unescape('%20') == ' '
class TestValidateFolder():
    """Tests for parse_folder.validate_folder()."""
    @classmethod
    def setUpClass(cls):
        # one shared scratch directory containing a single file
        cls.tmpdir = tempfile.mkdtemp()
        _handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
        # close the fd returned by mkstemp so it is not leaked
        os.close(_handle)
    @classmethod
    def tearDownClass(cls):
        # best-effort cleanup; ignore_errors replaces the original bare
        # except, which also swallowed KeyboardInterrupt/SystemExit
        shutil.rmtree(cls.tmpdir, ignore_errors=True)
    def test_dir(self):
        # an existing directory is a valid folder
        # (PEP 8: assert truthiness directly instead of '== True')
        assert _.validate_folder(self.tmpdir)
    def test_file(self):
        # a plain file is not a valid folder
        assert not _.validate_folder(self.tmpfile)
    def test_nonexistent_dir(self):
        assert not _.validate_folder(os.path.abspath('not-a-directory'))
    def test_nonexistent_url(self):
        assert not _.validate_folder('http://localhost/not-a-url')
class TestValidateOutputFile():
    """Tests for parse_folder.validate_output_file()."""
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
        _handle, cls.tmpfile = tempfile.mkstemp(dir=cls.tmpdir)
        # close the fd returned by mkstemp so it is not leaked
        os.close(_handle)
    @classmethod
    def tearDownClass(cls):
        # best-effort cleanup; ignore_errors replaces the original bare
        # except, which also swallowed KeyboardInterrupt/SystemExit
        shutil.rmtree(cls.tmpdir, ignore_errors=True)
    def test_missing_file(self):
        # (PEP 8: assert truthiness directly instead of '== True/False')
        assert _.validate_output_file(None), 'all new files should be valid'
    def test_file(self):
        assert _.validate_output_file(os.path.join(self.tmpdir, 'output.txt'))
    @mock.patch('os.access')
    def test_local_file(self, mock_access):
        mock_access.return_value = True
        assert _.validate_output_file('not-a-file.txt'), 'relative paths should be accepted'
    @mock.patch('os.access')
    def test_not_writeable(self, mock_access):
        mock_access.return_value = False
        assert not _.validate_output_file(self.tmpfile), 'should not succeed without write permission'
    def test_existing_file(self):
        # refusing to clobber an existing file
        assert not _.validate_output_file(self.tmpfile)
    def test_nonexistent_dir(self):
        assert not _.validate_output_file(
            os.path.join(
                os.path.abspath('not-a-dir'),
                'output.txt'
            )
        )
class TestValidateInputFile():
    """Tests for parse_folder.validate_input_file()."""
    @classmethod
    def setUpClass(cls):
        _handle, cls.tmpfile = tempfile.mkstemp()
        os.close(_handle)
    @classmethod
    def tearDownClass(cls):
        os.remove(cls.tmpfile)
    def test_missing_file(self):
        # (typo fix in the message: 'missigle' -> 'missing';
        #  PEP 8: assert truthiness directly instead of '== False')
        assert not _.validate_input_file('not-a-file.txt'), 'should not pass on missing file'
    @mock.patch('os.access')
    def test_not_readable(self, mock_access):
        mock_access.return_value = False
        assert not _.validate_input_file(self.tmpfile), 'should not succeed without read permission'
class TestValidateRange():
    """Tests for parse_folder.validate_range() bounds checking.

    PEP 8 cleanup: boolean results are asserted directly instead of
    being compared with '== True' / '== False'.
    """
    def test_no_range(self):
        assert _.validate_range(0)
    def test_min_less(self):
        assert not _.validate_range(-1, min_value=0)
    def test_min_equal(self):
        # the minimum itself is in range (inclusive bound)
        assert _.validate_range(0, min_value=0)
    def test_min_more(self):
        assert _.validate_range(1, min_value=0)
    def test_max_less(self):
        assert _.validate_range(9, max_value=10)
    def test_max_equal(self):
        # the maximum itself is in range (inclusive bound)
        assert _.validate_range(10, max_value=10)
    def test_max_more(self):
        assert not _.validate_range(11, max_value=10)
    def test_allow_none_true(self):
        assert _.validate_range(None, allow_none=True)
    def test_allow_none_false(self):
        assert not _.validate_range(None, allow_none=False)
    def test_string(self):
        # non-numeric input is rejected
        assert not _.validate_range('foo')
@mock.patch('tools.parse_folder.validate_output_file')
@mock.patch('tools.parse_folder.validate_input_file')
class TestCalculatePercentages():
    """Tests for parse_folder.calculate_percentages().

    The class-level mock.patch decorators stub out file validation so
    the percentage arithmetic can be exercised without a filesystem.
    """
    @raises(AssertionError)
    def test_making_0(self, mock_input, mock_output):
        # supplying no output files at all is a usage error
        _.calculate_percentages(None, None, None, None, None, None, None)
    def test_making_1(self, mock_input, mock_output):
        mock_input.return_value = True
        mock_output.return_value = True
        # a single output file receives 100% of the data
        expected_outputs = [
            ('train_file', (100, 0, 0)),
            ('val_file', (0, 100, 0)),
            ('test_file', (0, 0, 100))
        ]
        for supplied, expected in expected_outputs:
            args = {k: None for k in ['labels_file', 'train_file', 'percent_train', 'val_file', 'percent_val', 'test_file', 'percent_test']}
            args.update({supplied: ''})
            output = _.calculate_percentages(**args)
            # bugfix: the format arguments were swapped, so the message
            # reported "expected <actual>, got <expected>"
            assert output == expected, 'expected output of {}, got {}'.format(expected, output)
    def test_making_2(self, mock_input, mock_output):
        mock_input.return_value = True
        mock_output.return_value = True
        # every pair of output files splits 32/68 between them
        permutes = itertools.combinations(['train', 'val', 'test'], 2)
        expected_outputs = itertools.izip(permutes, itertools.repeat((32, 68)))
        for supplied, expected in expected_outputs:
            args = {k: None for k in ['labels_file', 'train_file', 'percent_train', 'val_file', 'percent_val', 'test_file', 'percent_test']}
            args.update({k+'_file': '' for k in supplied})
            args.update({'percent_'+k: v for k, v in itertools.izip(supplied, expected)})
            # Tricky line. itertools returns combinations in sorted order, always.
            # The order of the returned non-zero values should always be correct.
            output = [x for x in _.calculate_percentages(**args) if x != 0]
            # bugfix: format arguments were swapped here as well
            assert output == list(expected), 'expected output of {}, got {}'.format(expected, output)
    def test_making_3_all_given(self, mock_input, mock_output):
        mock_input.return_value = True
        mock_output.return_value = True
        # all three percentages supplied: returned unchanged
        expected = (25, 30, 45)
        assert _.calculate_percentages(
            labels_file='not-a-file.txt',
            train_file='not-a-file.txt', percent_train=25,
            val_file='not-a-file.txt', percent_val=30,
            test_file='not-a-file.txt', percent_test=45
        ) == expected, 'Calculate percentages should return identical values of {}'.format(expected)
    def test_making_3_2_given(self, mock_input, mock_output):
        mock_input.return_value = True
        mock_output.return_value = True
        # the missing third percentage is derived as 100 - 25 - 30
        expected = 45
        assert _.calculate_percentages(
            labels_file='not-a-file.txt',
            train_file='not-a-file.txt', percent_train=25,
            val_file='not-a-file.txt', percent_val=30,
            test_file='not-a-file.txt', percent_test=None
        )[2] == expected, 'Calculate percentages should calculate third value of {}'.format(expected)
    @raises(AssertionError)
    def test_making_out_of_range(self, mock_input, mock_output):
        mock_input.return_value = True
        mock_output.return_value = True
        # should raise AssertionError because percentages not between 0-100 are invalid
        _.calculate_percentages(
            labels_file='not-a-file.txt',
            train_file='not-a-file.txt', percent_train=-1,
            val_file=None, percent_val=None,
            test_file=None, percent_test=None
        )
class TestParseWebListing():
    """Tests for parse_folder.parse_web_listing() against canned HTML
    directory listings from several common web servers."""
    def test_non_url(self):
        # nose generator test: every malformed/unreachable URL must raise
        for url in ['not-a-url', 'http://not-a-url', 'https://not-a-url']:
            yield self.check_url_raises, url
    def check_url_raises(self, url):
        assert_raises(Exception, _.parse_web_listing, url)
    def test_mock_url(self):
        # each fixture is (server response body, expected dirs, expected files)
        for content, dirs, files in [
            # Nothing
            ('', [], []),
            # Apache 2.2.22
            (
                '<head></head><body><table>\n \
                <tr><td><a href="/home/">Parent</a></td></tr>\n \
                <tr><td><a href="cat1/">cat1/</a></td><td>01-Jan-2015 12:34</td><td> - </td></tr>\n \
                <tr><td><a href="cat2/">cat2/</a></td><td>02-Feb-2015 23:45</td><td> - </td></tr>\n \
                <tr><td><a href="cat.jpg">cat.jpg</a></td><td>03-Mar-2015 1:23</td><td> 1 </td></tr>\n \
                </table</body>\n',
                ['cat1/', 'cat2/'],
                ['cat.jpg'],
            ),
            # Apache 2.4.7
            (
                '<html><head></head><body><table>\n \
                <tr><td><a href="/home/">Parent</a></td></tr>\n \
                <tr><td><a href="dog/">dog/</a></td><td>01-01-2015 12:34</td><td> - </td></tr>\n \
                <tr><td><a href="dog1.jpeg">dog1.jpeg</a></td><td>02-02-2015 23:45</td><td> 1 </td></tr>\n \
                <tr><td><a href="dog2.png">dog2.png</a></td><td>03-03-2015 1:23</td><td> 2 </td></tr>\n \
                </table</body></html>\n',
                ['dog/'],
                ['dog1.jpeg', 'dog2.png'],
            ),
            # Nginx
            (
                '<html><head></head><body>\n \
                <a href="bird.jpg">bird.jpg</a> 01-Jan-1999 01:23 1\n \
                <a href="birds/">birds/</a> 02-Feb-1999 12:34 -',
                ['birds/'],
                ['bird.jpg'],
            ),
        ]:
            # stub out the HTTP layer so no real request is made
            with mock.patch('tools.parse_folder.requests') as mock_requests:
                response = mock.Mock()
                response.status_code = mock_requests.codes.ok
                response.content = content
                mock_requests.get.return_value = response
                yield self.check_listing, (dirs, files)
    def check_listing(self, rc):
        # the URL is irrelevant here since requests is mocked out
        assert _.parse_web_listing('any_url') == rc
class TestSplitIndices():
    """Generator tests for parse_folder.three_way_split_indices()."""
    def test_indices(self):
        # sweep a handful of sizes and percentage combinations
        for size in [5, 22, 32]:
            for percent_b in range(0, 100, 31):
                for percent_c in range(0, 100-percent_b, 41):
                    yield self.check_split, size, percent_b, percent_c
    def check_split(self, size, pct_b, pct_c):
        # ideal (fractional) split points implied by the percentages
        ideal_first = size * float(100 - pct_b - pct_c)/100.0
        ideal_second = size * float(100 - pct_c)/100.0
        split_first, split_second = _.three_way_split_indices(size, pct_b, pct_c)
        # the implementation may round; allow up to two entries of slack
        assert abs(ideal_first-split_first) <= 2, 'split should be close to {}, is {}'.format(ideal_first, split_first)
        assert abs(ideal_second-split_second) <= 2, 'split should be close to {}, is {}'.format(ideal_second, split_second)
class TestParseFolder():
    """End-to-end test for parse_folder.parse_folder()."""
    def test_all_train(self):
        # build a three-class image folder, parse it with a 100/0/0
        # split, and check the discovered label list
        tmpdir = tempfile.mkdtemp()
        img = PIL.Image.fromarray(np.zeros((10,10,3), dtype='uint8'))
        classes = ['A','B','C']
        for cls in classes:
            os.makedirs(os.path.join(tmpdir, cls))
            img.save(os.path.join(tmpdir, cls, 'image1.png'))
            img.save(os.path.join(tmpdir, cls, 'image2.jpg'))
        labels_file = os.path.join(tmpdir, 'labels.txt')
        train_file = os.path.join(tmpdir, 'train.txt')
        _.parse_folder(tmpdir, labels_file, train_file=train_file,
                percent_train=100, percent_val=0, percent_test=0)
        # labels.txt should list the class names, one per line, in order
        with open(labels_file) as infile:
            parsed_classes = [line.strip() for line in infile]
        assert parsed_classes == classes, '%s != %s' % (parsed_classes, classes)
        shutil.rmtree(tmpdir)
| delectable/DIGITS | tools/test_parse_folder.py | Python | bsd-3-clause | 11,183 |
# Expected parse result for tests/requests/valid/002: a bare HTTP/1.1
# GET (curl-style headers, no body).
# NOTE(review): `uri` is a helper supplied by the test harness
# (presumably expanding "/test" into its raw/split components -- confirm
# in the harness); header names appear upper-cased in this fixture.
request = {
    "method": "GET",
    "uri": uri("/test"),
    "version": (1, 1),
    "headers": [
        ("USER-AGENT", "curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1"),
        ("HOST", "0.0.0.0=5000"),
        ("ACCEPT", "*/*")
    ],
    "body": b""
}
| urbaniak/gunicorn | tests/requests/valid/002.py | Python | mit | 296 |
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints report numbers
"""
__revision__ = ""
def format(bfo, limit, separator=" ", extension=" etc."):
    """
    Prints the report numbers of the record (037__a and 088__a)

    @param bfo: BibFormatObject for the record being formatted
    @param limit: the max number of report numbers to print, as a
        numeric string; a non-numeric value means "no limit"
    @param separator: the separator between report numbers.
    @param extension: a suffix appended when the list of report numbers
        had to be truncated to respect the limit
    """
    numbers = bfo.fields("037__a")
    numbers.extend(bfo.fields("088__a"))
    if limit.isdigit():
        limit_as_int = int(limit)
        # Only truncate (and advertise the truncation with the extension)
        # when there really are more numbers than the limit allows.
        # Bugfix: the original '<=' also appended the extension when
        # limit == len(numbers), i.e. when nothing had been omitted.
        if limit_as_int < len(numbers):
            return separator.join(numbers[:limit_as_int]) + extension
    return separator.join(numbers)
| ppiotr/Bibedit-some-refactoring | modules/bibformat/lib/elements/bfe_report_numbers.py | Python | gpl-2.0 | 1,519 |
"""
File: email_vsas.py
Author: Levi Bostian (bostianl@uni.edu)
Description: Class for sending emails.
***NOTE***
Can only send with 1 email address.
Following commands to get to work:
emailObj = SendEmail()
emailObj.setRecipient("test@test.com")
emailObj.setSubject("test subject")
#set as RED, YELLOW, or ORANGE
emailObj.setAlertLevel("RED")
emailObj.setDbPhotoLink("dropbox.com/photo/link/")
emailObj.setDbVidLink("vsassoftware.com/videoInfoHere")
emailObj.setDuration("2 min 34 sec")
emailObj.sendEmail()
References: http://www.tutorialspoint.com/python/python_sending_email.htm
http://segfault.in/2010/12/sending-gmail-from-python/
http://docs.python.org/2/library/email-examples.html#email-examples <--- use this one
"""
import bz2 # used for really bad password "encyption" (it actually just compresses it). Better then nothing
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class SendEmail:
    """Builds and sends a VSAS motion-alert email via Gmail SMTP.

    The constructor immediately opens an authenticated SMTP connection
    (see setUp/setConnection), so instantiating this class performs
    network I/O.  Callers then set the recipient/subject/alert fields
    and call sendEmail().
    """
    def __init__(self):
        # fixed Gmail SMTP endpoint and sender account
        self.SMTP_SERVER = 'smtp.gmail.com'
        self.SMTP_PORT = 587 # or 465
        self.SENDER = 'team4.cs2720@gmail.com'
        self.subject = ""
        self.recipient = ""
        self.header = []
        self.emailConnection = ""
        self.alertLevel = ""
        self.dbPhotoLink = ""
        self.duration = ""
        self.dbVidLink = ""
        self.emailBody = ""
        self.date = ""
        self.setUp()
    def setRecipient(self, emailAddress):
        # single recipient email address (see module note: one at a time)
        self.recipient = emailAddress
    def setAlertLevel(self, level):
        # expected values: "RED", "YELLOW", or "ORANGE" (used as CSS color)
        self.alertLevel = level
    def setDbPhotoLink(self, link):
        # Dropbox URL of the captured still image
        self.dbPhotoLink = link
    def setDbVidLink(self, link):
        # URL of the recorded video footage
        self.dbVidLink = link
    def setDuration(self, length):
        # human-readable event duration, e.g. "2 min 34 sec"
        self.duration = length
    def setDate(self, date):
        # date string shown in the "Motion Details" table
        self.date = date
    def setUp(self):
        # load credentials, then open the SMTP connection
        self.getPassword()
        self.setConnection()
    def setSubject(self, subject = "subject"):
        #expecting String
        self.subject = subject
    def sendEmail(self):
        # render the HTML body and ship it; closes the connection on success
        self.openEmailBody()
        try:
            self.emailConnection.sendmail(self.SENDER, self.recipient, self.body.as_string())
            self.emailConnection.quit()
        except:
            # NOTE(review): bare except hides the real SMTP failure reason
            print "Error: unable to send email"
    def openEmailBody(self):
        # assemble the MIME envelope and attach the rendered HTML part
        self.body = MIMEMultipart('alternative')
        self.body['Subject'] = self.subject
        self.body['From'] = self.SENDER
        self.body['To'] = self.recipient
        self.setUpEmail()
        self.body.attach(MIMEText(self.emailBody, 'html'))
    def getPassword(self):
        # receive encrypted email password from password file
        # passwordFile = open("email_p.txt", "r")
        # NOTE(review): the password file is commented out; the password
        # is currently hard-coded in plain text (and the bz2 "encryption"
        # mentioned at module top is unused).
        self.password = "teamPassword1"
        #passwordFile.close()
    def setConnection(self):
        # open a TLS (STARTTLS) session and authenticate as SENDER
        self.emailConnection = smtplib.SMTP(self.SMTP_SERVER, self.SMTP_PORT)
        self.emailConnection.ehlo()
        self.emailConnection.starttls()
        self.emailConnection.ehlo()
        self.emailConnection.login(self.SENDER, self.password)
    def setUpEmail(self):
        # render the HTML alert template; alertLevel doubles as the CSS
        # color name, and the photo/video links are interpolated inline
        self.emailBody = """
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
        "http://www.w3.org/TR/html4/strict.dtd">
        <html>
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
        <style type="text/css">
        body {
        margin: 0 auto;
        padding: 0;
        width: 600px;
        height: 100%;
        background-color: #FFFFFF;
        text-align: center;
        }
        #container {
        height: 100%;
        width: 600px;
        }
        </style>
        </head>
        <body>
        <h1 style="color:"""+self.alertLevel.lower()+"""; font-size: 50px;">"""+self.alertLevel+""" ALERT</h1>
        <h3>VSAS Motion Detected</h3>
        <img src="""+self.dbPhotoLink+" width=\"500px;\" height=\"400px;\" />"+"""
        <table border=0 style="margin: 0 auto;">
        <tr>
        <th colspan=3>-Motion Details-</th>
        </tr>
        <tr>
        <th>Date:</th>
        <td colspan=2>"""+self.date+"</td>"+"""
        </tr>
        <tr><td colspan=3></td></tr>
        <tr>
        <th colspan=3>Video Footage Link</th>
        </tr>
        <tr>
        <td colspan=3><a href="""+self.dbVidLink+">"+self.dbVidLink+"</a></td>"+"""
        </tr>
        </table>
        <h1>&nbsp;</h1>
        </body>
        </html>"""
| levibostian/VSAS | VSAS system/VSAS/Motion/email_vsas/email_vsas.py | Python | mit | 5,160 |
"""hug/this.py.
The Zen of Hug
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
ZEN_OF_HUG = """
Simple Things should be easy, complex things should be possible.
Complex things done often should be made simple.
Magic should be avoided.
Magic isn't magic as soon as its mechanics are universally understood.
Wrong documentation is worse than no documentation.
Everything should be documented.
All code should be tested.
All tests should be meaningful.
Consistency is more important than perfection.
It's okay to break consistency for practicality.
Clarity is more important than performance.
If we do our job right, there shouldn't need to be a choice.
Interfaces are one honking great idea -- let's do more of those!
"""
print(ZEN_OF_HUG)
| timothycrosley/hug | hug/this.py | Python | mit | 1,771 |
#!/usr/bin/python
from distutils.core import setup
# Packaging definition for the vips8 Python binding.
setup(name = 'vips8',
      version = '7.28.0dev',
      description = 'vips-8.x image processing library',
      long_description = open('README.txt').read(),
      # bugfix: the comma after the license argument was missing, which
      # made this file a SyntaxError and setup() uncallable
      license = 'LGPL',
      author = 'John Cupitt',
      author_email = 'jcupitt@gmail.com',
      url = 'http://www.vips.ecs.soton.ac.uk',
      requires = ['gi'],
      packages = ['vips8'])
| Web5design/libvips | python/setup.py | Python | lgpl-2.1 | 390 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
# Based on https://groups.google.com/d/topic/sqlalchemy/cQ9e9IVOykE/discussion
# By David Gardner (dgardner@creatureshop.com)
"""
StaticArray class and functions that SQLAlchemy can process instead of non hashable lists
"""
from cStringIO import StringIO
from sqlalchemy import String, types
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql.base import PGDialect
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
class StaticArray(types.TypeDecorator):
    """Hashable replacement for list-valued array columns.

    On PostgreSQL this maps to the native ``ARRAY(String)`` type; on any
    other dialect it degrades to a plain ``String`` column.  Result rows
    are converted to tuples so the values are hashable (unlike lists).
    """
    # placeholder; the concrete type is chosen per-dialect below
    impl = types.TypeEngine
    def __init__(self):
        super(StaticArray, self).__init__()
        # dialect class -> native array type to use for it
        self.__supported = {PGDialect: ARRAY}
    def load_dialect_impl(self, dialect):
        # pick the native ARRAY(String) where supported, String otherwise
        if dialect.__class__ in self.__supported:
            return self.__supported[dialect.__class__](String)
        else:
            return dialect.type_descriptor(String)
    def process_bind_param(self, value, dialect):
        # values are passed through unchanged on the way to the database
        return value
    def process_result_value(self, value, dialect):
        # tuples (not lists) so results are hashable
        return tuple(value)
    def is_mutable(self):
        return False
class array_agg(expression.ColumnElement):
    """Column element for the PostgreSQL ``array_agg`` aggregate with an
    optional ORDER BY expression; results are typed as StaticArray."""
    type = StaticArray()
    def __init__(self, expr, order_by=None):
        # coerce both expressions into bind-aware clause elements
        self.expr = expression._literal_as_binds(expr)
        self.order_by = (expression._literal_as_binds(order_by)
                         if order_by is not None else None)
@compiles(array_agg, 'postgresql')
def _compile_array_agg_postgresql(element, compiler, **kw):
    """Render ``array_agg`` as an array(...) subselect that strips NULL
    entries from the aggregated result."""
    parts = ['array(select x from unnest(array_agg(',
             compiler.process(element.expr)]
    if element.order_by is not None:
        parts.append(' ORDER BY ')
        parts.append(compiler.process(element.order_by))
    parts.append(')) x WHERE x IS NOT NULL)')
    return ''.join(parts)
class array(expression.ColumnElement):
    """Column element wrapping an expression in a PostgreSQL
    ``array(...)`` constructor; results are typed as StaticArray."""
    type = StaticArray()
    def __init__(self, expr):
        # coerce the expression into a bind-aware clause element
        self.expr = expression._literal_as_binds(expr)
@compiles(array, 'postgresql')
def _compile_array_postgresql(element, compiler, **kw):
    """Render ``array(<inner expression>)`` for PostgreSQL."""
    return 'array({0})'.format(compiler.process(element.expr))
| mvidalgarcia/indico | indico/core/db/sqlalchemy/custom/static_array.py | Python | mit | 2,413 |
import random
import gmpy2
import binascii
# Placeholder flag; presumably substituted with the real one server-side.
FLAG = b"# Who knows :)"
# RSA key generation: two primes just above random 512-bit values,
# standard public exponent, private exponent from phi(n).
p = gmpy2.next_prime(random.SystemRandom().getrandbits(512))
q = gmpy2.next_prime(random.SystemRandom().getrandbits(512))
n = p * q
e = 65537
phi = (p-1) * (q-1)
d = gmpy2.invert(e, phi)
print('''
Welcome to our RSA Secure Oracle!
We have anti-hacker protection and a bit of obscurity as you don't even have the
full public key!
TODO: check this is actually secure.

Public key: e=%d, n=<REDACTED>
''' % e)
# Interactive oracle loop: raw encryption, raw decryption (with a naive
# flag filter), and an encrypted copy of the flag.
while True:
    print('''
1) Encrypt data
2) Decrypt data
3) Get encrypted flag
''')
    choice = int(input('> '))
    if choice == 1:
        # raw textbook-RSA encryption of an attacker-chosen integer
        print('Please gimme the message as int:')
        msg = int(input('> '))
        print(pow(msg, e, n))
    elif choice == 2:
        # raw decryption oracle; the only guard is a scan of the
        # decrypted bytes for the literal flag prefix b'HCL8{'
        print('Please gimme the message as int:')
        msg = int(input('> '))
        output = int(pow(msg, d, n))
        if b'HCL8{' in output.to_bytes(output.bit_length() // 8 + 1, byteorder='big'):
            print('Hacker detected!')
            exit(1)
        print(output)
    elif choice == 3:
        # flag encrypted under the same key the oracle decrypts with
        print('Here\'s your flag, not that you can do much with it:')
        print(pow(int.from_bytes(FLAG, byteorder='big'), e, n))
| Qwaz/solved-hacking-problem | GoogleCTF/2020 Hackceler8/chals/in-game-ctf-12/chal.py | Python | gpl-2.0 | 1,190 |
# -*- coding: utf-8 -*-
# This file is part of visvalingamwyatt.
# https://github.com/fitnr/visvalingamwyatt
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, fitnr <contact@fakeisthenewreal.org>
"""visvalingamwyatt module tests"""
import json
import os
import unittest
from collections import namedtuple
import numpy as np
import visvalingamwyatt as vw
from visvalingamwyatt import __main__ as cli
class TestVW(unittest.TestCase):
def setUp(self):
self.samplefile = os.path.join(os.path.dirname(__file__), 'data', 'sample.json')
with open(self.samplefile) as f:
self.fixture = json.load(f).get('features')[0]
def standard(self, **kwargs):
result = vw.simplify_feature(self.fixture, **kwargs)
self.assertIn('geometry', result)
self.assertIn('properties', result)
self.assertEqual(result['properties'], self.fixture['properties'])
self.assertEqual(self.fixture['geometry']['type'], result['geometry']['type'])
self.assertEqual(
self.fixture['geometry']['coordinates'][0],
result['geometry']['coordinates'][0],
)
self.assertGreater(
len(self.fixture['geometry']['coordinates']),
len(result['geometry']['coordinates']),
)
return result
def testSimplifyFeature(self):
self.standard()
def testSimplifyFeatureThreshold(self):
self.standard(threshold=0.1)
def testSimplifyFeatureRatio(self):
result = self.standard(ratio=0.1)
b = vw.simplify_feature(self.fixture, ratio=0.90)
assert len(b['geometry']['coordinates']) > len(
result['geometry']['coordinates']
)
for i, j in zip(range(1, 9), range(2, 10)):
r = vw.simplify_feature(self.fixture, ratio=float(i) / 10)
s = vw.simplify_feature(self.fixture, ratio=float(j) / 10)
assert len(r['geometry']['coordinates']) <= len(
s['geometry']['coordinates']
)
def testSimplifyFeatureNumber(self):
result = self.standard(number=10)
self.assertEqual(len(result['geometry']['coordinates']), 10)
def test3dCoords(self):
coordinates = [
[0.0, 0.0, 0.0],
[1.1, 0, 1],
[2.1, 3, 0],
[4.1, 5, 10],
[1.1, 2, 0],
[5.1, 2, 0],
]
a = vw.simplify(coordinates)
self.assertEqual(a[0], [0, 0, 0])
self.assertLessEqual(len(a), len(coordinates))
def testSimplifyTupleLike(self):
Point = namedtuple("Point", ("x", "y"))
# coordinates are in the shape
#
# c
# b d
# a e
#
# so b and d are eliminated
a, b, c, d, e = Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 1), Point(4, 0)
inp = [a, b, c, d, e]
expected_output = np.array([a, c, e])
actual_output = vw.simplify(inp, threshold=0.001)
self.assertTrue(np.array_equal(actual_output, expected_output))
def testSimplifyIntegerCoords(self):
# coordinates are in the shape
#
# c
# b d
# a e
#
# so b and d are eliminated
a, b, c, d, e = (0, 0), (1, 1), (2, 2), (3, 1), (4, 0)
inp = [a, b, c, d, e]
expected_output = np.array([a, c, e])
actual_output = vw.simplify(inp, threshold=0.001)
self.assertTrue(np.array_equal(actual_output, expected_output))
def testSimplifyClosedFeature(self):
'''When simplifying geometries with closed rings (Polygons and MultiPolygons),
the first and last points in each ring should remain the same'''
test_ring = [
[121.20803833007811, 24.75431413309125],
[121.1846923828125, 24.746831298412058],
[121.1517333984375, 24.74059525872194],
[121.14486694335936, 24.729369599118222],
[121.12152099609375, 24.693191139677126],
[121.13525390625, 24.66449040712424],
[121.10504150390625, 24.66449040712424],
[121.10092163085936, 24.645768980151793],
[121.0748291015625, 24.615808859044243],
[121.09405517578125, 24.577099744289427],
[121.12564086914062, 24.533381526147682],
[121.14624023437499, 24.515889973088104],
[121.19018554687499, 24.528384188171866],
[121.19430541992186, 24.57959746772822],
[121.23687744140624, 24.587090339209634],
[121.24099731445311, 24.552119771544227],
[121.2451171875, 24.525885444592642],
[121.30279541015624, 24.55087064225044],
[121.27258300781251, 24.58958786341259],
[121.26708984374999, 24.623299562653035],
[121.32614135742188, 24.62579636412304],
[121.34674072265624, 24.602074737077242],
[121.36871337890625, 24.580846310771612],
[121.40853881835936, 24.653257887871963],
[121.40853881835936, 24.724380091871726],
[121.37283325195312, 24.716895455859337],
[121.3604736328125, 24.693191139677126],
[121.343994140625, 24.69942955501979],
[121.32888793945312, 24.728122241065808],
[121.3714599609375, 24.743089712134605],
[121.37695312499999, 24.77177232822881],
[121.35635375976562, 24.792968265314457],
[121.32476806640625, 24.807927923059236],
[121.29730224609375, 24.844072974931866],
[121.24923706054688, 24.849057671305268],
[121.24786376953125, 24.816653556469955],
[121.27944946289062, 24.79047481357294],
[121.30142211914061, 24.761796517185815],
[121.27258300781251, 24.73311159823193],
[121.25335693359374, 24.708162811665265],
[121.20391845703125, 24.703172454280217],
[121.19979858398438, 24.731864277701714],
[121.20803833007811, 24.75431413309125],
]
multipolygon = {"type": "MultiPolygon", "coordinates": [[test_ring]]}
number = vw.simplify_geometry(multipolygon, number=10)
self.assertEqual(
number['coordinates'][0][0][0], number['coordinates'][0][0][-1]
)
ratio = vw.simplify_geometry(multipolygon, ratio=0.3)
self.assertEqual(ratio['coordinates'][0][0][0], ratio['coordinates'][0][0][-1])
thres = vw.simplify_geometry(multipolygon, threshold=0.01)
self.assertEqual(thres['coordinates'][0][0][0], thres['coordinates'][0][0][-1])
number = vw.simplify_geometry(multipolygon, number=10)
self.assertEqual(
number['coordinates'][0][0][0], number['coordinates'][0][0][-1]
)
ratio = vw.simplify_geometry(multipolygon, ratio=0.3)
self.assertEqual(ratio['coordinates'][0][0][0], ratio['coordinates'][0][0][-1])
thres = vw.simplify_geometry(multipolygon, threshold=0.01)
self.assertEqual(thres['coordinates'][0][0][0], thres['coordinates'][0][0][-1])
def testCli(self):
    # Placeholder: the command-line interface is exercised by testSimplify.
    pass
def testSimplify(self):
    '''Use the command-line function to simplify the sample data.'''
    output = 'tmp.json'
    try:
        cli.simplify(self.samplefile, output, number=9)
        self.assertTrue(os.path.exists(output))
        # Reuse ``output`` rather than repeating the 'tmp.json' literal so
        # the path is defined in exactly one place.
        with open(output, 'r') as f:
            result = json.load(f)
        coords = result['features'][0]['geometry']['coordinates']
        self.assertEqual(len(coords), 9)
    finally:
        # Only remove the file if it was actually created; an unconditional
        # os.remove would raise FileNotFoundError here and mask the original
        # test failure when cli.simplify itself fails.
        if os.path.exists(output):
            os.remove(output)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| fitnr/visvalingamwyatt | tests/test_vw.py | Python | mit | 7,733 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
'''
sota.sha256
'''
from .sha256 import sha256
| sota/lang | sota/sha256/__init__.py | Python | mit | 97 |
import logging
from csirtgsdk.client.http import HTTP as Client
class Search(object):
    """
    Search Object class: thin wrapper around the /search API endpoint.
    """
    def __init__(self, client=None):
        """
        :param client: client.Client object; a default HTTP client is
            created lazily when none is supplied
        :return: Search Object
        """
        self.logger = logging.getLogger(__name__)
        # BUGFIX: the old signature used ``client=Client()``, which built a
        # single shared HTTP client at class-definition (import) time — the
        # classic mutable/eager default-argument pitfall. Construct the
        # default per instance instead; explicit callers are unaffected.
        self.client = client if client is not None else Client()

    def search(self, q, limit=None):
        """
        Performs a search against the /search endpoint

        :param q: query to be searched for [STRING]
        :param limit: limit the results [INT]
        :return: list of dicts
        """
        uri = '{}/search'.format(self.client.remote)
        body = self.client.get(uri, {'q': q, 'limit': limit})
        return body
| csirtgadgets/csirtgsdk-py | csirtgsdk/search.py | Python | mpl-2.0 | 730 |
from setuptools import setup
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original passed ``open('README.md').read()``
# inline, leaving the handle open until interpreter exit).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='vmfusion',
    version='0.2.0',
    author='Mario Steinhoff',
    author_email='steinhoff.mario@gmail.com',
    packages=['vmfusion'],
    url='https://github.com/msteinhoff/vmfusion-python',
    license='LICENSE.txt',
    description='A python API for the VMware Fusion CLI tools.',
    long_description=long_description,
    install_requires=[
        "pyparsing >= 2.0.1"
    ]
)
| msteinhoff/vmfusion-python | setup.py | Python | mit | 438 |
#!/usr/bin/env python
# A tool to parse ASTMatchers.h and update the documentation in
# ../LibASTMatchersReference.html automatically. Run from the
# directory in which this file is located to update the docs.
import collections
import re
import urllib2
MATCHERS_FILE = '../../include/clang/ASTMatchers/ASTMatchers.h'
# Each matcher is documented in one row of the form:
# result | name | argA
# The subsequent row contains the documentation and is hidden by default,
# becoming visible via javascript when the user clicks the matcher name.
TD_TEMPLATE="""
<tr><td>%(result)s</td><td class="name" onclick="toggle('%(id)s')"><a name="%(id)sAnchor">%(name)s</a></td><td>%(args)s</td></tr>
<tr><td colspan="4" class="doc" id="%(id)s"><pre>%(comment)s</pre></td></tr>
"""
# We categorize the matchers into these three categories in the reference:
node_matchers = {}
narrowing_matchers = {}
traversal_matchers = {}
# We output multiple rows per matcher if the matcher can be used on multiple
# node types. Thus, we need a new id per row to control the documentation
# pop-up. ids[name] keeps track of those ids.
ids = collections.defaultdict(int)
# Cache for doxygen urls we have already verified.
doxygen_probes = {}
def esc(text):
"""Escape any html in the given text."""
text = re.sub(r'&', '&', text)
text = re.sub(r'<', '<', text)
text = re.sub(r'>', '>', text)
def link_if_exists(m):
name = m.group(1)
url = 'http://clang.llvm.org/doxygen/classclang_1_1%s.html' % name
if url not in doxygen_probes:
try:
print 'Probing %s...' % url
urllib2.urlopen(url)
doxygen_probes[url] = True
except:
doxygen_probes[url] = False
if doxygen_probes[url]:
return r'Matcher<<a href="%s">%s</a>>' % (url, name)
else:
return m.group(0)
text = re.sub(
r'Matcher<([^\*&]+)>', link_if_exists, text)
return text
def extract_result_types(comment):
  """Extracts a list of result types from the given comment.

  We allow annotations in the comment of the matcher to specify what
  nodes a matcher can match on. Those comments have the form:
    Usable as: Any Matcher | (Matcher<T1>[, Matcher<t2>[, ...]])

  Returns ['*'] in case of 'Any Matcher', or ['T1', 'T2', ...].
  Returns the empty list if no 'Usable as' specification could be
  parsed.
  """
  result_types = []
  m = re.search(r'Usable as: Any Matcher[\s\n]*$', comment, re.S)
  if m:
    return ['*']
  # Peel Matcher<...> entries off the end of the comment one at a time.
  while True:
    m = re.match(r'^(.*)Matcher<([^>]+)>\s*,?[\s\n]*$', comment, re.S)
    if not m:
      if re.search(r'Usable as:\s*$', comment):
        return result_types
      # BUGFIX: the docstring promises an empty list when no 'Usable as'
      # annotation is present, but the old code returned None here.
      # Callers only test truthiness, so [] is a safe, consistent value.
      return []
    result_types += [m.group(2)]
    comment = m.group(1)
def strip_doxygen(comment):
  r"""Return *comment* with all backslash-escaped doxygen words removed."""
  # Lines consisting solely of a doxygen keyword disappear entirely.
  without_keyword_lines = re.sub(r'^\\[^\s]+\n', '', comment, flags=re.M)
  # Remaining inline commands are dropped together with their trailing
  # whitespace.
  return re.sub(r'\\[^\s]+\s+', '', without_keyword_lines)
def unify_arguments(args):
  """Strip implementation noise the reader does not care about from an
  argument list."""
  # Drop namespace qualifiers and const/reference decorations.
  cleaned = re.sub(r'internal::', '', args)
  cleaned = re.sub(r'const\s+', '', cleaned)
  cleaned = re.sub(r'&', ' ', cleaned)
  # Collapse the internal matcher type aliases (M, M1, M2, ...) into a
  # generic Matcher<*> spelling.
  return re.sub(r'(^|\s)M\d?(\s)', r'\1Matcher<*>\2', cleaned)
def add_matcher(result_type, name, args, comment, is_dyncast=False):
  """Register one matcher row (plus its hidden doc row) in the
  appropriate category table."""
  if name == 'id':
    # FIXME: Figure out whether we want to support the 'id' matcher.
    return
  # A matcher may be documented once per usable node type, so each row
  # needs its own id for the javascript doc toggle.
  row_id = '%s%d' % (name, ids[name])
  ids[name] += 1
  cleaned_args = unify_arguments(args)
  html = TD_TEMPLATE % {
      'result': esc('Matcher<%s>' % result_type),
      'name': name,
      'args': esc(cleaned_args),
      'comment': esc(strip_doxygen(comment)),
      'id': row_id,
  }
  key = result_type + name
  if is_dyncast:
    node_matchers[key] = html
  elif 'Matcher<' not in cleaned_args or name in ('allOf', 'anyOf',
                                                  'anything', 'unless'):
    # Heuristic: matchers that take other matchers as arguments (and are
    # not node matchers) traverse, except for these known narrowing
    # combinators.
    narrowing_matchers[key] = html
  else:
    traversal_matchers[key] = html
def act_on_decl(declaration, comment, allowed_types):
  """Parse the matcher out of the given declaration and comment.

  If 'allowed_types' is set, it contains a list of node types the matcher
  can match on, as extracted from the static type asserts in the matcher
  definition.

  Three declaration shapes are recognized, tried in order; the first
  match wins and registers row(s) via add_matcher().
  """
  if declaration.strip():
    # Node matchers are defined by writing:
    #   VariadicDynCastAllOfMatcher<ResultType, ArgumentType> name;
    m = re.match(r""".*VariadicDynCastAllOfMatcher\s*<
                       \s*([^\s,]+)\s*,
                       \s*([^\s>]+)\s*>
                       \s*([^\s;]+)\s*;\s*$""", declaration, flags=re.X)
    if m:
      result, inner, name = m.groups()
      add_matcher(result, name, 'Matcher<%s>...' % inner,
                  comment, is_dyncast=True)
      return

    # Parse the various matcher definition macros.
    # Groups: 1=POLYMORPHIC_ marker, 2=_P marker, 3=arity suffix
    # ('', '2', ...), 4=result type (optional), 5=name, 6..9=up to two
    # (type, name) parameter pairs.
    m = re.match(r"""^\s*AST_(POLYMORPHIC_)?MATCHER(_P)?(.?)\(
                      (?:\s*([^\s,]+)\s*,)?
                      \s*([^\s,]+)\s*
                      (?:,\s*([^\s,]+)\s*
                      ,\s*([^\s,]+)\s*)?
                      (?:,\s*([^\s,]+)\s*
                      ,\s*([^\s,]+)\s*)?
                    \)\s*{\s*$""", declaration, flags=re.X)
    if m:
      # p (the _P marker) is captured but unused; n is the arity suffix.
      p, n, result, name = m.groups()[1:5]
      args = m.groups()[5:]
      if not result:
        # Polymorphic matchers carry no explicit result type; it must have
        # been collected from the static asserts in the definition body.
        if not allowed_types:
          raise Exception('Did not find allowed result types for: %s' % name)
        result_types = allowed_types
      else:
        result_types = [result]
      if n not in ['', '2']:
        raise Exception('Cannot parse "%s"' % declaration)
      # Re-join the (type, name) pairs, skipping absent optional groups.
      args = ', '.join('%s %s' % (args[i], args[i+1])
                       for i in range(0, len(args), 2) if args[i])
      for result_type in result_types:
        add_matcher(result_type, name, args, comment)
      return

    # Parse free standing matcher functions, like:
    #   Matcher<ResultType> Name(Matcher<ArgumentType> InnerMatcher) {
    m = re.match(r"""^\s*(.*)\s+
                      ([^\s\(]+)\s*\(
                      (.*)
                    \)\s*{""", declaration, re.X)
    if m:
      result, name, args = m.groups()
      args = ', '.join(p.strip() for p in args.split(','))
      m = re.match(r'.*\s+internal::Matcher<([^>]+)>$', result)
      if m:
        result_types = [m.group(1)]
      else:
        # Fall back to the 'Usable as:' annotation in the doc comment.
        result_types = extract_result_types(comment)
      if not result_types:
        if not comment:
          # Only overloads don't have their own doxygen comments; ignore those.
          print 'Ignoring "%s"' % name
        else:
          print 'Cannot determine result type for "%s"' % name
      else:
        for result_type in result_types:
          add_matcher(result_type, name, args, comment)
    else:
      print '*** Unparsable: "' + declaration + '" ***'
def sort_table(matcher_type, matcher_map):
  """Render the rows in *matcher_map* as one alphabetically sorted html
  table fragment, wrapped in START/END marker comments."""
  rows = ''.join(matcher_map[key] + '\n' for key in sorted(matcher_map))
  # The marker comments let the table be spliced into the reference doc.
  return ('<!-- START_%(type)s_MATCHERS -->\n' +
          '%(table)s' +
          '<!--END_%(type)s_MATCHERS -->') % {
      'type': matcher_type,
      'table': rows,
  }
# Parse the ast matchers.
# We alternate between two modes:
# body = True: We parse the definition of a matcher. We need
# to parse the full definition before adding a matcher, as the
# definition might contain static asserts that specify the result
# type.
# body = False: We parse the comments and declaration of the matcher.
comment = ''
declaration = ''
allowed_types = []
body = False
for line in open(MATCHERS_FILE).read().splitlines():
  if body:
    # Inside a matcher body: look for the closing brace and collect
    # allowed node types from static asserts along the way.
    if line.strip() and line[0] == '}':
      if declaration:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []
      body = False
    else:
      m = re.search(r'is_base_of<([^,]+), NodeType>', line)
      if m and m.group(1):
        allowed_types += [m.group(1)]
    continue
  # Outside a body: accumulate doc comments and declaration text.
  if line.strip() and line.lstrip()[0] == '/':
    comment += re.sub(r'/+\s?', '', line) + '\n'
  else:
    declaration += ' ' + line
    # A blank line, ';' or '{' terminates the declaration.
    if ((not line.strip()) or
        line.rstrip()[-1] == ';' or
        line.rstrip()[-1] == '{'):
      if line.strip() and line.rstrip()[-1] == '{':
        body = True
      else:
        act_on_decl(declaration, comment, allowed_types)
        comment = ''
        declaration = ''
        allowed_types = []

node_matcher_table = sort_table('DECL', node_matchers)
narrowing_matcher_table = sort_table('NARROWING', narrowing_matchers)
traversal_matcher_table = sort_table('TRAVERSAL', traversal_matchers)

# Splice the freshly generated tables between the marker comments in the
# existing reference document, leaving everything else untouched.
reference = open('../LibASTMatchersReference.html').read()
reference = re.sub(r'<!-- START_DECL_MATCHERS.*END_DECL_MATCHERS -->',
                   '%s', reference, flags=re.S) % node_matcher_table
reference = re.sub(r'<!-- START_NARROWING_MATCHERS.*END_NARROWING_MATCHERS -->',
                   '%s', reference, flags=re.S) % narrowing_matcher_table
reference = re.sub(r'<!-- START_TRAVERSAL_MATCHERS.*END_TRAVERSAL_MATCHERS -->',
                   '%s', reference, flags=re.S) % traversal_matcher_table

with open('../LibASTMatchersReference.html', 'w') as output:
  output.write(reference)
| jeltz/rust-debian-package | src/llvm/tools/clang/docs/tools/dump_ast_matchers.py | Python | apache-2.0 | 9,746 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
本测试模块用于测试与 :class:`sqlite4dummy.schema.MetaData` 有关的方法
class, method, func, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from sqlite4dummy import *
from sqlite4dummy.tests.basetest import *
from datetime import datetime, date
import unittest
class MetaDataUnittest(unittest.TestCase):
    """Unittest of :class:`sqlite4dummy.schema.MetaData`.

    Unit tests for the methods of MetaData.
    """
    def setUp(self):
        # Each test gets a fresh in-memory database plus a table covering
        # every supported dtype, both with and without default values.
        self.engine = Sqlite3Engine(":memory:", autocommit=False)
        self.metadata = MetaData()
        self.int_ = 1
        self.float_ = 3.14
        self.str_ = r"""\/!@#$%^&*()_+-=~`|[]{}><,.'"?"""
        self.bytes_ = "abc".encode("utf-8")
        self.date_ = date(2000, 1, 1)
        self.datetime_ = datetime(2015, 10, 1, 18, 30, 0, 123)
        self.pickle_ = [1, 2, 3]
        self.test = Table("test", self.metadata,
            Column("_id", dtype.INTEGER, primary_key=True, nullable=False),
            Column("_int_with_default", dtype.INTEGER, default=self.int_),
            Column("_float_with_default", dtype.REAL, default=self.float_),
            Column("_str_with_default", dtype.TEXT, default=self.str_),
            Column("_bytes_with_default", dtype.BLOB, default=self.bytes_),
            Column("_date_with_default", dtype.DATE, default=self.date_),
            Column("_datetime_with_default", dtype.DATETIME, default=self.datetime_),
            Column("_pickle_with_default", dtype.PICKLETYPE, default=self.pickle_),
            Column("_int", dtype.INTEGER),
            Column("_float", dtype.REAL),
            Column("_str", dtype.TEXT),
            Column("_bytes", dtype.BLOB),
            Column("_date", dtype.DATE),
            Column("_datetime", dtype.DATETIME),
            Column("_pickle", dtype.PICKLETYPE),
        )
        self.metadata.create_all(self.engine)
        # A unique index mixing ascending and descending columns.
        self.index = Index("test_index", self.metadata,
            [self.test.c._int,
             self.test.c._float.desc(),
             self.test.c._date,
             desc(self.test.c._datetime)],
            table_name=self.test,
            unique=True,
            skip_validate=False,
        )
        self.index.create(self.engine)
        # Sanity checks: 15 columns were created and exactly one explicit
        # (non-auto) index exists.
        self.assertEqual(
            len(self.engine.execute("PRAGMA table_info(test);").fetchall()),
            15,
        )
        self.assertEqual(
            len(self.engine.execute(
                "SELECT * FROM sqlite_master "
                "WHERE type = 'index' AND sql NOT NULL;").fetchall()),
            1,
        )

    def tearDown(self):
        # Dispose of the in-memory database connection.
        self.engine.close()

    def test_drop_all(self):
        """Verify that drop_all() drops every table.
        """
        self.assertEqual(
            len(self.engine.execute(
                "SELECT * FROM sqlite_master WHERE type = 'table';").fetchall()),
            1,
        )
        self.metadata.drop_all(self.engine)
        self.assertEqual(
            len(self.engine.execute(
                "SELECT * FROM sqlite_master WHERE type = 'table';").fetchall()),
            0,
        )
        self.assertEqual(len(self.metadata.t), 0) # no tables remain

    def test_str_repr(self):
        # print(self.metadata)
        # print(repr(self.metadata))
        pass

    def test_get_table(self):
        """Verify that MetaData.get_table(table) returns the right Table.
        """
        self.assertEqual(self.metadata.get_table("test"), self.test)
        self.assertRaises(KeyError,
                          self.metadata.get_table, "not_existing_table")

    def test_get_index(self):
        """Verify that MetaData.get_index(index) returns the right Index.
        """
        self.assertEqual(self.metadata.get_index("test_index"), self.index)
        self.assertRaises(KeyError,
                          self.metadata.get_index, "not_existing_index")

    def test_reflect(self):
        """Verify that MetaData.reflect(engine) correctly recovers the
        metadata of Table, Column and Index, including Column default
        values.
        """
        second_metadata = MetaData()
        second_metadata.reflect(self.engine,
            pickletype_columns=[
                "test._pickle_with_default",
                "test._pickle",
            ])
        self.assertEqual(second_metadata.get_table("test").\
            c._int_with_default.default, self.int_)
        self.assertEqual(second_metadata.get_table("test").\
            c._float_with_default.default, self.float_)
        self.assertEqual(second_metadata.get_table("test").\
            c._str_with_default.default, self.str_)
        self.assertEqual(second_metadata.get_table("test").\
            c._bytes_with_default.default, self.bytes_)
        self.assertEqual(second_metadata.get_table("test").\
            c._date_with_default.default, self.date_)
        self.assertEqual(second_metadata.get_table("test").\
            c._datetime_with_default.default, self.datetime_)
        self.assertEqual(second_metadata.get_table("test").\
            c._pickle_with_default.default, self.pickle_)
        self.assertEqual(second_metadata.get_index("test_index").\
            index_name, "test_index")
        self.assertEqual(second_metadata.get_index("test_index").\
            table_name, "test")
        self.assertEqual(second_metadata.get_index("test_index").\
            unique, True)
        self.assertEqual(second_metadata.get_index("test_index").\
            params, self.index.params)
if __name__ == "__main__":
unittest.main() | MacHu-GWU/sqlite4dummy-project | sqlite4dummy/tests/functionality/test_MetaData.py | Python | mit | 5,883 |
'''
Copyright (C) 2013-2014 Robert Powers
This file is part of MikeNetGUI.
MikeNetGUI is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MikeNetGUI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MikeNetGUI. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtGui,QtCore
from custom_widgets import CustomListWidget,CustomWiringWidget
from custom_widgets import CustomTreeWidget,CustomPhaseWidget
from custom_widgets import CustomRecordingWidget,CustomComponentSelectionWidget
from custom_widgets import CustomInteractiveParamWidget,CustomTestSetSelectionWidget
from custom_widgets import CustomWeightNoiseWidget,CustomActivationNoiseWidget
from custom_widgets import CustomInputNoiseWidget,CustomApplyIterationWidget
from editor_windows import DefaultsEditor
from multiproc import ScriptThread
import psutil
import sys
import pydot
# test pydot to find out if Graphviz is installed
if pydot.find_graphviz():
pass
else:
print 'Graphviz executables not found. "Visualize" feature will be disabled.'
from matplotlib import pyplot
from scipy import misc
import gen_utils as guts
import os
import dialogs
from time import time
class ScriptTab(QtGui.QWidget):
    '''Creates a tab with tools for script-level editing.

    The widget is arranged in two columns. Each column is arranged using
    a vertical layout.
    '''
    def __init__(self,script):
        super(ScriptTab, self).__init__()
        # The script model object this tab edits.
        self.script = script
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        #..............................................................
        # LAYOUTS
        # create main horizontal layout
        h_layout = QtGui.QHBoxLayout()
        # left and right column layouts
        left_v_layout = QtGui.QVBoxLayout()
        right_v_layout = QtGui.QVBoxLayout()
        # sublayouts for timeline control, script property editing, start button
        time_layout = QtGui.QGridLayout()
        props_layout = QtGui.QFormLayout()
        start_layout = QtGui.QHBoxLayout()
        #..............................................................
        # HIERARCHICAL TREE VIEW OBJECT (ENTIRE LEFT COLUMN)
        # see custom_widgets module for CustomTreeWidget definitions
        self.tree_view = CustomTreeWidget(self,script)
        #..............................................................
        # TIMELINE EDITING CONTROLS (ADD RUN/ITERATION, REMOVE, ETC...)
        # create timeline editing buttons
        self.add_run_btn = QtGui.QPushButton('Add Run')
        self.add_iter_btn = QtGui.QPushButton('Add Iterator')
        self.del_btn = QtGui.QPushButton('Remove Selected')
        self.del_btn.setEnabled(False)
        self.dup_btn = QtGui.QPushButton('Duplicate Selected')
        self.dup_btn.setEnabled(False)
        # create timeline editing group box
        timeline_container = QtGui.QHBoxLayout() # to shift everything over
        timeline = QtGui.QGroupBox('Edit Script Timeline')
        timeline.setAlignment(QtCore.Qt.AlignHCenter)
        time_layout.setSpacing(10)
        time_layout.addWidget(self.add_run_btn,0,0,1,1)
        time_layout.addWidget(self.add_iter_btn,1,0,1,1)
        time_layout.addWidget(self.del_btn,0,1,1,1)
        time_layout.addWidget(self.dup_btn,1,1,1,1)
        timeline.setLayout(time_layout)
        timeline_container.addWidget(timeline)
        #timeline_container.addStretch(1)
        # connect button signals
        self.add_run_btn.clicked.connect(self.tree_view.newRun)
        self.add_iter_btn.clicked.connect(self.tree_view.newIterator)
        self.dup_btn.clicked.connect(self.tree_view.duplicateCurrentObject)
        self.del_btn.clicked.connect(self.tree_view.removeCurrentObject)
        #..............................................................
        # SCRIPT PROPERTIES BOX
        # create script properties panel
        props = QtGui.QGroupBox('Script Properties')
        props.setAlignment(QtCore.Qt.AlignHCenter)
        script_name,script_box = self.script.getParameter('script_name').getWidget()
        #defaults_btn = QtGui.QPushButton('Edit global parameter defaults')
        #defaults_btn.clicked.connect(self.editDefaults)
        props_layout.addRow(script_name,script_box)
        #props_layout.addRow('',defaults_btn)
        props.setLayout(props_layout)
        # connect signals
        script_box.editingFinished.connect(self.updateTabName)
        #..............................................................
        # START BUTTON
        self.start_btn = StartButton(self)
        self.start_btn.clicked.connect(self.scanScript)
        #..............................................................
        # putting it all together
        self.setLayout(h_layout)
        h_layout.addLayout(left_v_layout)
        left_v_layout.addWidget(QtGui.QLabel('Script Timeline'))
        left_v_layout.addWidget(self.tree_view)
        h_layout.addLayout(right_v_layout)
        right_v_layout.addLayout(timeline_container)
        right_v_layout.addWidget(props)
        right_v_layout.addLayout(start_layout)
        start_layout.addStretch(1)
        start_layout.addWidget(self.start_btn)
        start_layout.addStretch(1)
        right_v_layout.addStretch(1)
        # initialize
        self.tree_view.syncToModel()

    def scanScript(self):
        # Validate the script in a background thread before running;
        # reportScriptIssues decides whether to actually start.
        self.start_btn.setScanning()
        self.scan_thread = dialogs.ScanningThread(self.script)
        self.scan_thread.finished.connect(self.reportScriptIssues)
        self.scan_thread.start()

    def startScript(self):
        self.start_btn.setInProgress()
        # start script run
        self.prog = dialogs.ProgressWindow(self.script.getGUI())
        self.prog.show()
        self.prog.raise_()
        self.prog.activateWindow()
        self.script_thread = ScriptThread(self.script)
        self.script_thread.finished.connect(self.notifyScriptEnded)
        # time the entire script
        self.tic = time()
        self.script_thread.start()

    def abortScript(self):
        # NOTE(review): early_abort is assigned but never used.
        early_abort = True
        self.script_thread.quit()
        # kill all processes
        for proc in psutil.process_iter():
            # NOTE(review): psutil changed Process.name from attribute to
            # method in v2.0; this attribute access assumes an old psutil —
            # confirm against the pinned dependency version.
            if 'mikenet_master' in proc.name:
                proc.kill()
            #if 'mikenet_master' in proc.name():
            #    print 'killed a process'
            #    proc.kill()
            #try:
            #    print proc.name()
            #    if 'mikenet_master' in proc.name():
            #        proc.kill()
            #except:
            #    print 'excepted process search'

    @QtCore.Slot()
    def reportScriptIssues(self):
        # Called when the scan thread finishes: show issues, or start.
        if self.scan_thread.issues:
            screener = dialogs.ScriptScreener(self.script.getGUI(),
                                              self.scan_thread.issues)
            screener.exec_()
            self.start_btn.setFree()
        else:
            self.startScript()

    @QtCore.Slot(int)
    def updateCores(self,i):
        self.prog.updateCores(i)

    @QtCore.Slot(int,int)
    def updateTotalProgress(self,complete,total):
        toc = time()
        self.prog.updateTotalProgress(complete,total,toc-self.tic)

    @QtCore.Slot(int,int)
    def updateSuccessRatio(self,good,total):
        self.prog.updateSuccessRatio(good,total)

    def notifyScriptEnded(self):
        # gets activated after script runs and database is finished updating
        toc = time()
        self.prog.simulationOver(toc-self.tic)
        self.script.getGUI().emailNotify(toc-self.tic)
        self.start_btn.setFree()

    def getNewRunNames(self):
        return self.tree_view.getNewRunNames()

    def getIteratorNames(self):
        return self.tree_view.getIteratorNames()

    def editDefaults(self):
        # Open the modal editor for global parameter defaults.
        ed = DefaultsEditor(self.script.getGUI(),self.script)
        ed.exec_()

    def refreshTabContents(self):
        self.tree_view.syncToModel()

    def getTabName(self):
        return str('Script Object: ' + self.script.getValueOf('script_name'))

    def updateTabName(self):
        self.script.getGUI().getMainTabs().refreshTabNames()

    def getLevel(self):
        return 0
class StartButton(QtGui.QPushButton):
    """Large start button with hover feedback and busy/free states.

    States: green (free / idle), orange (scanning or running, disabled),
    light blue (hover while free).
    """
    def __init__(self,parent):
        super(StartButton, self).__init__('Start Script',parent=parent)
        self.setStyleSheet(self._styleSheet('green'))
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Maximum)
        # Poll for hover every 10 ms (QPushButton has no hover signal here).
        self.timer = QtCore.QTimer(self)
        self.timer.start(10)
        self.timer.timeout.connect(self.hover)

    @staticmethod
    def _styleSheet(color):
        # The six state changes previously repeated this 8-line sheet
        # verbatim, differing only in background color; build it once.
        return ('background-color: %s;' % color +
                'border-style: outset;' +
                'border-width: 2px;' +
                'border-radius: 10px;' +
                'border-color: white;' +
                'font: bold 14px;' +
                'min-width: 10em;' +
                'padding: 6px;')

    def sizeHint(self):
        return QtCore.QSize(self.fontMetrics().width('xxxxxxxxxxxxxxxxxxxxx'),
                            3*self.fontMetrics().height())

    def hover(self):
        # Restart the poll timer and repaint according to mouse position.
        self.timer.start(10)
        if self.underMouse():
            self.setStyleSheet(self._styleSheet('rgb(152,245,255)'))
        else:
            self.setStyleSheet(self._styleSheet('green'))

    def setScanning(self):
        # Busy state while the pre-run error scan is in progress.
        self.timer.stop()
        self.setStyleSheet(self._styleSheet('orange'))
        self.setText('Checking for errors...')
        self.setEnabled(False)
        self.repaint()

    def setInProgress(self):
        # Busy state while the script itself is running.
        self.timer.stop()
        self.setStyleSheet(self._styleSheet('orange'))
        self.setText('In progress...')
        self.setEnabled(False)
        self.repaint()

    def setFree(self):
        # Return to the idle state and resume hover polling.
        self.timer.start(10)
        self.setStyleSheet(self._styleSheet('green'))
        self.setText('Start Script')
        self.setEnabled(True)
        self.repaint()
class RunTab(QtGui.QWidget):
def __init__(self,run):
super(RunTab, self).__init__()
self.run = run
self.current_phase = 0
self.current_phase_item = None
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
#..............................................................
# LAYOUTS
# main layout
main_layout = QtGui.QVBoxLayout()
# properties layout
properties_layout = QtGui.QHBoxLayout()
# network design layouts: first vertical layout for everything
net_design_layout = QtGui.QVBoxLayout()
# 1) layout for group editing controls
ge_layout = QtGui.QFormLayout()
# 2) layout for wiring controls
wc_layout = QtGui.QHBoxLayout()
# 3) for group property controls in a single row
group_layout = QtGui.QHBoxLayout()
# parameter set layouts: first a main horizontal layout for everything
h_phase_layout = QtGui.QHBoxLayout()
# 1) layouts for phase editing controls
ph_edit_layout = QtGui.QVBoxLayout()
# 2) layout for parameter set (training and/or testing) editing controls
self.pset_layout = QtGui.QVBoxLayout()
#..............................................................
# TAB WIDGET
# create tab widget that divides this larger tab
self.tab_divider = QtGui.QTabWidget()
# create blank widgets for each subsection
properties_widget = QtGui.QWidget(self)
properties_widget.setLayout(properties_layout)
net_design_widget = QtGui.QWidget(self)
net_design_widget.setLayout(net_design_layout)
parameter_sets_widget = QtGui.QWidget(self)
parameter_sets_widget.setLayout(h_phase_layout)
self.tab_divider.addTab(properties_widget,'Run Properties')
self.tab_divider.addTab(net_design_widget,'Groups and Connections')
self.tab_divider.addTab(parameter_sets_widget,'Events')
#..............................................................
# WIRING WIDGET
self.wiring = CustomWiringWidget(self,run)
# create wiring controls
self.add_group_btn = QtGui.QPushButton('Add Group')
self.delete_group_btn = QtGui.QPushButton('Delete Group')
self.delete_group_btn.setEnabled(False)
self.visualize_btn = QtGui.QPushButton('Visualize')
if pydot.find_graphviz():
pass
else:
self.visualize_btn.setEnabled(False)
self.wire_helper = QtGui.QLabel("Click a group name to select group. Click a color cell to toggle connection.")
wc_layout.addWidget(QtGui.QLabel('Network Adjacency Matrix:'))
wc_layout.addWidget(self.add_group_btn)
wc_layout.addWidget(self.delete_group_btn)
wc_layout.addWidget(self.visualize_btn)
wc_layout.addStretch(1)
wc_layout.addWidget(self.wire_helper)
# connect signals
self.add_group_btn.clicked.connect(self.wiring.newGroup)
self.delete_group_btn.clicked.connect(self.wiring.deleteGroup)
self.visualize_btn.clicked.connect(self.visualizeNet)
#..............................................................
# GROUP DATA CONTROLS
self.group_edit = QtGui.QGroupBox('Group Properties')
self.group_edit.setAlignment(QtCore.Qt.AlignHCenter)
self.group_name = QtGui.QLineEdit()
self.group_name.setEnabled(False)
self.group_units = QtGui.QSpinBox()
self.group_units.setMinimum(0)
self.group_units.setMaximum(10000)
self.group_units.setEnabled(False)
self.group_activation_type = QtGui.QComboBox()
self.group_activation_type.addItem('LOGISTIC_ACTIVATION')
self.group_activation_type.addItem('TANH_ACTIVATION')
self.group_activation_type.setEnabled(False)
self.group_error_type = QtGui.QComboBox()
self.group_error_type.addItem('SUM_SQUARED_ERROR')
self.group_error_type.addItem('CROSS_ENTROPY_ERROR')
self.group_error_type.setEnabled(False)
ge_layout.addRow('Name',self.group_name)
ge_layout.addRow('Units',self.group_units)
ge_layout.addRow('Activation type',self.group_activation_type)
ge_layout.addRow('Error type',self.group_error_type)
self.group_edit.setLayout(ge_layout)
# connect signals
self.group_name.editingFinished.connect(lambda:
self.wiring.setGroupProperty('name',
self.group_name.text()))
self.group_units.valueChanged.connect(lambda:
self.wiring.setGroupProperty('units',
self.group_units.value()))
self.group_activation_type.currentIndexChanged.connect(lambda i:
self.wiring.setGroupProperty('activation_type',
self.group_activation_type.itemText(i)))
self.group_error_type.currentIndexChanged.connect(lambda i:
self.wiring.setGroupProperty('error_computation_type',
self.group_error_type.itemText(i)))
#..............................................................
# RUN-LEVEL DATA
# create run-level data controls
self.run_form = QtGui.QFormLayout()
name_lab,self.name_box = self.run.getParameter('run_name').getWidget()
seed_lab,self.seed_box = self.run.getParameter('seed').getWidget()
ticks_lab,self.ticks_box = self.run.getParameter('ticks').getWidget()
range_lab,self.range_box = self.run.getParameter('weight_range').getWidget()
bias_lab,self.bias_box = self.run.getParameter('bias_value').getWidget()
self.run_form.addRow(name_lab,self.name_box)
self.run_form.addRow(seed_lab,self.seed_box)
self.run_form.addRow(ticks_lab,self.ticks_box)
self.run_form.addRow(range_lab,self.range_box)
self.run_form.addRow(bias_lab,self.bias_box)
# connect signals
# NOTE: no need to send signlas to update data changes...widgets are tied to the
# model data automatically...in this case you only have to update the tab label
self.name_box.editingFinished.connect(self.run.getGUI().getMainTabs().refreshTabNames)
#..............................................................
# PHASE-LEVEL DATA
phases = QtGui.QGroupBox('Interleaving and multi-phase control')
phases.setAlignment(QtCore.Qt.AlignHCenter)
self.phase_table = CustomPhaseWidget(self,run)
self.add_phase_btn = QtGui.QPushButton('Add Phase')
self.dup_phase_btn = QtGui.QPushButton('Duplicate Selected')
self.delete_phase_btn = QtGui.QPushButton('Remove Selected')
ph_btn_layout = QtGui.QHBoxLayout()
ph_btn_layout.addWidget(self.add_phase_btn)
ph_btn_layout.addWidget(self.dup_phase_btn)
ph_btn_layout.addWidget(self.delete_phase_btn)
ph_what_btn = QtGui.QLabel('<qt><a href="http://dummytext.com/">What is This?</a></qt>')
ph_what_btn.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
ph_what_btn.linkActivated.connect(self.phaseWhat)
ph_edit_layout.addLayout(ph_btn_layout)
ph_edit_layout.addWidget(QtGui.QLabel('Double-click name to edit.'))
ph_edit_layout.addWidget(self.phase_table)
ph_edit_layout.addWidget(ph_what_btn)
phases.setLayout(ph_edit_layout)
# connect signals
self.add_phase_btn.clicked.connect(self.phase_table.addPhase)
self.dup_phase_btn.clicked.connect(self.phase_table.duplicatePhase)
self.delete_phase_btn.clicked.connect(self.phase_table.deletePhase)
#..............................................................
# PHASE ITEM DATA
self.pset = QtGui.QGroupBox('Phase Timeline')
self.pset.setAlignment(QtCore.Qt.AlignHCenter)
self.new_phase_item_btn = QtGui.QPushButton('Add Train/Test Event')
self.edit_phase_item_btn = QtGui.QPushButton('Edit Selected')
self.edit_phase_item_btn.setEnabled(False)
self.delete_phase_item_btn = QtGui.QPushButton('Remove Selected')
self.delete_phase_item_btn.setEnabled(False)
self.dup_phase_item_btn = QtGui.QPushButton('Duplicate Selected')
self.dup_phase_item_btn.setEnabled(False)
pset_btn_layout1 = QtGui.QHBoxLayout()
pset_btn_layout1.addWidget(self.new_phase_item_btn)
pset_btn_layout1.addWidget(self.delete_phase_item_btn)
pset_btn_layout2 = QtGui.QHBoxLayout()
pset_btn_layout2.addWidget(self.edit_phase_item_btn)
pset_btn_layout2.addWidget(self.dup_phase_item_btn)
self.pset_layout.addLayout(pset_btn_layout1)
self.pset_layout.addLayout(pset_btn_layout2)
self.pset.setLayout(self.pset_layout)
# connect signals
self.new_phase_item_btn.clicked.connect(self.newPhaseItem)
self.edit_phase_item_btn.clicked.connect(self.editPhaseItem)
self.delete_phase_item_btn.clicked.connect(self.deletePhaseItem)
self.dup_phase_item_btn.clicked.connect(self.duplicatePhaseItem)
#..............................................................
# putting it all together
self.setLayout(main_layout)
main_layout.addWidget(self.tab_divider)
h_phase_layout.addWidget(phases)
arrow_label = QtGui.QLabel(self)
arrow_label.setPixmap(QtGui.QPixmap(os.path.join(os.getcwd(),'resources',
'images','right_arrow.png')))
h_phase_layout.addWidget(arrow_label)
h_phase_layout.addWidget(self.pset)
net_design_layout.addLayout(wc_layout)
net_design_layout.addWidget(self.wiring)
net_design_layout.addLayout(group_layout)
group_layout.addStretch(1)
group_layout.addWidget(self.group_edit)
group_layout.addStretch(1)
properties_layout.addStretch(1)
properties_layout.addLayout(self.run_form)
properties_layout.addStretch(1)
def refreshTabContents(self):
    """Bring the phase table, per-phase widgets, and wiring display up to date."""
    # order mirrors the screen layout: table first, then phase widgets, then wiring
    for refresh in (self.phase_table.syncToRun,
                    self.refreshChildPhaseWidgets,
                    self.wiring.syncToRun):
        refresh()
def refreshChildPhaseWidgets(self):
    """Sync the timeline widget of every child phase that has one."""
    for child in self.run.getChildren():
        widget = child.getWidget()
        if widget:
            widget.syncToPhase()
def getTabName(self):
    """Label shown for this tab in the main tab bar."""
    return 'Run Object: %s' % self.run.getValueOf('run_name')
def updateTabName(self):
    """Ask the main tab widget to refresh every tab label (e.g. after a rename)."""
    self.run.getGUI().getMainTabs().refreshTabNames()
def setHelperText(self,text):
    """Show *text* in the wiring helper label beneath the connection matrix."""
    self.wire_helper.setText(text)
def phaseWhat(self,URL):
    """Show a help dialog explaining what a phase is.

    ``URL`` is supplied by the clicked help link and is ignored here.
    """
    ph_instructions = '''A phase provides a way to group together training and test sets.
You can define the phase order as sequential (default) or
probabilistic (to interleave training sets non-deterministically).
Use multiple phases ONLY if you want to interleave different sets
of parameters (ie. sets with different example files).'''
    dialogs.showInfo(self,ph_instructions)
def registerPhaseWidget(self,w):
    """Insert phase widget *w* into the timeline box, just above the trailing stretch."""
    # count()-1 keeps the stretch item added at construction time at the bottom
    self.pset_layout.insertWidget(self.pset_layout.count()-1,w)
def updateGroupInfo(self, index):
    """Populate the group-editing controls for the group at *index*.

    Passing ``None`` clears and disables every editing control (no selection).
    """
    if index is None:  # idiomatic identity test (was non-idiomatic `index == None`)
        # no group selected: blank out and disable all editing controls
        self.group_name.setText('')
        self.group_name.setEnabled(False)
        self.group_units.setValue(0)
        self.group_units.setEnabled(False)
        self.group_activation_type.setEnabled(False)
        self.group_error_type.setEnabled(False)
        self.delete_group_btn.setEnabled(False)
    else:
        # load the selected group's record into the controls and enable them
        group = self.run.getGroups()[index]
        self.group_name.setText(group['name'])
        self.group_name.setEnabled(True)
        self.group_units.setValue(group['units'])
        self.group_units.setEnabled(True)
        activation = group['activation_type']
        actIndex = self.group_activation_type.findText(activation)
        self.group_activation_type.setCurrentIndex(actIndex)
        self.group_activation_type.setEnabled(True)
        error = group['error_computation_type']
        errorIndex = self.group_error_type.findText(error)
        self.group_error_type.setCurrentIndex(errorIndex)
        self.group_error_type.setEnabled(True)
        self.delete_group_btn.setEnabled(True)
def updatePhaseInfo(self, index, phase_name):
    """Select phase *index*: retitle the timeline box and show only that phase's widget."""
    self.current_phase = index
    # the group box title tracks the selected phase
    self.pset.setTitle(phase_name + ' timeline')
    for position, phase in enumerate(self.run.getChildren()):
        (phase.showWidget if position == index else phase.hideWidget)()
def updatePhaseItemInfo(self, index):
    """Record the selected event index (or None) and toggle the per-event buttons."""
    self.current_phase_item = index
    selected = index is not None
    # edit/duplicate/remove only make sense with a selection
    for button in (self.edit_phase_item_btn,
                   self.dup_phase_item_btn,
                   self.delete_phase_item_btn):
        button.setEnabled(selected)
def newPhaseItem(self):
    """Create a new train/test event in the currently selected phase."""
    phase = self.run.getChildren()[self.current_phase]
    phase.newPhaseItem() # will update its own widget
def editPhaseItem(self):
    """Open (or focus) the main tab for the currently selected phase event."""
    phase = self.run.getChildren()[self.current_phase]
    item = phase.getChildren()[self.current_phase_item]
    tabs = self.run.getGUI().getMainTabs()
    tabs.requestTab(item)
    tabs.switchCurrentTab(item)
def duplicatePhaseItem(self):
    """Clone the selected event, append the clone to the current phase, and open its tab.

    Does nothing when no event is selected.
    """
    if self.current_phase_item is not None:  # idiomatic form of `not ... is None`
        phase = self.run.getChildren()[self.current_phase]
        current_item = phase.getChildren()[self.current_phase_item]
        the_copy = current_item.getCopy()
        # distinguish the clone from the original by name
        the_copy.getParameter('item_name').value = str(current_item.getValueOf('item_name') +
                                                       ' - COPY')
        the_copy.parent = phase
        phase.getChildren().append(the_copy)
        the_copy.createTab()
        self.run.getGUI().updateAllTabs()
def deletePhaseItem(self):
    """Delete the selected event from the current phase and scrub all references to it."""
    phase = self.run.getChildren()[self.current_phase]
    # remove from the main tab view
    self.run.getGUI().getMainTabs().removeTabByObject(phase.getChildren()[self.current_phase_item])
    # unregister tab from GUI
    pitem = phase.getChildren().pop(self.current_phase_item)
    self.run.getGUI().unRegisterTabbedObject(pitem)
    # disable buttons that require a selected object
    self.updatePhaseItemInfo(None)
    # also you have to manually change current item in the phase item widget
    phase.getWidget().current_item = None
    # remove references in any parent iterators to this phase "event"
    # walk up the ancestor chain: iterators above this run may reference the
    # deleted event by name inside their applied-paths list
    traceNode = phase.parent.parent
    while True:
        if traceNode.getClassName() == 'MikenetIterator':
            if traceNode.getAppliedPaths() != 'ALL':
                # paths look like 'phase:item' -- drop entries naming the deleted item
                newPaths = [x for x in traceNode.getAppliedPaths()
                            if x.split(':')[1] != pitem.getValueOf('item_name')]
                traceNode.setAppliedPaths(newPaths)
            traceNode = traceNode.parent
        else:
            # first non-iterator ancestor terminates the walk
            break
    # update display to match underlying data
    self.run.getGUI().updateAllTabs()
def visualizeNet(self):
    """Render the run's group connectivity as a directed graph and display it.

    Writes a temporary PNG via pydot/graphviz, shows it with matplotlib,
    and always removes the temporary file afterwards.
    """
    tmp_fn = str(guts.getRandomString(8) + '.png')
    graph = pydot.Dot(graph_type='digraph')
    nodes = []
    # one node per group, in group order so matrix indices line up with nodes
    for g in self.run.getGroups():
        nodes.append(pydot.Node(g['name']))
        graph.add_node(nodes[-1])
    matrix = self.run.getMatrix()
    # matrix[i][j] == 1 means group i projects to group j
    for i in range(len(self.run.getGroups())):
        for j in range(len(self.run.getGroups())):
            if matrix[i][j] == 1:
                graph.add_edge(pydot.Edge(nodes[i],nodes[j]))
    graph.write_png(tmp_fn)
    try:
        img = misc.imread(tmp_fn)
        pyplot.figure('MikeNetGUI - Network Visualization')
        pyplot.imshow(img)
        pyplot.axis('off')
        pyplot.title(self.run.getValueOf('run_name'))
        pyplot.show()
    finally:
        # BUG FIX: remove the temp PNG even if reading/plotting raises;
        # previously an exception here leaked the file in the CWD
        os.remove(tmp_fn)
def getLevel(self):
    """Tab nesting level: 1 = an object owned directly by the top-level script (a run)."""
    return 1
class IteratorTab(QtGui.QWidget):
    """Tab for editing a MikenetIterator: which parameter varies, how it varies
    (fixed initial value + delta, or random draws), and which phase events the
    iteration applies to.

    The editable form depends on the iterator's associated run and on the
    currently selected varying parameter, so it is rebuilt from scratch by
    refreshForm() whenever either changes.
    """
    def __init__(self,iterator):
        super(IteratorTab, self).__init__()
        # the MikenetIterator model object this tab edits
        self.iterator = iterator
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        # null initialize special objects for later
        # (rebuilt by refreshForm; rand_1/rand_2 are the two value boxes whose
        #  meaning depends on the chosen distribution: min/max for uniform
        #  draws, mu/sigma for gaussian draws)
        self.rand_int = None
        self.rand_dbl = None
        self.rand_gauss = None
        self.rand_1_lab = None
        self.rand_1 = None
        self.rand_2_lab = None
        self.rand_2 = None
        #..............................................................
        # LAYOUTS
        # main layout
        main_layout = QtGui.QVBoxLayout()
        # NOTE(review): this form_layout is unused here; refreshForm() builds its own
        form_layout = QtGui.QFormLayout()
        bottom_layout = QtGui.QHBoxLayout()
        name_layout = QtGui.QFormLayout()
        #..............................................................
        # CONTROLS
        # first, name which does not depend on this iterator's child
        name_lab,self.name_box = self.iterator.getParameter('iterator_name').getWidget()
        name_layout.addRow(name_lab,self.name_box)
        # connect signal
        self.name_box.editingFinished.connect(self.iterator.getGUI().getMainTabs().refreshTabNames)
        # then there is a form which holds parameters which DO depend on the child
        # create form groupBox.
        self.form_box = QtGui.QGroupBox()
        self.form_box.setAlignment(QtCore.Qt.AlignHCenter)
        self.form_holder_layout = QtGui.QVBoxLayout()
        self.form_box.setLayout(self.form_holder_layout)
        self.form_holder = None
        self.refreshForm()
        # actual form will go inside the box, added to the form holder layout
        # form holder is a widget
        # each time new widgets come into play, the old form holder is closed and a new one
        # is added to the form holder layout
        # in case this iterator has no associated run, display message
        self.no_run_msg = QtGui.QLabel('No run has been associated with this yet.\n' +
                                       'In the script timeline, click a run \n' +
                                       'and drag it into this iterator object.')
        self.no_run_msg.setAlignment(QtCore.Qt.AlignHCenter)
        #..............................................................
        # putting it all together
        self.setLayout(main_layout)
        main_layout.addLayout(name_layout)
        main_layout.addWidget(self.no_run_msg)
        main_layout.addLayout(bottom_layout)
        bottom_layout.addStretch(1)
        bottom_layout.addWidget(self.form_box)
        bottom_layout.addStretch(1)
        main_layout.addStretch(1)
    def setRun(self,run):
        """Toggle between the editable form and the 'no run yet' message."""
        if run:
            self.form_box.show()
            self.no_run_msg.hide()
            self.refreshForm()
        else:
            self.form_box.hide()
            self.no_run_msg.show()
    def refreshForm(self):
        """Rebuild the parameter form from scratch.

        The previous form holder widget is closed and replaced, because which
        controls exist depends on the varying parameter's widget type and on
        whether random values are enabled.
        """
        if self.form_holder:
            self.form_holder.close()
        self.form_holder = QtGui.QWidget()
        form_layout = QtGui.QFormLayout()
        vary_lab,vary_menu_box = self.iterator.getParameter('varying').getWidget()
        form_layout.addRow(vary_lab,vary_menu_box)
        repeat_lab,repeat_box = self.iterator.getParameter('repeat').getWidget()
        form_layout.addRow(repeat_lab,repeat_box)
        # for group hidden units, create a new widget
        # to capture group name
        if self.iterator.varying_parameter:
            if self.iterator.varying_parameter.variable_name == 'hidden_units':
                hu_lab,hu_box = self.iterator.varying_parameter.getWidget()
                form_layout.addRow(hu_lab,hu_box)
            # make a box asking if a random value should be used
            random_box = QtGui.QCheckBox()
            form_layout.addRow('Use random values?',random_box)
            if self.iterator.getRandomFlag():
                # random mode: flag is [kind, param1, param2]
                random_box.setCheckState(QtCore.Qt.Checked)
                # random value boxes built here
                self.rand_int = QtGui.QRadioButton('Random int',self)
                # NOTE(review): int-typed widgets take this branch regardless of
                # the stored flag kind -- presumably because int parameters only
                # support integer draws; confirm against the iterator model
                if (self.iterator.getRandomFlag()[0] == 'int'
                        or self.iterator.varying_parameter.widget_type in ['int_spinbox','text_field']):
                    self.rand_int.setChecked(True)
                    self.rand_1_lab = QtGui.QLabel('min')
                    self.rand_1 = QtGui.QSpinBox()
                    self.rand_1.setMinimum(0)
                    self.rand_1.setMaximum(10000000)
                    self.rand_1.setValue(self.iterator.getRandomFlag()[1])
                    self.rand_2_lab = QtGui.QLabel('max')
                    self.rand_2 = QtGui.QSpinBox()
                    self.rand_2.setMinimum(0)
                    self.rand_2.setMaximum(10000000)
                    self.rand_2.setValue(self.iterator.getRandomFlag()[2])
                form_layout.addRow('',self.rand_int)
                if self.iterator.varying_parameter.widget_type == 'dbl_spinbox':
                    # double-typed parameters additionally offer uniform-double
                    # and gaussian draws
                    self.rand_dbl = QtGui.QRadioButton('Random double',self)
                    if self.iterator.getRandomFlag()[0] == 'double':
                        self.rand_dbl.setChecked(True)
                        self.rand_1_lab = QtGui.QLabel('min')
                        self.rand_1 = QtGui.QDoubleSpinBox()
                        self.rand_1.setMinimum(0)
                        self.rand_1.setMaximum(10000000)
                        if self.iterator.varying_parameter.decimals:
                            self.rand_1.setDecimals(self.iterator.varying_parameter.decimals)
                        else:
                            self.rand_1.setDecimals(3)
                        self.rand_1.setValue(self.iterator.getRandomFlag()[1])
                        self.rand_2_lab = QtGui.QLabel('max')
                        self.rand_2 = QtGui.QDoubleSpinBox()
                        self.rand_2.setMinimum(0)
                        self.rand_2.setMaximum(10000000)
                        if self.iterator.varying_parameter.decimals:
                            self.rand_2.setDecimals(self.iterator.varying_parameter.decimals)
                        else:
                            self.rand_2.setDecimals(3)
                        self.rand_2.setValue(self.iterator.getRandomFlag()[2])
                    self.rand_gauss = QtGui.QRadioButton('Random gaussian',self)
                    if self.iterator.getRandomFlag()[0] == 'gaussian':
                        self.rand_gauss.setChecked(True)
                        self.rand_1_lab = QtGui.QLabel('mu')
                        self.rand_1 = QtGui.QDoubleSpinBox()
                        self.rand_1.setMinimum(0)
                        self.rand_1.setMaximum(10000000)
                        if self.iterator.varying_parameter.decimals:
                            self.rand_1.setDecimals(self.iterator.varying_parameter.decimals)
                        else:
                            self.rand_1.setDecimals(3)
                        self.rand_1.setValue(self.iterator.getRandomFlag()[1])
                        self.rand_2_lab = QtGui.QLabel('sigma')
                        self.rand_2 = QtGui.QDoubleSpinBox()
                        self.rand_2.setMinimum(0)
                        self.rand_2.setMaximum(10000000)
                        if self.iterator.varying_parameter.decimals:
                            self.rand_2.setDecimals(self.iterator.varying_parameter.decimals)
                        else:
                            self.rand_2.setDecimals(3)
                        self.rand_2.setValue(self.iterator.getRandomFlag()[2])
                    form_layout.addRow('',self.rand_dbl)
                    form_layout.addRow('',self.rand_gauss)
                    # connect toggle signals after they are all initialized
                    self.rand_dbl.toggled.connect(self.toggledRandomRadios)
                    self.rand_gauss.toggled.connect(self.toggledRandomRadios)
                    self.rand_int.toggled.connect(self.toggledRandomRadios)
                else:
                    # int-only parameter: the int radio is the sole, forced choice
                    self.rand_dbl = None
                    self.rand_gauss = None
                    self.rand_int.setChecked(True) # don't connect signal
                    self.rand_int.setEnabled(False)
                form_layout.addRow(self.rand_1_lab,self.rand_1)
                form_layout.addRow(self.rand_2_lab,self.rand_2)
                # special signals
                self.rand_1.valueChanged.connect(lambda: self.setSpecialRandValue(1,
                                                                                  self.rand_1.value()))
                self.rand_2.valueChanged.connect(lambda: self.setSpecialRandValue(2,
                                                                                  self.rand_2.value()))
            else:
                # deterministic mode: initial value + per-iteration delta
                random_box.setCheckState(QtCore.Qt.Unchecked)
                init_lab,init_box = self.iterator.getParameter('initial_value').getWidget()
                delta_lab,delta_box = self.iterator.getParameter('delta').getWidget()
                form_layout.addRow(init_lab,init_box)
                form_layout.addRow(delta_lab,delta_box)
            random_box.stateChanged.connect(self.toggledRandom)
            # create widget to let user apply iteration to specific phase events
            self.apply_all = QtGui.QCheckBox('all events')
            if self.iterator.varying_parameter.variable_name in ['seed','bias_value',\
                                                                 'weight_range','hidden_units']:
                # these parameters are run-global, so per-event application is meaningless
                self.iterator.setAppliedPaths('ALL')
                self.apply_all.setEnabled(False)
            if self.iterator.getAppliedPaths() == 'ALL':
                self.apply_all.setCheckState(QtCore.Qt.Checked)
            else:
                self.apply_all.setCheckState(QtCore.Qt.Unchecked)
            form_layout.addRow('Apply to',self.apply_all)
            self.apply_all.stateChanged.connect(self.toggleApplyAll)
            self.apply_box = CustomApplyIterationWidget(self.iterator)
            if self.iterator.getAppliedPaths() != 'ALL':
                self.apply_box.show()
            else:
                self.apply_box.hide()
            form_layout.addRow('',self.apply_box)
        # connect signal from varying box, because other widgets depend on the type
        # of parameter selected there
        vary_menu_box.currentIndexChanged.connect(lambda i: self.varyingChanged(i))
        # and connect signal from applied paths checkbox
        self.form_holder.setLayout(form_layout)
        self.form_holder_layout.addWidget(self.form_holder)
        self.form_holder.show()
    def toggleApplyAll(self,state):
        """Switch between applying the iteration to ALL events or a custom subset."""
        if state == QtCore.Qt.Checked:
            self.apply_box.hide()
            self.iterator.setAppliedPaths('ALL')
        else:
            self.iterator.setAppliedPaths([])
            self.apply_box.updateLines()
            self.apply_box.show()
    def toggledRandom(self,i):
        """Enable/disable random-value mode and rebuild the form."""
        if i == QtCore.Qt.Checked:
            # default to integer draws in [0, 0] until the user configures them
            self.iterator.setRandomFlag(['int',0,0])
        else:
            self.iterator.setRandomFlag(None)
        self.refreshForm()
    def toggledRandomRadios(self,b):
        """Store the chosen random distribution kind, then rebuild the form."""
        if self.rand_int.isChecked():
            if self.iterator.getRandomFlag():
                self.iterator.getRandomFlag()[0] = 'int'
            else:
                self.iterator.setRandomFlag(['int',0,0])
        if self.rand_dbl:
            if self.rand_dbl.isChecked():
                if self.iterator.getRandomFlag():
                    self.iterator.getRandomFlag()[0] = 'double'
                else:
                    self.iterator.setRandomFlag(['double',0,0])
            elif self.rand_gauss.isChecked():
                if self.iterator.getRandomFlag():
                    self.iterator.getRandomFlag()[0] = 'gaussian'
                else:
                    self.iterator.setRandomFlag(['gaussian',0,0])
        self.refreshForm()
    def setSpecialRandValue(self,i,value):
        """Write distribution parameter *i* (1 or 2) into the random flag list."""
        self.iterator.getRandomFlag()[i] = value
    def varyingChanged(self,i):
        """React to a new varying-parameter choice; dependent widgets must be rebuilt."""
        self.iterator.updateDependentFields(i)
        self.iterator.getGUI().getScript().getTabWidget().refreshTabContents()
    def refreshTabContents(self):
        """Re-sync this tab with the iterator's associated run."""
        self.iterator.syncToRun()
    def getTabName(self):
        """Label shown for this tab in the main tab bar."""
        return str('Iterator Object: ' + self.iterator.getValueOf('iterator_name'))
    def updateTabName(self):
        """Ask the main tab widget to refresh every tab label."""
        self.iterator.getGUI().getMainTabs().refreshTabNames()
    def getLevel(self):
        """Tab nesting level: 1 = an object owned directly by the top-level script."""
        return 1
class PhaseItemTab(QtGui.QWidget):
    """Tab for editing one phase event (a train or test step): its properties,
    isolated network components, activation recording, and noise settings.

    The tab is internally divided into four sub-tabs by a QTabWidget.
    """
    def __init__(self,phase_item):
        super(PhaseItemTab, self).__init__()
        # the MikenetPhaseItem model object this tab edits
        self.phase_item = phase_item
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        #..............................................................
        # LAYOUTS
        # main layout
        main_layout = QtGui.QVBoxLayout()
        # properties layout (on its own tab)
        properties_layout = QtGui.QHBoxLayout()
        # network component layout (on its own tab)
        components_layout = QtGui.QVBoxLayout()
        # recording layout (on its own tab)
        recording_layout = QtGui.QVBoxLayout()
        # noise layout (on its own tab)
        noise_layout = QtGui.QHBoxLayout()
        #..............................................................
        # TAB WIDGET
        # create tab widget that divides this larger tab
        self.tab_divider = QtGui.QTabWidget()
        # create blank widgets for each subsection
        properties_widget = QtGui.QWidget(self)
        properties_widget.setLayout(properties_layout)
        components_widget = QtGui.QWidget(self)
        components_widget.setLayout(components_layout)
        recording_widget = QtGui.QWidget(self)
        recording_widget.setLayout(recording_layout)
        noise_widget = QtGui.QWidget(self)
        noise_widget.setLayout(noise_layout)
        self.tab_divider.addTab(properties_widget,'Event Properties')
        self.tab_divider.addTab(components_widget,'Isolate Network Components')
        self.tab_divider.addTab(recording_widget,'Setup Activation Recording')
        self.tab_divider.addTab(noise_widget,'Noise Control')
        #..............................................................
        # PHASE ITEM PROPERTIES
        # create controls
        self.item_form = QtGui.QFormLayout()
        name_lab,self.name_box = self.phase_item.getParameter('item_name').getWidget()
        prob_lab,self.prob_box = self.phase_item.getParameter('probability').getWidget()
        self.item_form.addRow(name_lab,self.name_box)
        self.item_form.addRow(prob_lab,self.prob_box)
        prob_comment = QtGui.QLabel(self.phase_item.getParameter('probability').comment)
        # BUG FIX: QWidget.font() returns a copy, so mutating it in place had no
        # visible effect; the modified font must be assigned back with setFont()
        prob_font = prob_comment.font()
        prob_font.setItalic(True)
        prob_comment.setFont(prob_font)
        self.item_form.addRow(QtGui.QLabel(''),prob_comment)
        # the following controls are not part of the parameters dict of this phase_item
        # so they need to be manually set up
        # test sets
        self.test_box = CustomTestSetSelectionWidget(self.phase_item)
        self.item_form.addRow('Link to test set(s)',self.test_box)
        # mode
        mode_box = QtGui.QGroupBox()
        mode_layout = QtGui.QHBoxLayout()
        self.mode_train_btn = QtGui.QRadioButton('Train')
        self.mode_test_btn = QtGui.QRadioButton('Test')
        if self.phase_item.getMode() == 'TRAIN':
            self.mode_train_btn.setChecked(True)
        else:
            self.mode_test_btn.setChecked(True)
        mode_layout.addWidget(self.mode_train_btn)
        mode_layout.addWidget(self.mode_test_btn)
        mode_box.setLayout(mode_layout)
        self.item_form.addRow('Mode',mode_box)
        # training-profile selector (only meaningful in TRAIN mode)
        self.profile_box = QtGui.QComboBox()
        self.profile_box_lab = QtGui.QLabel('Link to training set')
        self.item_form.addRow(self.profile_box_lab,self.profile_box)
        self.profile_name = QtGui.QLabel()
        self.profile_name.setText(self.phase_item.getProfile())
        self.profile_name_lab = QtGui.QLabel('Set')
        self.item_form.addRow(self.profile_name_lab,self.profile_name)
        # per-event parameter overrides of the linked training profile
        self.override_box = QtGui.QGroupBox('Parameter Overrides')
        self.override_layout = QtGui.QVBoxLayout()
        self.override_box.setAlignment(QtCore.Qt.AlignHCenter)
        self.override_box.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                        QtGui.QSizePolicy.Expanding)
        self.override_box.setLayout(self.override_layout)
        self.override_layout.addWidget(QtGui.QLabel('Click any value to override that parameter.\n' +
                                                    'Parameters with overrides are highlighted in yellow. ' +
                                                    'To undo, click the value again.'))
        self.parameter_view = None
        self.override_box.hide()
        if self.phase_item.getMode() == 'TEST':
            # training-only controls are hidden in TEST mode
            self.profile_name.hide()
            self.profile_box.hide()
            self.profile_name_lab.hide()
            self.profile_box_lab.hide()
        # connect signals
        # NOTE: no need to send signals to update data changes...widgets are tied to the
        # model data automatically...in this case you only have to update the tab label
        self.name_box.editingFinished.connect(self.phase_item.getGUI().getMainTabs().refreshTabNames)
        self.mode_train_btn.toggled.connect(self.modeChanged)
        self.profile_box.currentIndexChanged.connect(self.profileChanged)
        #..............................................................
        # NET COMPONENTS
        self.components = CustomComponentSelectionWidget(self,self.phase_item)
        wc_layout = QtGui.QHBoxLayout()
        self.visualize_btn = QtGui.QPushButton('Visualize')
        # graph rendering needs a graphviz install; disable the button otherwise
        # (was an empty `if: pass / else:` construct)
        if not pydot.find_graphviz():
            self.visualize_btn.setEnabled(False)
        self.wire_helper = QtGui.QLabel("Click a group name to select group. Click a color cell to toggle connection.")
        # connect signals
        self.visualize_btn.clicked.connect(self.visualizeNet)
        #..............................................................
        # RECORDING SETUP
        self.recording = CustomRecordingWidget(self,self.phase_item)
        #..............................................................
        # NOISE CONTROLS
        # weight noise
        w_n_layout = QtGui.QVBoxLayout()
        w_n_box = QtGui.QGroupBox('Weight Noise')
        w_n_box.setAlignment(QtCore.Qt.AlignHCenter)
        w_n_box.setLayout(w_n_layout)
        self.weight_noise = CustomWeightNoiseWidget(self.phase_item)
        w_n_layout.addWidget(self.weight_noise)
        # input noise
        i_n_layout = QtGui.QVBoxLayout()
        i_n_box = QtGui.QGroupBox('Input Noise')
        i_n_box.setAlignment(QtCore.Qt.AlignHCenter)
        i_n_box.setLayout(i_n_layout)
        self.input_noise = CustomInputNoiseWidget(self.phase_item)
        i_n_layout.addWidget(self.input_noise)
        # activation noise
        a_n_layout = QtGui.QVBoxLayout()
        a_n_box = QtGui.QGroupBox('Activation Noise')
        a_n_box.setAlignment(QtCore.Qt.AlignHCenter)
        a_n_box.setLayout(a_n_layout)
        self.activation_noise = CustomActivationNoiseWidget(self.phase_item)
        a_n_layout.addWidget(self.activation_noise)
        #..............................................................
        # putting it all together
        self.setLayout(main_layout)
        main_layout.addWidget(self.tab_divider)
        properties_layout.addStretch(1)
        properties_layout.addLayout(self.item_form)
        properties_layout.addWidget(self.override_box)
        properties_layout.addStretch(1)
        components_layout.addLayout(wc_layout)
        wc_layout.addWidget(self.visualize_btn)
        wc_layout.addStretch(1)
        wc_layout.addWidget(self.wire_helper)
        components_layout.addWidget(self.components)
        recording_layout.addWidget(self.recording)
        noise_layout.addWidget(w_n_box)
        noise_layout.addWidget(i_n_box)
        noise_layout.addWidget(a_n_box)
    def modeChanged(self,b):
        """React to the Train/Test radio toggling; *b* is the train button's state."""
        self.phase_item.setMode(b)
        if b:
            # TRAIN mode: show the training-set selector
            self.profile_box.show()
            self.profile_name.show()
            self.profile_box_lab.show()
            self.profile_name_lab.show()
            self.refreshTrainingProfile()
            # this should update the parameter view and everything else automatically
        else:
            # hide training stuff
            self.override_box.hide()
            self.profile_box.hide()
            self.profile_name.hide()
            self.profile_box_lab.hide()
            self.profile_name_lab.hide()
            self.refreshTrainingProfile()
    def profileChanged(self,i):
        """Store the newly selected training profile and rebuild the override view."""
        name = self.profile_box.itemText(i)
        self.profile_name.setText(name)
        self.phase_item.setProfile(name)
        self.refreshParameterView()
    def refreshParameterView(self):
        """Rebuild the per-category parameter-override tabs for the linked profile."""
        if self.parameter_view:
            try:
                self.parameter_view.close()
            except RuntimeError:
                # narrowed from a bare except: PyQt raises RuntimeError when the
                # underlying C++ object has already been deleted
                pass
        if self.phase_item.getMode() == 'TEST':
            self.override_box.hide()
            return
        profile = self.phase_item.getGUI().getScript().getProfileByName(self.profile_name.text())
        if not profile:
            self.override_box.hide()
            return
        # update parameter view...
        # profile SHOULD be a training profile, but if for some reason a test profile
        # has the same name, it will screw it up...catch that here
        if profile.getClassName() != 'MikenetTrainingProfile':
            self.override_box.hide()
            return
        self.override_box.show()
        self.parameter_view = QtGui.QTabWidget()
        self.parameter_view.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.override_layout.addWidget(self.parameter_view)
        for category in profile.getCategories():
            cat_v_layout = QtGui.QVBoxLayout()
            cat_tab = QtGui.QWidget(self)
            cat_params = [x for x in profile.getAllParameters()
                          if x.category == category]
            cat_v_layout.addWidget(CustomInteractiveParamWidget(cat_params,
                                                                self.phase_item))
            cat_v_layout.addStretch(1)
            cat_tab.setLayout(cat_v_layout)
            self.parameter_view.addTab(cat_tab,category)
    def refreshTrainingProfile(self):
        """Repopulate the training-set combo box, preserving the old selection if it survives."""
        train_profiles = self.phase_item.getGUI().getScript().getTrainingProfiles()
        old_name = self.phase_item.getProfile()
        self.profile_box.clear()
        old_deleted = True
        for prof in train_profiles.getChildren():
            self.profile_box.addItem(prof.getValueOf('profile_name'))
            if prof.getValueOf('profile_name') == old_name:
                old_deleted = False
        if not old_deleted:
            self.profile_box.setCurrentIndex(self.profile_box.findText(old_name))
    def visualizeNet(self):
        """Render this event's component subnetwork as a directed graph.

        Groups/edges that belong to the isolated component set are drawn green.
        The temporary PNG is always removed afterwards.
        """
        run = self.phase_item.parent.parent
        tmp_fn = str(guts.getRandomString(8) + '.png')
        graph = pydot.Dot(graph_type='digraph')
        nodes = []
        for g in run.getGroups():
            if g['name'] in self.phase_item.getComponentGroups():
                nodes.append(pydot.Node(g['name'],style="filled",fillcolor="green"))
            else:
                nodes.append(pydot.Node(g['name']))
            graph.add_node(nodes[-1])
        matrix = self.phase_item.getComponentMatrix()
        # 1 = ordinary connection, 2 = connection in the isolated component set
        for i in range(len(matrix)):
            for j in range(len(run.getGroups())):
                if matrix[i][j] == 1:
                    graph.add_edge(pydot.Edge(nodes[i],nodes[j]))
                elif matrix[i][j] == 2:
                    graph.add_edge(pydot.Edge(nodes[i],nodes[j],color="green"))
        graph.write_png(tmp_fn)
        try:
            img = misc.imread(tmp_fn)
            pyplot.figure('MikeNetGUI - Network Visualization')
            pyplot.imshow(img)
            pyplot.axis('off')
            pyplot.title(run.getValueOf('run_name'))
            pyplot.show()
        finally:
            # BUG FIX: remove the temp PNG even if reading/plotting raises
            os.remove(tmp_fn)
    def refreshTabContents(self):
        """Re-sync every child widget of this tab with the underlying model data."""
        self.components.syncToRun()
        self.recording.syncToRun()
        self.updateNoiseControls()
        self.refreshTrainingProfile()
        self.test_box.syncToPhaseItem()
    def updateNoiseControls(self):
        """Re-sync the three noise widgets with the phase item."""
        self.weight_noise.syncToPhaseItem()
        self.input_noise.syncToPhaseItem()
        self.activation_noise.syncToPhaseItem()
    def setHelperText(self,text):
        """Show *text* in the wiring helper label on the components sub-tab."""
        self.wire_helper.setText(text)
    def getTabName(self):
        """Label shown for this tab in the main tab bar."""
        return str('Event: ' + self.phase_item.getValueOf('item_name'))
    def updateTabName(self):
        """Ask the main tab widget to refresh every tab label."""
        # BUG FIX: this method referenced self.run, which PhaseItemTab never
        # defines (copy/paste from the run tab); reach the GUI via phase_item
        self.phase_item.getGUI().getMainTabs().refreshTabNames()
    def getLevel(self):
        """Tab nesting level: 2 = an object nested two levels below the script."""
        return 2
class ProfilesTab(QtGui.QWidget):
    """Container tab presenting the training-set and test-set panels side by side."""
    def __init__(self,script):
        super(ProfilesTab, self).__init__()
        self.train = TrainingProfilesTab(script)
        self.test = TestProfilesTab(script)

        def _boxed(title, panel):
            # wrap a panel in a centered, titled group box
            box = QtGui.QGroupBox(title)
            box.setAlignment(QtCore.Qt.AlignHCenter)
            inner = QtGui.QVBoxLayout()
            inner.addWidget(panel)
            box.setLayout(inner)
            return box

        layout = QtGui.QHBoxLayout()
        layout.addWidget(_boxed('Training Sets', self.train))
        layout.addWidget(_boxed('Test Sets', self.test))
        self.setLayout(layout)
    def refreshTabContents(self):
        """Refresh both child panels."""
        for panel in (self.train, self.test):
            panel.refreshTabContents()
class TrainingProfilesTab(QtGui.QWidget):
    """Panel listing the script's training sets with controls to create, edit,
    duplicate and remove them."""
    def __init__(self,script):
        super(TrainingProfilesTab, self).__init__()
        self.script = script
        # currently selected profile object (None when nothing is selected)
        self.current_profile = None
        #..............................................................
        # LAYOUTS
        # create main horizontal layout
        main_layout = QtGui.QVBoxLayout()
        # left and right column layouts
        top_layout = QtGui.QVBoxLayout()
        bottom_layout = QtGui.QHBoxLayout()
        # control buttons
        control_layout = QtGui.QGridLayout()
        #..............................................................
        # LIST VIEW OBJECT (ENTIRE LEFT COLUMN)
        # see custom_widgets module for CustomListWidget definitions
        self.list_view = CustomListWidget(self,script.getTrainingProfiles())
        #..............................................................
        # EDITING CONTROLS (ADD PROFILE, REMOVE, ETC...)
        # create editing buttons; selection-dependent ones start disabled
        self.new_btn = QtGui.QPushButton('New Training Set')
        self.del_btn = QtGui.QPushButton('Remove Set')
        self.del_btn.setEnabled(False)
        self.edit_btn = QtGui.QPushButton('Edit Selected')
        self.edit_btn.setEnabled(False)
        self.dup_btn = QtGui.QPushButton('Duplicate Selected')
        self.dup_btn.setEnabled(False)
        # create timeline editing group box
        control = QtGui.QGroupBox()
        control_layout.setSpacing(10)
        control_layout.addWidget(self.new_btn,0,0,1,1)
        control_layout.addWidget(self.del_btn,0,1,1,1)
        control_layout.addWidget(self.edit_btn,1,0,1,1)
        control_layout.addWidget(self.dup_btn,1,1,1,1)
        control.setLayout(control_layout)
        # connect button signals
        self.new_btn.clicked.connect(self.newProfile)
        self.edit_btn.clicked.connect(self.editProfile)
        self.dup_btn.clicked.connect(self.duplicateProfile)
        self.del_btn.clicked.connect(self.removeProfile)
        #..............................................................
        # putting it all together
        self.setLayout(main_layout)
        main_layout.addLayout(top_layout)
        top_layout.addWidget(self.list_view)
        main_layout.addLayout(bottom_layout)
        bottom_layout.addStretch(1)
        bottom_layout.addWidget(control)
        bottom_layout.addStretch(1)
        self.list_view.syncToModel()
    def updateProfileInfo(self,profile):
        """Record *profile* as the current selection and toggle the selection-dependent buttons."""
        self.current_profile = profile
        self.script.getGUI().updateAllTabs()
        if profile:
            self.edit_btn.setEnabled(True)
            self.del_btn.setEnabled(True)
            self.dup_btn.setEnabled(True)
        else:
            self.edit_btn.setEnabled(False)
            self.del_btn.setEnabled(False)
            self.dup_btn.setEnabled(False)
    def newProfile(self):
        """Create a fresh training profile and refresh the list/tabs."""
        self.script.newTrainingProfile()
        self.list_view.syncToModel()
        self.script.getGUI().updateAllTabs()
    def duplicateProfile(self):
        """Insert a copy of the selected profile immediately after the original."""
        if self.current_profile:
            the_copy = self.current_profile.getCopy()
            the_copy.parent = self.current_profile.parent
            i = the_copy.parent.getChildren().index(self.current_profile)
            the_copy.parent.getChildren().insert(i+1,the_copy)
            self.list_view.syncToModel()
    def editProfile(self):
        """Open the modal edit window for the selected profile."""
        # guard added: avoids AttributeError if triggered with no selection
        if self.current_profile:
            self.current_profile.getEditWindow()
    def removeProfile(self):
        """Delete the selected profile and refresh the display."""
        if self.current_profile:
            self.script.removeTrainingProfile(self.current_profile)
            self.updateProfileInfo(None)
            self.list_view.syncToModel()
            self.script.getGUI().updateAllTabs()
    def refreshTabContents(self):
        """Re-sync the list view with the underlying profile collection."""
        # removed a stray dead `pass` statement that followed this call
        self.list_view.syncToModel()
    def getTabName(self):
        return 'Training Sets'
    def getLevel(self):
        """Tab nesting level: 0 = top-level script tab."""
        return 0
class TestProfilesTab(QtGui.QWidget):
    """Panel listing the script's test sets with controls to create, edit,
    duplicate and remove them.

    Unlike training sets, removing a test set also requires scrubbing
    references to it from every phase event in the script tree
    (see DFS_removeProfile).
    """
    def __init__(self,script):
        super(TestProfilesTab, self).__init__()
        self.script = script
        # currently selected profile object (None when nothing is selected)
        self.current_profile = None
        #..............................................................
        # LAYOUTS
        # create main horizontal layout
        main_layout = QtGui.QVBoxLayout()
        # left and right column layouts
        top_layout = QtGui.QVBoxLayout()
        bottom_layout = QtGui.QHBoxLayout()
        # control buttons
        control_layout = QtGui.QGridLayout()
        #..............................................................
        # LIST VIEW OBJECT (ENTIRE LEFT COLUMN)
        # see custom_widgets module for CustomListWidget definitions
        self.list_view = CustomListWidget(self,script.getTestProfiles())
        #..............................................................
        # EDITING CONTROLS (ADD PROFILE, REMOVE, ETC...)
        # create editing buttons; selection-dependent ones start disabled
        self.new_btn = QtGui.QPushButton('New Test Set')
        self.del_btn = QtGui.QPushButton('Remove Set')
        self.del_btn.setEnabled(False)
        self.edit_btn = QtGui.QPushButton('Edit Selected')
        self.edit_btn.setEnabled(False)
        self.dup_btn = QtGui.QPushButton('Duplicate Selected')
        self.dup_btn.setEnabled(False)
        # create timeline editing group box
        control = QtGui.QGroupBox()
        control_layout.setSpacing(10)
        control_layout.addWidget(self.new_btn,0,0,1,1)
        control_layout.addWidget(self.del_btn,0,1,1,1)
        control_layout.addWidget(self.edit_btn,1,0,1,1)
        control_layout.addWidget(self.dup_btn,1,1,1,1)
        control.setLayout(control_layout)
        # connect button signals
        self.new_btn.clicked.connect(self.newProfile)
        self.edit_btn.clicked.connect(self.editProfile)
        self.dup_btn.clicked.connect(self.duplicateProfile)
        self.del_btn.clicked.connect(self.removeProfile)
        #..............................................................
        # putting it all together
        self.setLayout(main_layout)
        main_layout.addLayout(top_layout)
        top_layout.addWidget(self.list_view)
        main_layout.addLayout(bottom_layout)
        bottom_layout.addStretch(1)
        bottom_layout.addWidget(control)
        bottom_layout.addStretch(1)
        self.list_view.syncToModel()
    def updateProfileInfo(self,profile):
        """Record *profile* as the current selection and toggle the selection-dependent buttons."""
        self.current_profile = profile
        if profile:
            self.edit_btn.setEnabled(True)
            self.del_btn.setEnabled(True)
            self.dup_btn.setEnabled(True)
        else:
            self.edit_btn.setEnabled(False)
            self.del_btn.setEnabled(False)
            self.dup_btn.setEnabled(False)
    def newProfile(self):
        """Create a fresh test profile and refresh the list/tabs."""
        self.script.newTestProfile()
        self.list_view.syncToModel()
        self.script.getGUI().updateAllTabs()
    def duplicateProfile(self):
        """Insert a copy of the selected profile immediately after the original."""
        if self.current_profile:
            the_copy = self.current_profile.getCopy()
            the_copy.parent = self.current_profile.parent
            i = the_copy.parent.getChildren().index(self.current_profile)
            the_copy.parent.getChildren().insert(i+1,the_copy)
            self.list_view.syncToModel()
    def removeProfile(self):
        """Delete the selected profile and scrub references to it from all phase items."""
        if self.current_profile:
            # remove references to this profile in all phase items
            self.DFS_removeProfile(self.script)
            self.script.removeTestProfile(self.current_profile)
            self.updateProfileInfo(None)
            self.list_view.syncToModel()
            self.script.getGUI().updateAllTabs()
    def DFS_removeProfile(self,node):
        """Depth-first walk of the script tree, removing this profile's name from
        every MikenetPhaseItem's linked-test-set list."""
        for child in node.getChildren():
            if child.getClassName() == 'MikenetPhaseItem':
                name = self.current_profile.getValueOf('profile_name')
                if name in child.getTestProfiles():
                    child.getTestProfiles().remove(name)
            else:
                # phase items are leaves; only recurse into container nodes
                self.DFS_removeProfile(child)
    def editProfile(self):
        """Open the modal edit window for the selected profile."""
        self.current_profile.getEditWindow()
    def refreshTabContents(self):
        """Re-sync the list view with the underlying profile collection."""
        self.list_view.syncToModel()
    def getTabName(self):
        return 'Test Sets'
    def getLevel(self):
        """Tab nesting level: 0 = top-level script tab."""
        return 0
| bopowers/MikenetGUI | lib/tabs.py | Python | gpl-3.0 | 63,418 |
from pandajedi.jedicore.FactoryBase import FactoryBase
from pandajedi.jediconfig import jedi_config
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
# factory class for throttling
class JobThrottler(FactoryBase):
    """Factory wrapper dispatching throttling decisions to the VO/label-specific
    implementation configured in jedi_config."""

    def __init__(self, vo, sourceLabel):
        # FactoryBase loads the concrete throttler module named in the config
        FactoryBase.__init__(self, vo, sourceLabel, logger,
                             jedi_config.jobthrottle.modConfig)

    def toBeThrottled(self, vo, sourceLabel, cloudName, workQueue, jobStat):
        """Ask the concrete throttler whether job generation should be throttled."""
        implementation = self.getImpl(vo, sourceLabel)
        decision = implementation.toBeThrottled(vo, sourceLabel, cloudName,
                                                workQueue, jobStat)
        # surface the limits computed by the concrete class to the caller
        self.minPriority = implementation.minPriority
        self.maxNumJobs = implementation.maxNumJobs
        self.lackOfJobs = implementation.underNqLimit
        return decision

    def mergeThrottled(self, vo, sourceLabel, thrLevel):
        """Combine a throttle level via the concrete implementation."""
        return self.getImpl(vo, sourceLabel).mergeThrottled(thrLevel)
| RRCKI/panda-jedi | pandajedi/jediorder/JobThrottler.py | Python | apache-2.0 | 1,083 |
import numpy as np
import sys
"""
Module of bandit algorithms
Bandit algorithms should implement the following methods:
1. __init__(B): constructor that takes a Bandit Simulator object.
2. init(T): prepare to run for T rounds, wipe state, etc.
3. updated(x,a,r): update any state using the current interaction
4. get_action(x): propose an action for this context.
"""
class Bandit(object):
    """
    Bandit Algorithm interface.

    This baseline is itself a valid bandit algorithm: it plays a
    uniformly random action every round and never learns.
    """
    def __init__(self, B):
        # B is the bandit simulator supplying contexts and rewards.
        self.B = B

    def init(self,T):
        """Reset cumulative rewards and the uniform policy distribution."""
        self.reward = 0.0
        self.opt_reward = 0.0
        uniform = 1.0 / self.B.N
        self.dist = [uniform for _ in range(self.B.N)]

    def play(self,T):
        """Run for T rounds; return per-round cumulative regret."""
        self.init(T)
        regrets = []
        for t in range(T):
            # Progress report at powers of two.
            if np.log2(t+1) == int(np.log2(t+1)):
                print("t = %d, r = %0.3f, ave_regret = %0.3f" % (t, self.reward, (self.opt_reward - self.reward)/(t+1)))
            context = self.B.get_new_context()
            action = self.get_action(context)
            self.reward += self.B.get_reward(action)
            self.opt_reward += self.B.get_reward(self.B.Pi[self.B.Pistar,context])
            # NOTE(review): the reward handed to update() is a separate draw
            # from the one accumulated above — presumably intentional.
            self.update(context, action, self.B.get_reward(action))
            regrets.append(self.opt_reward - self.reward)
        return regrets

    def update(self, x, a, r):
        """No-op for the random baseline; subclasses learn here."""
        pass

    def get_action(self, x):
        """Sample one action uniformly at random."""
        probs = [1.0/self.B.K for _ in range(self.B.K)]  # distribution over ACTIONS
        draw = np.random.multinomial(1, probs)
        return int(np.nonzero(draw)[0])
class BFTPL(Bandit):
    """
    Follow-the-perturbed-leader style bandit algorithm.

    @deprecated: the perturbation scale has not been tuned properly,
    so this implementation should not be used.
    """
    def init(self,T):
        self.reward = 0.0
        # Integer-valued policy scores, perturbed by Gaussian noise below.
        self.weights = np.array([0 for _ in range(self.B.N)])
        self.noise = np.random.normal(0, 1, [1,self.B.N])
        self.eta = np.sqrt(T)

    def update(self, x, a, r):
        # Monte-Carlo estimate of the probability that action a is played,
        # used as an inverse-propensity weight when crediting policies.
        n_samples = 1000
        action_counts = [0.0 for _ in range(self.B.K)]
        for _ in range(n_samples):
            perturbation = np.random.normal(0, 1, [1,self.B.N])
            leader = self.argmax(perturbation)
            action_counts[self.B.Pi[leader, x]] += 1
        action_counts = [c/1000 for c in action_counts]
        print("Updating policies: action %d, reward %d, IPS %0.2f" % (a, r, action_counts[a]))
        # Credit every policy that agrees with the played action.
        for policy in range(self.B.N):
            if self.B.Pi[policy,x] == a:
                self.weights[policy] += r/action_counts[a]

    def get_action(self,x):
        leader = self.argmax(self.noise)
        return self.B.Pi[leader,x]

    def argmax(self, noise):
        # Perturbed-leader selection over the policy weights.
        perturbed = self.weights + self.eta*noise
        return np.argmax(perturbed)
| akshaykr/oracle_cb | Bandits.py | Python | mit | 2,706 |
import tensorflow as tf

# tf.reduce_logsumexp(input_tensor, axis=None, keep_dims=False, name=None,
#                     reduction_indices=None)
# Computes log(sum(exp(...))) of the tensor's elements along `axis`.
# With axis=None the reduction runs over every dimension; keep_dims=True
# would retain each reduced dimension with size 1.
a = tf.constant([[0, 0, 0], [0, 0, 0]], dtype=tf.float64)
full_reduction = tf.reduce_logsumexp(a)       # all elements -> log(6)
column_reduction = tf.reduce_logsumexp(a, 0)  # per column   -> log(2)
row_reduction = tf.reduce_logsumexp(a, 1)     # per row      -> log(3)

with tf.Session() as sess:
    print(sess.run(full_reduction))
    print(sess.run(column_reduction))
    print(sess.run(row_reduction))

# full_reduction   ==> 1.79175946923                          # log(6)
# column_reduction ==> [0.69314718 0.69314718 0.69314718]     # [log(2)]*3
# row_reduction    ==> [1.09861229 1.09861229]                # [log(3)]*2
| Asurada2015/TFAPI_translation | math_ops_advanced_function/tf_reduce_logsumexp.py | Python | apache-2.0 | 674 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.