repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
SalesforceFoundation/HEDAP | robot/EDA/resources/SystemSettingsPageObject.py | Python | bsd-3-clause | 5,956 | 0.009402 | from BaseObjects import BaseEDAPage
from EDA import eda_lex_locators
from cumulusci.robotframework.pageobjects import BasePage
from cumulusci.robotframework.pageobjects import pageobject
import time
@pageobject("System", "HEDA_Settings")
class SystemSettingsPage(BaseEDAPage, BasePage):
def _is_current_page(self):
"""
Verify we are on the EDA Settings page for System
by verifying the HEDA Settings URL and the System tab
"""
location = "/lightning/n/{}{}".format(self.eda.get_eda_namespace_prefix(), self._object_name)
self.selenium.location_should_contain(location)
locator_tab = eda_lex_locators["eda_settings"]["tab"].format("System")
self.selenium.wait_until_page_contains_element(
locator_tab,
error=f"System tab with locator '{locator_tab}' is not available on the page"
)
def update_system_dropdown_value(self,**kwargs):
""" This method will update the drop down field value passed in keyword arguments
Pass the expected value to be set in the drop down field from the tests
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_relationships"]["dropdown_value"].format(field,value)
self.selenium.wait_until_page_contains_element(locator,
error=f"'{value}' as dropdown value in '{field}' field is not available ")
self.selenium.click_element(locator)
def enter_account_name_format(self,**kwargs):
""" This method will enter the account name format after selecting other in the drop down
Pass the expected value to be set in the input field as arguments
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["other_accname_format"].format(field)
self.selenium.wait_until_page_contains_element(locator,
error=f"'{field}' field is not available ")
self.selenium.clear_element_text(locator)
self.selenium.get_webelement(locator).send_keys(value)
def select_recipient_type_value(self,**kwargs):
""" This method will select the drop down field value passed in keyword arguments
Pass the expected value to be selected in the drop down field from the tests
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["recipient_type_value"].format(field,value)
self.selenium.wait_until_page_contains_element(locator,
error=f"'{value}' as dropdown value in '{field}' field is not available ")
self.selenium.click_element(locator)
def select_recipient(self,**kwargs):
""" This method will select the lookup result for the recipient notification
Pass the expected value to be selected as arguments
"""
for field,value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["recipient_name"].format(field)
self.selenium.wait_until_page_contains_element(locator,
error=f"'{field}' field is not available ")
self.selenium.clear_element_text(locator)
self.selenium.get_webelement(locator).send_keys(value)
time.sleep(0.5)
locator_lookup = eda_lex_locators["eda_settings_system"]["recipient_lookup"].format(value)
self.selenium.wait_until_page_contains_element(locator_lookup,
error=f"'{locator_lookup}' is not available ")
self.selenium.click_element(locator_lookup)
def verify_admin_toast_message(self, value):
""" Verifies the admin toast message """
locator = eda_lex_locators["eda_settings_system"]["admin_success_toast"]
time.sleep(0.5) # This wait is needed for the toast message validation
self.selenium.wait_until_page_contains_element(locator, timeout=60)
actual_value = self.selenium.get_webelement(locator).text
self.builtin.log("Toast message :" + actual_value)
if not str(value).lower() == str(actual_value).lower() :
raise Exception (f"Expected {value} but it displayed {actual_value}")
def verify_household_toast_message(self, value):
""" Verifies the household specific toast message """
locator = eda_lex_locators["eda_settings_system"]["hh_success_toast"]
time.sleep(0.5) # This wait is needed for the toast message validation
self.selenium.wait_until_page_contains_element(locator, timeout=60)
actual_value = self.selenium.get_webelement(locator).text
self.builtin.log("Toast message :" + actual_value)
if not str(value).lower() == str(actual_value).lower() :
raise Exception (f"Expected {value} but it displayed {actual_value}")
def verify_system_dropdown_value(self,**kwargs):
""" This method validates the dropdown value for the field passed in kwargs
Pass the field name and expected value to be verified from the tests using
keyword arguments
"""
for field,expected_value in kwargs.items():
locator = eda_lex_locators["eda_settings_system"]["other_dropdown_value"].format(field,expected_value)
self.selenium.page_should_contain_element(locator)
self.selenium.wait_until_element_is_visible(locator,
| error= "Element is not di | splayed for the user")
actual_value = self.selenium.get_webelement(locator).text
if not str(expected_value).lower() == str(actual_value).lower() :
raise Exception (f"Dropdown value in {field} is {actual_value} but it should be {expected_value}")
|
seankelly/buildbot | master/buildbot/test/unit/test_plugins.py | Python | gpl-2.0 | 14,759 | 0 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Unit tests for the plugin framework
"""
from __future__ import absolute_import
from __future__ import print_function
import re
import mock
from twisted.trial import unittest
from zope.interface import implementer
import buildbot.plugins.db
from buildbot.errors import PluginDBError
from buildbot.interfaces import IPlugin
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarning
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning
# buildbot.plugins.db needs to be imported for patching, however just 'db' is
# much shorter for using in tests
db = buildbot.plugins.db
class FakeEntry(object):
"""
An entry suitable for unit tests
"""
def __init__(self, name, project_name, version, fail_require, value):
self._name = name
self._dist = mock.Mock(spec_set=['project_name', 'version'])
self._dist.project_name = project_name
self._dist.version = version
self._fail_require = fail_require
self._value = value
@property
def name(self):
"entry name"
return self._name
@property
def dist(self):
"dist thingie"
return self._dist
def require(self):
"""
handle external dependencies
"""
if self._fail_require:
raise RuntimeError('Fail require as requested')
def load(self):
"""
handle loading
"""
return self._value
class ITestInterface(IPlugin):
"""
test interface
"""
def hello(name):
"Greets by :param:`name`"
@implementer(ITestInterface)
class ClassWithInterface(object):
"""
a class to implement a simple interface
"""
def __init__(self, name=None):
self._name = name
def hello(self, name=None):
'implement the required method'
return name or self._name
class ClassWithNoInterface(object):
"""
just a class
"""
# NOTE: buildbot.plugins.db prepends the group with common namespace --
# 'buildbot.'
_FAKE_ENTRIES = {
'buildbot.interface': [
FakeEntry('good', 'non-existent', 'irrelevant', False,
ClassWithInterface),
FakeEntry('deep.path', 'non-existent', 'irrelevant', False,
ClassWithInterface)
],
'buildbot.interface_failed': [
FakeEntry('good', 'non-existent', 'irrelevant', True,
ClassWithInterface)
],
'buildbot.no_interface': [
FakeEntry('good', 'non-existent', 'irrelevant', False,
ClassWithNoInterface)
],
'buildbot.no_interface_again': [
FakeEntry('good', 'non-existent', 'irrelevant', False,
ClassWithNoInterface)
],
'buildbot.no_interface_failed': [
FakeEntry('good', 'non-existent', 'irrelevant', True,
ClassWithNoInterface)
],
'buildbot.duplicates': [
FakeEntry('good', 'non-existent', 'first', False,
ClassWithNoInterface),
FakeEntry('good', 'non-existent', 'second', False,
ClassWithNoInterface)
]
}
def provide_fake_entries(group):
"""
give a set of fake entries for known groups
"""
return _FAKE_ENTRIES.get(group, [])
@mock.patch('buildbot.plugins.db.iter_entry_points', provide_fake_entries)
class TestBuildbotPlugins(unittest.TestCase):
def setUp(self):
buildbot.plugins.db._DB = buildbot.plugins.db._PluginDB()
def test_check_group_registration(self):
with mock.patch.object(buildbot.plugins.db, '_DB', db._PluginDB()):
# The groups will be prepended with namespace, so info() will
# return a dictionary with right keys, but no data
groups = set(_FAKE_ENTRIES.keys())
for group in groups:
db.get_plugins(group)
registered = set(db.info().keys())
self.assertEqual(registered, groups)
self.assertEqual(registered, set(db.namespaces()))
def test_interface_provided_simple(self):
# Basic check before the actual test
self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))
plugins = db.get_plugins('interface', interface=ITestInterface)
self.assertTrue('good' in plugins.names)
result_get = plugins.get('good')
result_getattr = plugins.good
self.assertFalse(result_get is None)
self.assertTrue(result_get is result_getattr)
# Make sure we actually got our class
greeter = result_get('yes')
self.assertEqual('yes', greeter.hello())
self.assertEqual('no', greeter.hello('no'))
def test_missing_plugin(self):
plugins = db.get_plugins('interface', interface=ITestInterface)
self.assertRaises(AttributeError, getattr, plugins, 'bad')
self.assertRaises(PluginDBError, plugins.get, 'bad')
self.assertRaises(PluginDBError, plugins.get, 'good.extra')
def test_interface_provided_deep(self):
# Basic check before the actual test
self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))
plugins = db.get_plugins('interface', interface=ITestInterface)
self.assertTrue('deep.path' in plugins.names)
self.assertTrue('deep.path' in plugins)
self.assertFalse('even.deeper.path' in plugins)
result_get = plugins.get('deep.path')
result_getattr = plugins.deep.path
self.assertFalse(result_get is None)
self.assertTrue(result_get is result_getattr)
# Make sure we actually got our class
greeter = result_get('yes')
self.assertEqual('yes', greeter.hello())
self.assertEqual('no', greeter.hello('no'))
def test_interface_provided_deps_failed(self):
plugins = db.get_plugins('interface_failed', interface=ITestInterface,
check_extras=True)
self.assertRaises(PluginDBError, plugins.get, 'good')
def test_required_interface_not_provided(self):
plugins = db.get_plugins('no_interface_again',
interface=ITestInterface)
self.assertTrue(plugins._interface is ITestInterface)
self.assertRaises(PluginDBError, plugins.get, 'good')
def test_no_interface_provided(self):
plugins = db.get_plugins('no_interface')
self.assertFalse(plugins.get('good') is None)
def test_no_interface_provided_deps_failed(self):
plugins = db.get_plugins('no_interface_failed', check_extras=True)
self.assertRaises(PluginDBE | rror, plugins.get, 'good')
def test_failure_on_dups(self):
self.assertRaises(PluginDBError, db.get_plugins, 'duplicates',
| load_now=True)
def test_get_info_on_a_known_plugin(self):
plugins = db.get_plugins('interface')
self.assertEqual(('non-existent', 'irrelevant'), plugins.info('good'))
def test_failure_on_unknown_plugin_info(self):
plugins = db.get_plugins('interface')
self.assertRaises(PluginDBError, plugins.info, 'bad')
def test_failure_on_unknown_plugin_get(self):
plugins = db.get_plugins('interface')
self.assertRaises(PluginDBError, plugins.get, 'bad')
class SimpleFakeEntry(FakeEntry):
def __init__(self, name, value):
FakeEntry.__init__(self, name, 'non-exist |
oe-alliance/oe-alliance-enigma2 | lib/python/Screens/ChannelSelection.py | Python | gpl-2.0 | 87,895 | 0.029365 | # -*- coding: utf-8 -*-
from Tools.Profile import profile
from Screen import Screen
import Screens.InfoBar
import Components.ParentalControl
from Components.Button import Button
from Components.ServiceList import ServiceList
from Components.ActionMap import NumberActionMap, ActionMap, HelpableActionMap
from Components.MenuList import MenuList
from Components.ServiceEventTracker import ServiceEventTracker, InfoBarBase
from Components.Sources.List import List
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import preferredTimerPath
from Components.Renderer.Picon import getPiconName
from Screens.TimerEdit import TimerSanityConflict
profile("ChannelSelection.py 1")
from EpgSelection import EPGSelection
from enigma import eActionMap, eServiceReference, eEPGCache, eServiceCenter, eRCInput, eTimer, ePoint, eDVBDB, iPlayableService, iServiceInformation, getPrevAsciiCode, eEnv, loadPNG
from Components.config import config, configfile, ConfigSubsection, ConfigText
from Tools.NumericalTextInput import NumericalTextInput
profile("ChannelSelection.py 2")
from Components.NimManager import nimmanager
profile("ChannelSelection.py 2.1")
from Components.Sources.RdsDecoder import RdsDecoder
profile("ChannelSelection.py 2.2")
from Components.Sources.ServiceEvent import ServiceEvent
from Components.Sources.Event import Event
profile("ChannelSelection.py 2.3")
from Components.Input import Input
profile("ChannelSelection.py 3")
from Components.ChoiceList import ChoiceList, ChoiceEntryComponent
from RecordTimer import RecordTimerEntry, AFTEREVENT
from TimerEntry import TimerEntry, InstantRecordTimerEntry
from Screens.InputBox import InputBox, PinInput
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.ServiceInfo import ServiceInfo
profile("ChannelSelection.py 4")
from Screens.PictureInPicture import PictureInPicture
from Screens.RdsDisplay import RassInteractive
from ServiceReference import ServiceReference
from Tools.BoundFunction import boundFunction
from Tools import Notifications
from time import localtime, time
from os import remove
try:
from Plugins.SystemPlugins.PiPServiceRelation.plugin import getRelationDict
plugin_PiPServiceRelation_installed = True
except:
plugin_PiPServiceRelation_installed = False
profile("ChannelSelection.py after imports")
FLAG_SERVICE_NEW_FOUND = 64 #define in lib/dvb/idvb.h as dxNewFound = 64
class BouquetSelector(Screen):
def __init__(self, session, bouquets, selectedFunc, enableWrapAround=False):
Screen.__init__(self, session)
Screen.setTitle(self, _("Choose Bouquet"))
self.selectedFunc=selectedFunc
self["actions"] = ActionMap(["OkCancelActions"],
{
"ok": self.okbuttonClick,
"cancel": self.cancelClick
})
entrys = [ (x[0], x[1]) for x in bouquets ]
self["menu"] = MenuList(entrys, enableWrapAround)
def getCurrent(self):
cur = self["menu"].getCurrent()
return cur and cur[1]
def okbuttonClick(self):
self.selectedFunc(self.getCurrent())
def up(self):
self["menu"].up()
def down(self):
self["menu"].down()
def cancelClick(self):
self.close(False)
class EpgBouquetSelector(BouquetSelector):
def __init__(self, session, bouquets, selectedFunc, enableWrapAround=False):
BouquetSelector.__init__(self, session, bouquets, selectedFunc, enableWrapAround=False)
self.skinName = "BouquetSelector"
self.bouquets=bouquets
def okbuttonClick(self):
self.selectedFunc(self.getCurrent(),self.bouquets)
class SilentBouquetSelector:
def __init__(self, bouquets, enableWrapAround=False, current=0):
self.bouquets = [b[1] for b in bouquets]
self.pos = current
self.count = len(bouquets)
self.enableWrapAround = enableWrapAround
def up(self):
if self.pos > 0 or self.enableWrapAround:
self.pos = (self.pos - 1) % self.count
def down(self):
if self.pos < (self.count - 1) or self.enableWrapAround:
self.pos = (self.pos + 1) % self.count
def getCurrent(self):
return self.bouquets[self.pos]
# csel.bouquet_mark_edit values
OFF = 0
EDIT_BOUQUET = 1
EDIT_ALTERNATIVES = 2
def append_when_current_valid(current, menu, args, level = 0, key = ""):
if current and current.valid() and level <= config.usage.setup_level.index:
menu.append(ChoiceEntryComponent(key, args))
class ChannelContextMenu(Screen):
def __init__(self, session, csel):
Screen.__init__(self, session)
Screen.setTitle(self, _("Channel list context menu"))
#raise Exception("we need a better summary screen here")
self.csel = csel
self.bsel = None
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "NumberActions"],
{
"ok": self.okbuttonClick,
"cancel": self.cancelClick,
"blue": self.showServiceInPiP
})
menu = [ ]
self.pipAvailable = False
current = csel.getCurrentSelection()
current_root = csel.getRoot()
current_sel_path = current.getPath()
current_sel_flags = current.flags
inBouquetRootList = current_root and 'FROM BOUQUET "bouquets.' in current_root.getPath() #FIXME HACK
inAlternativeList = current_root and 'FROM BOUQUET "alternatives' in current_root.getPath()
inBouquet = csel.getMutableList() is not None
haveBouquets = config.usage.multibouquet.value
menu.append(ChoiceEntryComponent(text = (_("Settings..."), boundFunction(self.openSetup, "channelselection"))))
if not (current_sel_path or current_sel_flags & (eServiceReference.isDirectory|eServiceReference.isMarker)):
append_when_current_valid(current, menu, (_("show transponder info"), self.showServiceInformations), level = 2)
if csel.bouquet_mark_edit == OFF and not csel.movemode:
if not inBouquetRootList:
isPlayable = not (current_sel_flags & (eServiceReference.isMarker|eServiceReference.isDirectory))
if isPlayable:
if config.servicelist.startupservice.value == self.csel.getCurrentSelection().toString():
append_when_current_valid(current, menu, (_("stop using as startup service"), self.unsetStartupService), level = 0)
else:
append_when_current_valid(current, menu, (_("set as startup service"), self.setStartupService), level = 0)
if config.ParentalControl.configured.value:
from Components.ParentalControl import parentalControl
if parentalControl.getProtectionLevel(csel.getCurrentSelection().toCompareString()) == -1:
append_when_current_valid(current, menu, (_("add to parental protection"), boundFunction(self.addParentalProtection, csel.getCurrentSelection())), level = 0)
else:
append_when_current_valid(current, menu, (_("remove from parental protection"), boundFunction(self.removeParentalProtection, csel.getCurrentSelection())), level = 0)
if haveBouquets:
bouquets = self.csel.getBouquetList()
if bouquets is None:
bouquetCnt = 0
else:
bouquetCnt = len(bouquets)
if not inBouquet or bouquetCnt > 1:
append_when_current_valid(current, menu, (_("add service to bouquet"), self.addServiceToBouquetSelected), level = 0)
else:
if not inBouquet:
append_when_current_valid(current, menu, (_("add service to favourites"), self.addServiceToBouquetSelected), level = 0)
if SystemInfo["PIPAvailable"]:
# only allow the service to be played directly in pip / mainwindow when the service is not under parental control
if not config.ParentalControl.configured.value or parentalControl.getProtectionLevel(csel.getCurrentSelection().toCompareString()) | == -1:
if not csel.dopipzap:
append_when_current_valid(current, menu, (_("play as picture in picture"), self.showServiceInPiP), level = 0, key = "blue")
self.pipAvailable = True
else:
append_when_current_valid(current, menu, (_("play in mainwindow"), self.playMain), level = 0) |
else:
if 'FROM SATELLITES' in current_root.getPath():
append_when_current_valid(current, menu, (_("remove selected satellite"), self.removeSatelliteServices), level = 0)
if haveBouquets:
if not inBouquet and not "PROVIDERS" in current_sel_path:
append_when_current_valid(current, menu, (_("copy to bouquets"), self.copyCurrentToBouquetList), |
uliss/quneiform | tests/py/cf.py | Python | gpl-3.0 | 12,883 | 0.006443 | #!@PYTHON_EXECUTABLE@
# -*- coding: utf-8 -*-
import os, sys
import zipfile
import platform
from subprocess import *
from xml.dom import minidom
import re
import glob
os.environ['CF_DATADIR'] = "@CMAKE_SOURCE_DIR@/datafiles"
os.environ['PATH'] = os.environ['PATH'] + ":@CMAKE_BINARY_DIR@"
# globals exe's and paths
CUNEIFORM = "@CUNEIFORM_EXE@"
CUNEIFORM_PROCESS = "@CUNEIFORM_PROCESS@"
ACCSUM = "@CMAKE_BINARY_DIR@/cf_accsum"
ACCURACY = "@CMAKE_BINARY_DIR@/cf_accuracy"
IMAGEDIR = "@CMAKE_SOURCE_DIR@/images"
DIFFOPTS = '@CF_DIFFOPTS@'
class bcolor:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OK = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
def disable(self):
| self.HEADER = ''
self.OKBLUE = ''
self.OK = ''
self.WARNING = ''
self.FAIL = ''
self.END = ''
@staticmethod
def clear():
bcolor.HEADER = ''
bcolor.OKBLUE = ''
bcolor.OK = ''
bcolor.WARNING = ''
bcolor.FAIL = ''
bcolor.END = ''
if platform.system | () == 'Windows':
bcolor.clear()
class Tester:
_imagedir = ''
_images = []
_version = None
_tests_passed = 0
_tests_failed = 0
_language = None
_output = None
_output_image_dir = None
_format = None
_line_breaks = False
_sample_ext = None
_args = []
_turn = 0
_pagenum = 0
def __init__(self, imagedir=''):
self._imagedir = os.path.join(IMAGEDIR, imagedir)
def __del__(self):
self.printTestStat()
def accuracy(self, img):
output = self.makeOutput(img)
report_str = Popen([ACCURACY, self.makeSampleName(img), output], stdout=PIPE).communicate()[0]
report_file = open("%s.acc" % output, 'w')
report_file.write(report_str)
lines = report_str.split('\n')
if len(lines) > 4:
print lines[3]
print lines[4]
def addArg(self, arg):
self._args.append(arg)
def addFail(self):
self._tests_failed += 1
def addImage(self, img):
self._images.append(img)
''' adds image to tester '''
def addImages(self, files):
self._images += files
''' removes all images from tester '''
def clear(self):
self._images = []
def cuneiform(self, args, **kwargs):
cmd = [CUNEIFORM] + args
retcode = call(cmd, **kwargs)
if retcode != 0:
print ' '.join(cmd)
return retcode
def cuneiformProcess(self, args, **kwargs):
cmd = [CUNEIFORM_PROCESS] + args
retcode = call(cmd, **kwargs)
if retcode != 0:
print ' '.join(cmd)
return retcode
def cuneiformTest(self, img, process=False):
if not os.path.exists(img):
self.printError("image file not exists: %s\n" % img)
return False
if self._format is None:
self.printError('output file not specified and output format is not set')
return False
self._output = self.makeOutput(img)
if process:
retcode = self.cuneiformProcess(self.makeArgs(img), stdout=PIPE, stderr=PIPE)
else:
retcode = self.cuneiform(self.makeArgs(img), stdout=PIPE, stderr=PIPE)
if retcode != 0:
self.printFail(img, "")
self._tests_failed += 1
return False
if os.path.getsize(self._output) == 0:
self.printFail(img, "No output")
self._tests_failed += 1
return False
else:
self.printOk(img)
self._tests_passed += 1
return True
def diff(self, first, second, **kwargs):
cmd = ['diff', DIFFOPTS, first, second]
#print cmd
return call(cmd, **kwargs)
def diffNative(self, first, second, **kwargs):
self.fileReplace(second, r'serialization::archive \d+', 'serialization::archive 9')
return self.diff(first, second, **kwargs)
def diffOdf(self, first, second, **kwargs):
first_odf = zipfile.ZipFile(first)
second_odf = zipfile.ZipFile(second)
first_content = first_odf.read('content.xml')
second_content = second_odf.read('content.xml')
if first_content == second_content:
return 0
else:
first_xml = os.path.basename(first) + '.xml'
f = open(first_xml,'w')
f.write(first_content)
f.close()
second_xml = second + '.xml'
f = open(second_xml, 'w')
f.write(second_content)
f.close()
return self.diff(first_xml, second_xml, **kwargs)
def diffXml(self, xml1, xml2, **kwargs):
first_xml = open(xml1, 'r')
second_xml = open(xml2, 'r')
dom1 = minidom.parseString(first_xml.read())
dom2 = minidom.parseString(second_xml.read())
self.unsetBoostVersion(dom1)
self.unsetBoostVersion(dom2)
first_xml.close()
second_xml.close()
res = self.isEqualElement(dom1.documentElement, dom2.documentElement)
if res == True:
return 0
else:
self.diff(xml1, xml2, **kwargs)
return 1
def diffTestContent(self, img, content):
if not self.cuneiformTest(img):
return False
self._format = 'text'
if not os.path.exists(self._output):
self.printFail(img, "\n(output not exists: %s)" % self._output)
self._tests_failed += 1
return False
second_file = open(self._output, 'r')
second_content = second_file.read().strip()
second_file.close()
os.unlink(self._output)
if second_content == content:
self._tests_passed += 1
return True
else:
print second_content, '!=', content
self._tests_failed += 1
self.printFail(img, '(not equal)')
return False
def diffTest(self, img):
if not self.cuneiformTest(img):
return False
if self._format == 'odf':
self.setSampleExt('odt')
elif self._sample_ext == 'odt':
self._sample_ext = None
sample_name = self.makeSampleName(img)
if not os.path.exists(sample_name):
self.printFail(img, "\n(sample output not exists: %s)" % sample_name)
self._tests_failed += 1
return False
diff_name = self.makeDiffName()
diff_output = open(diff_name, 'w')
if self._format == 'odf':
retcode = self.diffOdf(sample_name, self._output, stdout=diff_output)
elif self._format == 'native':
retcode = self.diffNative(sample_name, self._output, stdout=diff_output)
elif self._format == 'native-xml':
retcode = self.diffXml(sample_name, self._output, stdout=diff_output)
else:
retcode = self.diff(sample_name, self._output, stdout=diff_output)
if retcode != 0:
diff_output.close()
self.printFail(img, '(difference found)')
print ' '.join([CUNEIFORM] + self.makeArgs(img))
self._tests_failed += 1
return False
else:
self._tests_passed += 1
diff_output.close()
os.unlink(self._output)
os.unlink(diff_name)
return True
def fileReplace(self, filename, pattern, to):
f = open(filename, 'r')
new_f = re.sub(pattern, to, f.read())
f.close()
f = open(filename, 'w')
f.write(new_f)
f.close()
def isEqualXML(a, b):
da, db = minidom.parseString(a), minidom.parseString(b)
return isEqualElement(da.documentElement, db.documentElement)
def isEqualElement(self, a, b):
if a.tagName != b.tagName:
print " [XML] Different tag names: %s %s" % (a.tagName, b.tagName)
|
aladdinwang/django-cms | cms/migrations/0049_auto__del_field_page_template.py | Python | bsd-3-clause | 15,791 | 0.008233 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaM | igration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Page.template'
db.delete_column(u'cms_page', 'template')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Page.template'
raise Runtime | Error("Cannot reverse this migration. 'Page.template' and its values cannot be restored.")
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cms_template': {
'Meta': {'object_name': 'CMS_Template'},
'cms_template': ('filebrowser.fields.FileBrowseField', [], {'max_length': '500', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'content_template_path': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'exported': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeF |
enriquesanchezb/practica_utad_2016 | venv/lib/python2.7/site-packages/_pytest/main.py | Python | apache-2.0 | 26,215 | 0.002022 | """ core implementation of testing process: init, session, runtest loop. """
import imp
import os
import re
import sys
import _pytest
import _pytest._code
import py
import pytest
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
# directory containing the _pytest package; tracebacks are cut at this dir
# so internal frames are hidden from the user
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
EXIT_NOTESTSCOLLECTED = 5
# matches a valid Python identifier
name_re = re.compile("^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
    """Register ini values and command line options.

    Adds the ``norecursedirs``/``testpaths`` ini settings plus the
    "general", "collect" and "debugconfig" option groups.

    Note: the original code had stray trailing commas after two
    ``addoption`` calls, turning the statements into discarded 1-tuples;
    they are removed here (behavior is unchanged).
    """
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
        type="args", default=[])
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #             "**/test_*.py", "**/*_test.py"]
    #)
    group = parser.getgroup("general", "running and selection options")
    group._addoption('-x', '--exitfirst', action="store_true", default=False,
               dest="exitfirst",
               help="exit instantly on first error or failed test.")
    group._addoption('--maxfail', metavar="num",
               action="store", type=int, dest="maxfail", default=0,
               help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
               help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
               help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
        help="only collect tests, don't execute them.")
    group.addoption('--pyargs', action="store_true",
        help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
        help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
        metavar="dir",
        help="only load conftest.py's relative to specified dir.")
    group.addoption('--noconftest', action="store_true",
        dest="noconftest", default=False,
        help="Don't load any conftest.py files.")
    group = parser.getgroup("debugconfig",
        "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
        help="base temporary directory for this test run.")
def pytest_namespace():
    """Expose the collection node classes under the ``pytest.collect`` namespace."""
    node_classes = {
        "Item": Item,
        "Collector": Collector,
        "File": File,
        "Session": Session,
    }
    return {"collect": node_classes}
def pytest_configure(config):
    """Store the active config on the ``pytest`` module and expand ``-x``."""
    pytest.config = config # compatibility
    # -x/--exitfirst is just shorthand for --maxfail=1
    if config.option.exitfirst:
        config.option.maxfail = 1
def wrap_session(config, doit):
    """Skeleton command line program.

    Runs ``doit(config, session)`` with full session setup/teardown and
    maps exceptions onto the EXIT_* status codes.  ``pytest_sessionfinish``
    and unconfigure are guaranteed to run via the ``finally`` clause.
    """
    session = Session(config)
    session.exitstatus = EXIT_OK
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            session.exitstatus = doit(config, session) or 0
        except pytest.UsageError:
            raise
        except KeyboardInterrupt:
            excinfo = _pytest._code.ExceptionInfo()
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except BaseException:
            # intentionally catches everything (including SystemExit, which
            # is reported below); was a bare ``except:`` before, which is
            # equivalent but hides the intent and trips linters (E722)
            excinfo = _pytest._code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
    finally:
        excinfo = None  # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        config._ensure_unconfigure()
    return session.exitstatus
def pytest_cmdline_main(config):
    """Default command line entry point: run ``_main`` inside the session skeleton."""
    return wrap_session(config, _main)
def _main(config, session):
    """Default protocol: collect, run the test loop, then map the session
    outcome onto an exit code (``None`` means success)."""
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)
    if session.testsfailed:
        return EXIT_TESTSFAILED
    if session.testscollected == 0:
        return EXIT_NOTESTSCOLLECTED
def pytest_collection(session):
    """Default collection hook: delegate to ``Session.perform_collect``."""
    return session.perform_collect()
def pytest_runtestloop(session):
    """Run every collected item, aborting when ``session.shouldstop`` is set."""
    if session.config.option.collectonly:
        return True

    def _successor(index):
        # this is a function to avoid python2
        # keeping sys.exc_info set when calling into a test
        # python2 keeps sys.exc_info till the frame is left
        try:
            return session.items[index + 1]
        except IndexError:
            return None

    for index, test_item in enumerate(session.items):
        test_item.config.hook.pytest_runtest_protocol(
            item=test_item, nextitem=_successor(index))
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def pytest_ignore_collect(path, config):
    """Return True when *path* is excluded via ``collect_ignore`` (from a
    conftest) or the ``--ignore`` command line option."""
    basedir = path.dirpath()
    ignored = config._getconftest_pathlist("collect_ignore", path=basedir)
    ignored = ignored or []
    excluded = config.getoption("ignore")
    if excluded:
        ignored.extend(py.path.local(entry) for entry in excluded)
    return path in ignored
class FSHookProxy:
    """Lazily resolves hook callers for a filesystem path.

    Each hook caller excludes the plugins in ``remove_mods`` and is cached
    on the instance so ``__getattr__`` only fires once per hook name.
    """
    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name):
        hook_caller = self.pm.subset_hook_caller(
            name, remove_plugins=self.remove_mods)
        # cache on the instance so later lookups bypass __getattr__
        self.__dict__[name] = hook_caller
        return hook_caller
def compatproperty(name):
    """Return a read-only property forwarding to ``pytest.<name>``."""
    def _get(self):
        # deprecated - use pytest.name
        return getattr(pytest, name)
    return property(_get)
class NodeKeywords(MappingMixin):
    """Mapping of a node's markers/keywords, with lookup falling back to
    the parent node's keywords.  The node's own name is always a key."""
    def __init__(self, node):
        self.node = node
        self.parent = node.parent
        self._markers = {node.name: True}
    def __getitem__(self, key):
        try:
            return self._markers[key]
        except KeyError:
            if self.parent is None:
                raise
            # walk up the collection tree
            return self.parent.keywords[key]
    def __setitem__(self, key, value):
        self._markers[key] = value
    def __delitem__(self, key):
        raise ValueError("cannot delete key in keywords dict")
    def __iter__(self):
        seen = set(self._markers)
        if self.parent is not None:
            seen.update(self.parent.keywords)
        return iter(seen)
    def __len__(self):
        # __iter__ returns a plain iterator; materialize it first because
        # calling len() on an iterator raises TypeError.
        return len(list(self.__iter__()))
    def keys(self):
        return list(self)
    def __repr__(self):
        return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the | scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collecte |
clicheio/cliche | cliche/celery.py | Python | mit | 7,480 | 0 | """:mod:`cliche.celery` --- Celery_-backed task queue worker
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes web app should provide time-consuming features that cannot
immediately respond to user (and we define "immediately" as "shorter than
a second or two seconds" in here). Such things should be queued and then
processed by background workers. Celery_ does that in natural way.
We use this at serveral points like resampling images to make thumbnails,
or crawling ontology data from other services. Such tasks are definitely
cannot "immediately" respond.
.. seealso::
:ref:`faq-when-to-use` --- Celery FAQ
Answer to what kinds of benefits are there in Celery.
`Queue everything and delight everyone`__
This article describes why you should use a queue in a web application.
__ http://decafbad.com/blog/2008/07/04/queue-everything-and-delight-everyone
.. _Celery: http://celeryproject.org/
How to define tasks
-------------------
In order to defer some types of tasks, you have to make these functions
a task. It's not a big deal, just attach a decorator to them::
@celery.task(ignore_result=True)
def do_heavy_work(some, inputs):
'''Do something heavy work.'''
...
How to defer tasks
------------------
It's similar to ordinary function calls except it uses :meth:`delay()
<celery.app.task.Task.delay>` method (or :meth:`apply_async()
<celery.app.task.Task.apply_async>` method) instead of calling operator::
do_heavy_work.delay('some', inputs='...')
That command will be queued and sent to one of distributed workers.
That means these argument values are serialized using :mod:`json`.
If any argument value isn't serializable it will error.
Simple objects like numbers, strings, tuples, lists, dictionaries are
safe to serialize.
In the other hand, entity objects (that an instance of :class:`cliche.orm.Base`
and its subtypes) mostly fail to serialize, so use primary key values like
entity id instead of object itself.
What things are ready for task?
-------------------------------
Every deferred call of task share equivalent inital state:
- You can get a database session using :func:`get_session()`.
- You also can get a database engine using :func:`get_database_engine()`.
While there are several things not ready either:
- Flask's request context isn't ready for each task. You should explicitly
deal with it using :meth:`~flask.Flask.request_context()` method
to use context locals like :class:`flask.request`.
See also :ref:`request-context`.
- Physical computers would differ from web environment. Total memory,
CPU capacity, the number of processors, IP address, operating system,
Python VM (which of PyPy or CPython), and other many environments also
can vary. Assume nothing on these variables.
- Hence global states (e.g. module-level global variables) are completely
isolated from web environment which called the task. Don't depend on
such global states.
How to run Celery worker
------------------------
:program:`celery worker` (formerly :program:`celeryd`) takes Celery app object
as its endpoint, and Cliche's endpoint is :data:`cliche.celery.celery`.
You can omit the latter variable name and module name: :mod:`cliche`.
Execute the following command in the shell:
.. sourcecode:: console
$ celery worker -A cliche --config dev.cfg.yml
-------------- celery@localhost v3.1.13 (Cipater)
---- **** -----
--- * *** * -- Darwin-13.3.0-x86_64-i386-64bit
-- * - **** ---
- ** ---------- [config]
- ** ---------- .> app: cliche.celery:0x1... (cliche.celery.Loader)
- ** ---------- .> transport: redis://localhost:6379/5
- ** ---------- .> results: disabled
- *** --- * --- .> concurrency: 4 (prefork)
-- ******* ----
--- ***** ----- [queues]
-------------- .> celery exchange=celery(direct) key=celery
[2014-09-12 00:31:25,150: WARNING/MainProcess] celery@localhost ready.
Note that you should pass the same configuration file (``--config`` option)
to the WSGI application. It should contain ``DATABASE_URL`` and so on.
References
----------
"""
import os
import pathlib
from celery import Celery, current_app, current_task
from celery.loaders.base import BaseLoader
from celery.signals import celeryd_init, task_failure, task_postrun
from raven import Client
from raven.conf import setup_logging
from raven.handlers.logging import SentryHandler
from sqlalchemy.engine import Engine, create_engine
from .config import ConfigDict, read_config
from .orm import Session, import_all_modules
__all__ = (
'Loader',
'get_database_engine',
'get_session',
'get_raven_client',
'app',
)
app = Celery(__name__, loader=__name__ + ':Loader')
class Loader(BaseLoader):
    """The loader used by Cliche app."""
    def read_configuration(self):
        # Celery sets CELERY_CONFIG_MODULE from --config; fall back to the
        # CLICHE_CONFIG env var shared with the web application.
        config = ConfigDict()
        config_path = os.environ.get(
            'CELERY_CONFIG_MODULE',
            os.environ.get('CLICHE_CONFIG')
        )
        if config_path is not None:
            config = read_config(pathlib.Path(config_path))
        # make sure all task modules are imported by the worker
        config['CELERY_IMPORTS'] = import_all_modules()
        config['CELERY_ACCEPT_CONTENT'] = ['pickle', 'json']
        return config
def get_database_engine() -> Engine:
    """Get a database engine.
    :returns: a database engine
    :rtype: :class:`sqlalchemy.engine.base.Engine`
    """
    config = current_app.conf
    # create the engine once and cache it on the Celery config; also
    # default the broker/result backend to the same database unless they
    # were configured explicitly
    if 'DATABASE_ENGINE' not in config:
        db_url = config['DATABASE_URL']
        config['DATABASE_ENGINE'] = create_engine(db_url)
        if 'BROKER_URL' not in config:
            # sqla+ prefix selects Celery's SQLAlchemy transport
            config['BROKER_URL'] = 'sqla+' + db_url
        if 'CELERY_RESULT_BACKEND' not in config and \
           'CELERY_RESULT_DBURI' not in config:
            config['CELERY_RESULT_BACKEND'] = 'database'
            config['CELERY_RESULT_DBURI'] = db_url
    return config['DATABASE_ENGINE']
def get_session() -> Session:
    """Get a database session.
    :returns: a database session
    :rtype: :class:`~.orm.Session`
    """
    task = current_task._get_current_object()
    request = task.request
    # lazily open one session per task request and reuse it for the
    # duration of the task (closed by close_session on task_postrun)
    if getattr(request, 'db_session', None) is None:
        request.db_session = Session(bind=get_database_engine())
    return request.db_session
@task_postrun.connect
def close_session(task_id, task, *args, **kwargs):
    """Close the session if there's the opened session.

    Connected to Celery's ``task_postrun`` signal so the per-task session
    opened by :func:`get_session` never leaks across tasks.
    """
    session = getattr(task.request, 'db_session', None)
    if session is not None:
        session.close()
def get_raven_client() -> Client:
    """Get a raven client.

    :returns: a raven client, or :const:`None` when ``SENTRY_DSN`` is
              not configured
    :rtype: :class:`raven.Client`
    """
    conf = current_app.conf
    if 'SENTRY_DSN' not in conf:
        return None
    # create the client once and cache it on the Celery config
    if 'RAVEN_CLIENT' not in conf:
        conf['RAVEN_CLIENT'] = Client(
            dsn=conf['SENTRY_DSN'],
            include_paths=[
                'cliche',
            ],
        )
    return conf['RAVEN_CLIENT']
@celeryd_init.connect
def setup_raven_logging(conf=None, **kwargs):
    """On worker startup, route logging records to Sentry when configured."""
    raven_client = get_raven_client()
    if raven_client is None:
        return
    setup_logging(SentryHandler(raven_client))
@task_failure.connect
def report_task_failure(task_id, exception, args, kwargs,
                        traceback, einfo, sender):
    """Report failed-task exceptions to Sentry.

    :func:`get_raven_client` returns :const:`None` when ``SENTRY_DSN`` is
    not configured; previously this handler unconditionally called
    ``client.captureException`` and crashed with ``AttributeError`` in
    that case (compare :func:`setup_raven_logging`, which guards).
    """
    client = get_raven_client()
    if client is not None:
        client.captureException(einfo.exc_info)
|
pim89/youtube-dl | youtube_dl/extractor/dramafever.py | Python | unlicense | 7,450 | 0.002148 | # coding: utf-8
from __future__ import unicode_literals
import itertools
from .amp import AMPIE
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
from ..utils import (
ExtractorError,
clean_html,
int_or_none,
sanitized_Request,
urlencode_postdata
)
class DramaFeverBaseIE(AMPIE):
    """Shared login and API consumer-secret handling for DramaFever extractors."""
    _LOGIN_URL = 'https://www.dramafever.com/accounts/login/'
    _NETRC_MACHINE = 'dramafever'
    # fallback consumer secret, used when main.js cannot be fetched/parsed
    _CONSUMER_SECRET = 'DA59dtVXYLxajktV'
    _consumer_secret = None
    def _get_consumer_secret(self):
        # the current secret is embedded in the site's main.js as `var cs = '...'`
        mainjs = self._download_webpage(
            'http://www.dramafever.com/static/51afe95/df2014/scripts/main.js',
            None, 'Downloading main.js', fatal=False)
        if not mainjs:
            return self._CONSUMER_SECRET
        return self._search_regex(
            r"var\s+cs\s*=\s*'([^']+)'", mainjs,
            'consumer secret', default=self._CONSUMER_SECRET)
    def _real_initialize(self):
        self._login()
        self._consumer_secret = self._get_consumer_secret()
    def _login(self):
        # credentials come from --username/--password or ~/.netrc
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_form = {
            'username': username,
            'password': password,
        }
        request = sanitized_Request(
            self._LOGIN_URL, urlencode_postdata(login_form))
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
        # a logout link in the response is the only reliable success signal
        if all(logout_pattern not in response
               for logout_pattern in ['href="/accounts/logout/"', '>Log out<']):
            error = self._html_search_regex(
                r'(?s)class="hidden-xs prompt"[^>]*>(.+?)<',
                response, 'error message', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')
class DramaFeverIE(DramaFeverBaseIE):
    """Extractor for single DramaFever episodes."""
    IE_NAME = 'dramafever'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+/[0-9]+)(?:/|$)'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/1/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512.1',
            'ext': 'mp4',
            'title': 'Cooking with Shin 4512.1',
            'description': 'md5:a8eec7942e1664a6896fcd5e1287bfd0',
            'episode': 'Episode 1',
            'episode_number': 1,
            'thumbnail': 're:^https?://.*\.jpg',
            'timestamp': 1404336058,
            'upload_date': '20140702',
            'duration': 343,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.dramafever.com/drama/4826/4/Mnet_Asian_Music_Awards_2015/?ap=1',
        'info_dict': {
            'id': '4826.4',
            'ext': 'mp4',
            'title': 'Mnet Asian Music Awards 2015 4826.4',
            'description': 'md5:3ff2ee8fedaef86e076791c909cf2e91',
            'episode': 'Mnet Asian Music Awards 2015 - Part 3',
            'episode_number': 4,
            'thumbnail': 're:^https?://.*\.jpg',
            'timestamp': 1450213200,
            'upload_date': '20151215',
            'duration': 5602,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]
    def _real_extract(self, url):
        # URL carries "<series>/<episode>"; the feed expects "<series>.<episode>"
        video_id = self._match_id(url).replace('/', '.')
        try:
            info = self._extract_feed_info(
                'http://www.dramafever.com/amp/episode/feed.json?guid=%s' % video_id)
        except ExtractorError as e:
            # the feed answers with an HTTP error for geo-blocked content
            if isinstance(e.cause, compat_HTTPError):
                raise ExtractorError(
                    'Currently unavailable in your country.', expected=True)
            raise
        series_id, episode_number = video_id.split('.')
        episode_info = self._download_json(
            # We only need a single episode info, so restricting page size to one episode
            # and dealing with page number as with episode number
            r'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_number=%s&page_size=1'
            % (self._consumer_secret, series_id, episode_number),
            video_id, 'Downloading episode info JSON', fatal=False)
        if episode_info:
            value = episode_info.get('value')
            if isinstance(value, list):
                for v in value:
                    if v.get('type') == 'Episode':
                        # attach English subtitles unless the placeholder URL is returned
                        subfile = v.get('subfile') or v.get('new_subfile')
                        if subfile and subfile != 'http://www.dramafever.com/st/':
                            info.setdefault('subtitles', {}).setdefault('English', []).append({
                                'ext': 'srt',
                                'url': subfile,
                            })
                        episode_number = int_or_none(v.get('number'))
                        episode_fallback = 'Episode'
                        if episode_number:
                            episode_fallback += ' %d' % episode_number
                        info['episode'] = v.get('title') or episode_fallback
                        info['episode_number'] = episode_number
                        break
        return info
class DramaFeverSeriesIE(DramaFeverBaseIE):
    """Playlist extractor for whole DramaFever series.

    Fixes two garbled lines in the episode loop (a corrupted ``continue``
    statement and a split ``episode_url`` identifier) that made the module
    unparseable.
    """
    IE_NAME = 'dramafever:series'
    _VALID_URL = r'https?://(?:www\.)?dramafever\.com/drama/(?P<id>[0-9]+)(?:/(?:(?!\d+(?:/|$)).+)?)?$'
    _TESTS = [{
        'url': 'http://www.dramafever.com/drama/4512/Cooking_with_Shin/',
        'info_dict': {
            'id': '4512',
            'title': 'Cooking with Shin',
            'description': 'md5:84a3f26e3cdc3fb7f500211b3593b5c1',
        },
        'playlist_count': 4,
    }, {
        'url': 'http://www.dramafever.com/drama/124/IRIS/',
        'info_dict': {
            'id': '124',
            'title': 'IRIS',
            'description': 'md5:b3a30e587cf20c59bd1c01ec0ee1b862',
        },
        'playlist_count': 20,
    }]
    _PAGE_SIZE = 60  # max is 60 (see http://api.drama9.com/#get--api-4-episode-series-)
    def _real_extract(self, url):
        series_id = self._match_id(url)
        series = self._download_json(
            'http://www.dramafever.com/api/4/series/query/?cs=%s&series_id=%s'
            % (self._consumer_secret, series_id),
            series_id, 'Downloading series JSON')['series'][series_id]
        title = clean_html(series['name'])
        description = clean_html(series.get('description') or series.get('description_short'))
        entries = []
        # page through the episode listing until num_pages is reached
        for page_num in itertools.count(1):
            episodes = self._download_json(
                'http://www.dramafever.com/api/4/episode/series/?cs=%s&series_id=%s&page_size=%d&page_number=%d'
                % (self._consumer_secret, series_id, self._PAGE_SIZE, page_num),
                series_id, 'Downloading episodes JSON page #%d' % page_num)
            for episode in episodes.get('value', []):
                episode_url = episode.get('episode_url')
                if not episode_url:
                    continue
                entries.append(self.url_result(
                    compat_urlparse.urljoin(url, episode_url),
                    'DramaFever', episode.get('guid')))
            if page_num == episodes['num_pages']:
                break
        return self.playlist_result(entries, series_id, title, description)
|
jart/tensorflow | tensorflow/python/keras/engine/training_utils.py | Python | apache-2.0 | 32,266 | 0.006446 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import math
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.ops import math_ops
def _map_nested(data, func):
"""Maps each nested element using func."""
if isinstance(data, list):
return [_map_nested(nested_data, func) for nested_data in data]
elif isinstance(data, tuple):
return tuple(_map_nested(nested_data, func) for nested_data in data)
elif isinstance(data, dict):
return {
k: _map_nested(nested_data, func) for k, nested_data in data.items()
}
else:
return func(data)
def _nested_all(data, cond_func):
"""Checks if all elements in a nested structure satisfy cond_func."""
if isinstance(data, (tuple, list)):
return all([_nested_all(nested_data, cond_func) for nested_data in data])
elif isinstance(data, dict):
return all(
[_nested_all(nested_data, cond_func) for nested_data in data.values()])
else:
return cond_func(data)
def _nested_any(data, cond_func):
"""Checks if any nested_elements in a nested structure satisfy cond_func."""
if isinstance(data, (tuple, list)):
return any([_nested_any(nested_data, cond_func) for nested_data in data])
elif isinstance(data, dict):
return any(
[_nested_any(nested_data, cond_func) for nested_data in data.values()])
else:
return cond_func(data)
def _convert_lists_to_tuples(data):
"""Converts all lists to tuples, since Datasets expect tuples."""
if isinstance(data, (tuple, list)):
return tuple(_convert_lists_to_tuples(nested_data) for nested_data in data)
elif isinstance(data, dict):
return {
k: _convert_lists_to_tuples(nested_data)
for k, nested_data in data.items()
}
else:
return data
def _get_batch_axis_size(data):
"""Returns batch axis shape for nested data."""
if isinstance(data, (tuple, list)):
return _get_batch_axis_size(data[0])
elif isinstance(data, dict):
return _get_batch_axis_size(list(data.values()))
else:
return int(data.shape[0])
def convert_to_iterator(x=None,
                        y=None,
                        sample_weights=None,
                        batch_size=None,
                        steps_per_epoch=None,
                        epochs=1,
                        shuffle=False):
  """Converts NumPy arrays or EagerTensors to an EagerIterator.

  Combines all provided data into a single EagerIterator.

  Arguments:
      x: NumPy array or EagerTensor, or list of Numpy arrays or EagerTensors
        representing inputs to a model.
      y: Optional. NumPy array or EagerTensor, or list of Numpy arrays or
        EagerTensors representing targets of a model.
      sample_weights: Optional NumPy array or EagerTensor representing sample
        weights.
      batch_size: Used to batch data and calculate how many steps EagerIterator
        should take per epoch.
      steps_per_epoch: If provided, how many steps EagerIterator should take
        per epoch.
      epochs: Epochs to repeat iterator for.
      shuffle: Whether to shuffle data after each epoch.

  Raises:
      ValueError: if steps_per_epoch cannot be calculated from the data
        provided.

  Returns:
      (Iterator, steps_per_epoch).
  """
  if isinstance(x, iterator_ops.EagerIterator):
    return x, steps_per_epoch
  # only include y/sample_weights in the dataset when they were all provided
  if not _nested_any(sample_weights, lambda x: x is None):
    data = (x, y, sample_weights)
  elif not _nested_any(y, lambda x: x is None):
    data = (x, y)
  else:
    # always wrap in a tuple, so we know y, sample_weights weren't set
    # even when x has multiple elements
    data = (x,)
  data = _convert_lists_to_tuples(data)
  if steps_per_epoch is None and batch_size is not None:
    num_samples = _get_batch_axis_size(data)
    steps_per_epoch = int(math.ceil(num_samples / batch_size))
  if steps_per_epoch is None:
    # the previous message concatenated without spaces, producing
    # "...either batch_size orsteps_per_epoch."
    raise ValueError('Could not determine steps_per_epoch. '
                     'Please provide either batch_size or '
                     'steps_per_epoch.')
  # TODO(omalleyt) for NumPy arrays in graph mode
  # placeholder ops should be used
  # this is only ideal for eager mode
  dataset = dataset_ops.Dataset.from_tensor_slices(data)
  if batch_size is not None:
    dataset = dataset.batch(batch_size)
  if shuffle:
    dataset = dataset.shuffle(buffer_size=10000)
  dataset = dataset.repeat(epochs)
  iterator = dataset.make_one_shot_iterator()
  return iterator, steps_per_epoch
def check_num_samples(ins,
                      batch_size=None,
                      steps=None,
                      steps_name='steps'):
  """Determine the number of samples provided for training and evaluation.

  When running with `steps` the number of samples is undefined, so `None`
  is returned in that case.

  Arguments:
      ins: List of tensors to be fed to the Keras function.
      batch_size: Integer batch size or `None` if not defined.
      steps: Total number of steps (batches of samples) before declaring
        the loop finished. Ignored with the default value of `None`.
      steps_name: The public API's parameter name for `steps`.

  Returns:
      The size of the batch axis of the first input when `steps` is not in
      use, otherwise `None`.

  Raises:
      ValueError: In case of invalid arguments.
  """
  if steps is not None and batch_size is not None:
    raise ValueError(
        'If ' + steps_name + ' is set, the `batch_size` must be None.')
  if check_steps_argument(ins, steps, steps_name):
    return None
  first_input = ins[0]
  if hasattr(first_input, 'shape'):
    return int(first_input.shape[0])
  return None  # Edge case where ins == [static_learning_phase]
def standardize_single_array(x):
  """Passes `None` and tensors through; expands rank-1 arrays to column vectors."""
  if x is None:
    return None
  if tensor_util.is_tensor(x):
    return x
  if x.ndim == 1:
    return np.expand_dims(x, 1)
  return x
def standardize_input_data(data,
names,
shapes=None,
check_batch_axis=True,
exception_prefix=''):
"""Normalizes inputs and targets provided by users.
Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
Arguments:
data: User-provided input data (polymorphic).
names: List of expected array names.
shapes: Optional list of expected array shapes.
check_batch_axis: Boolean; whether to check that
the batch axis of the arrays matches the expected
value found in `shapes`.
exce |
drewrobb/marathon-python | marathon/models/deployment.py | Python | mit | 3,907 | 0.003071 | from .base import MarathonObject, MarathonResource
class MarathonDeployment(MarathonResource):
    """Marathon deployment resource.

    See: https://mesosphere.github.io/marathon/docs/rest-api.html#deployments
         https://mesosphere.github.io/marathon/docs/generated/api.html#v2_deployments_get

    :param list[str] affected_apps: list of affected app ids
    :param current_actions: current actions
    :type current_actions: list[:class:`marathon.models.deployment.MarathonDeploymentAction`] or list[dict]
    :param int current_step: current step
    :param str id: deployment id
    :param steps: deployment steps
    :type steps: list[:class:`marathon.models.deployment.MarathonDeploymentAction`] or list[dict]
    :param int total_steps: total number of steps
    :param str version: version id
    """

    def __init__(
            self, affected_apps=None, current_actions=None, current_step=None, id=None, steps=None,
            total_steps=None, version=None):
        self.affected_apps = affected_apps
        self.current_actions = [
            a if isinstance(
                a, MarathonDeploymentAction) else MarathonDeploymentAction().from_json(a)
            for a in (current_actions or [])
        ]
        self.current_step = current_step
        self.id = id
        self.steps = [self.parse_deployment_step(step) for step in (steps or [])]
        self.total_steps = total_steps
        self.version = version

    def parse_deployment_step(self, step):
        """Normalize one raw deployment step from the API response.

        Marathon >= 1.0.0 returns each step as a dict; earlier versions
        return a plain list of actions.
        """
        if step.__class__ == dict:
            # This is what Marathon 1.0.0 returns: steps
            return MarathonDeploymentStep().from_json(step)
        elif step.__class__ == list:
            # This is Marathon < 1.0.0 style, a list of actions
            return [s if isinstance(s, MarathonDeploymentAction) else MarathonDeploymentAction().from_json(s) for s in step]
        else:
            return step
class MarathonDeploymentAction(MarathonObject):
    """One action within a deployment (e.g. scale/restart of an app).

    See: https://mesosphere.github.io/marathon/docs/rest-api.html#deployments
    :param str action: action
    :param str app: app id
    :param str apps: app id (see https://github.com/mesosphere/marathon/pull/802)
    :param str type: action type (newer API field)
    :param type readiness_check_results: Undocumented
    """
    def __init__(self, action=None, app=None, apps=None, type=None, readiness_check_results=None):
        self.action = action
        self.app = app
        self.apps = apps
        self.type = type # TODO: Remove builtin shadow
        self.readiness_check_results = readiness_check_results # TODO: The docs say this is called just "readinessChecks?"
class MarathonDeploymentPlan(MarathonObject):
    """Deployment plan: original state, target state and the ordered steps
    that transform one into the other."""
    def __init__(self, original=None, target=None,
                 steps=None, id=None, version=None):
        self.original = MarathonDeploymentOriginalState.from_json(original)
        self.target = MarathonDeploymentTargetState.from_json(target)
        self.steps = [MarathonDeploymentStep.from_json(x) for x in steps]
        self.id = id
        self.version = version
class MarathonDeploymentStep(MarathonObject):
    """A single deployment step: the list of actions it performs."""
    def __init__(self, actions=None):
        self.actions = [a if isinstance(a, MarathonDeploymentAction) else MarathonDeploymentAction.from_json(a) for a in (actions or [])]
class MarathonDeploymentOriginalState(MarathonObject):
    """Snapshot of the group/app tree before the deployment was applied."""
    def __init__(self, dependencies=None,
                 apps=None, id=None, version=None, groups=None):
        self.apps = apps
        self.groups = groups
        self.id = id
        self.version = version
        self.dependencies = dependencies
class MarathonDeploymentTargetState(MarathonObject):
    """Snapshot of the group/app tree the deployment is driving towards."""
    def __init__(self, groups=None, apps=None,
                 dependencies=None, id=None, version=None):
        self.apps = apps
        self.groups = groups
        self.id = id
        self.version = version
        self.dependencies = dependencies
|
wjo1212/aliyun-log-python-sdk | aliyun/log/cursor_time_response.py | Python | mit | 808 | 0.001238 | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
from .logresponse import LogResponse
class GetCursorTimeResponse(LogResponse):
    """ The response of the get_cursor_time API from log.

    :type header: dict
    :param header: GetCursorTimeResponse HTTP response header

    :type resp: dict
    :param resp: the HTTP response body
    """
    def __init__(self, resp, header):
        LogResponse.__init__(self, header, resp)
        self.cursor_time = resp['cursor_time']

    def get_cursor_time(self):
        """ Get the cursor time from the response.

        :return: the server time corresponding to the cursor
        """
        return self.cursor_time

    def log_print(self):
        """Dump the response headers and cursor time to stdout."""
        print('GetCursorTimeResponse')
        print('headers:', self.get_all_headers())
        print('cursor_time:', self.cursor_time)
PyCon/pycon | pycon/sponsorship/admin.py | Python | bsd-3-clause | 6,184 | 0.002264 | from urllib import quote
from django import forms
from django.contrib import admin
from django.db import models
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from pycon.sponsorship.models import SponsorLevel, SponsorPackage, Sponsor, Benefit, \
BenefitLevel, BenefitPackage, SponsorBenefit, BENEFITS
from pycon.sponsorship.views import email_selected_sponsors_action
class BenefitLevelInline(admin.TabularInline):
    # Inline editor for the Benefit <-> SponsorLevel through model.
    model = BenefitLevel
    extra = 0
class BenefitPackageInline(admin.TabularInline):
    # Inline editor for the Benefit <-> SponsorPackage through model.
    model = BenefitPackage
    extra = 0
class SponsorBenefitInline(admin.StackedInline):
    # Per-sponsor benefit records, shown stacked on the Sponsor change page.
    model = SponsorBenefit
    extra = 0
    fieldsets = [
        (None, {
            "fields": [
                ("benefit", "active"),
                ("max_words", "other_limits"),
                "text",
                "upload",
            ]
        })
    ]
class SponsorAdmin(admin.ModelAdmin):
    """Admin for sponsors; benefit columns are appended from BENEFITS below."""
    save_on_top = True
    actions = [email_selected_sponsors_action]
    list_per_page = 1000000  # Do not limit sponsors per page, just one big page
    fieldsets = [
        (None, {
            "fields": ["name", "applicant", "level", ("active", "approval_time"),
                       "packages", "external_url", "display_url", "twitter_username",
                       "annotation", "web_description", "web_logo", "print_logo",]
        }),
        ("Desired benefits", {
            "fields": ["wants_table", "wants_booth", "small_entity_discount"],
        }),
        ("Sponsor Data", {
            "fields": ["booth_number", "job_fair_participant",
                       "job_fair_table_number", "registration_promo_codes",
                       "expo_promo_codes", "additional_discounted_registration_promo_codes",
                       "a_la_carte_registration_promo_codes"],
        }),
        ("Contact Information", {
            "fields": ["contact_name", "contact_emails", "contact_phone",
                       "contact_address"],
        }),
        ("Metadata", {
            "fields": ["added"],
            "classes": ["collapse"],
        })
    ]
    formfield_overrides = {
        models.ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
    }
    inlines = [SponsorBenefitInline]
    # NB: We add to list_display and list_filter below
    list_display = ["name", "level", "contact", "applicant_field", "active",
                    "approval_time"]
    list_filter = ["level", "active"]
    readonly_fields = ["approval_time"]

    def contact(self, sponsor):
        # comma-separated emails in mailto: should work: https://www.ietf.org/rfc/rfc2368.txt
        # but the commas need to be URL-quoted
        return format_html(
            u'<a href="mailto:{}">{}</a>',
            quote(u','.join(sponsor.contact_emails)),
            sponsor.contact_name
        )

    def applicant_field(self, sponsor):
        # Rendered via format_html for consistency with contact(); it
        # escapes both the email and the display name.
        name = sponsor.applicant.get_full_name()
        email = sponsor.applicant.email
        return format_html(u'<a href="mailto:{}">{}</a>', email, name)
    applicant_field.short_description = _(u"Applicant")

    def get_form(self, *args, **kwargs):
        # @@@ kinda ugly but using choices= on NullBooleanField is broken
        form = super(SponsorAdmin, self).get_form(*args, **kwargs)
        form.base_fields["active"].widget.choices = [
            (u"1", _(u"unreviewed")),
            (u"2", _(u"approved")),
            (u"3", _(u"rejected"))
        ]
        # Present applicants in a stable, human-friendly order.
        applicant_qs = form.base_fields['applicant'].queryset
        applicant_qs = applicant_qs.order_by('first_name', 'last_name', 'pk')
        form.base_fields['applicant'].queryset = applicant_qs
        return form

    # Define accessor functions for our benefit fields and add them to
    # list_display, so we can sort on them and give them sensible names.
    # Add the fields to list_filters while we're at it.
    for benefit in BENEFITS:
        benefit_name = benefit['name']
        field_name = benefit['field_name']

        def func_generator(ben):
            # Closure factory so each column binds its own benefit dict.
            def column_func(obj):
                return getattr(obj, ben['field_name'])
            column_func.short_description = ben['column_title']
            column_func.boolean = True
            column_func.admin_order_field = ben['field_name']
            return column_func

        list_display.append(func_generator(benefit))
        list_filter.append(field_name)

    def save_related(self, request, form, formsets, change):
        # NOTE(review): the extra save appears intended to refresh derived
        # state after the M2M/inline writes complete — confirm before removing.
        super(SponsorAdmin, self).save_related(request, form, formsets, change)
        obj = form.instance
        obj.save()
class BenefitAdmin(admin.ModelAdmin):
    """Admin for Benefit; lists which levels and packages include it."""

    inlines = [BenefitLevelInline, BenefitPackageInline]
    list_display = ['name', 'type', 'levels', 'packages']
    list_filter = ['benefit_levels__level', 'benefit_packages__package']

    def levels(self, benefit):
        """Comma-separated names of the levels granting this benefit."""
        names = [bl.level.name for bl in benefit.benefit_levels.all()]
        return u", ".join(names)

    def packages(self, benefit):
        """Comma-separated names of the packages granting this benefit."""
        names = [bp.package.name for bp in benefit.benefit_packages.all()]
        return u", ".join(names)
class SponsorLevelAdmin(admin.ModelAdmin):
    # Admin for sponsorship levels; 'benefits' summarises the linked benefits.
    list_display = ['name', 'available', 'order', 'cost', 'benefits']
    list_editable = ['order']
    list_filter = ['conference', 'benefit_levels__benefit']
    inlines = [BenefitLevelInline]
    def benefits(self, obj):
        # Comma-separated benefit names granted at this level.
        return ', '.join(obj.benefit_levels.values_list('benefit__name', flat=True))
class SponsorPackageAdmin(admin.ModelAdmin):
    # Same layout as SponsorLevelAdmin, but for purchasable packages.
    list_display = ['name', 'available', 'order', 'cost', 'benefits']
    list_editable = ['order']
    list_filter = ['conference', 'benefit_packages__benefit']
    inlines = [BenefitPackageInline]
    def benefits(self, obj):
        # Comma-separated benefit names included in this package.
        return ', '.join(obj.benefit_packages.values_list('benefit__name', flat=True))
# Wire all sponsorship models into the Django admin site.
admin.site.register(SponsorLevel, SponsorLevelAdmin)
admin.site.register(SponsorPackage, SponsorPackageAdmin)
admin.site.register(Sponsor, SponsorAdmin)
admin.site.register(Benefit, BenefitAdmin)
admin.site.register(SponsorBenefit,
                    list_display=('benefit', 'sponsor', 'active', '_is_complete'))
|
alex/pyvcs | tests/andrew_tests.py | Python | bsd-3-clause | 1,801 | 0.004997 | #!/usr/bin/env python
from datetime import datetime
import unittest

from pyvcs.backends import get_backend
from pyvcs.exceptions import FileDoesNotExist, FolderDoesNotExist
class BzrTest(unittest.TestCase):
    # Integration tests for the pyvcs bzr backend.
    # NOTE(review): these depend on a local Django bzr checkout at
    # /home/andrew/junk/django pinned to specific revisions — they only
    # pass on the original author's machine; confirm before relying on them.
    def setUp(self):
        bzr = get_backend('bzr')
        self.repo = bzr.Repository('/home/andrew/junk/django/')
    def test_commits(self):
        # Revision 6460 is a known commit; assert its recorded metadata.
        commit = self.repo.get_commit_by_id('6460')
        self.assert_(commit.author.startswith('gwilson'))
        self.assertEqual(commit.time, datetime(2008, 12, 23, 18, 25, 24, 19000))
        self.assert_(commit.message.startswith('Fixed #8245 -- Added a LOADING flag'))
        self.assertEqual(commit.files, ['tests/regressiontests/bug8245', 'tests/regressiontests/bug8245/__init__.py', 'tests/regressiontests/bug8245/admin.py', 'tests/regressiontests/bug8245/models.py', 'tests/regressiontests/bug8245/tests.py', 'django/contrib/admin/__init__.py'])
    def test_recent_commits(self):
        # Smoke test only: just verifies the call does not raise.
        results = self.repo.get_recent_commits()
    def test_list_directory(self):
        files, folders = self.repo.list_directory('tests/', '7254')
        self.assertEqual(files, ['runtests.py', 'urls.py'])
        self.assertEqual(folders, ['modeltests', 'regressiontests', 'templates'])
        self.assertRaises(FolderDoesNotExist, self.repo.list_directory, 'tests/awesometests/')
    def test_file_contents(self):
        contents = self.repo.file_contents('django/db/models/fields/related.py',
            '7254')
        self.assertEqual(contents.splitlines()[:2], [
            'from django.db import connection, transaction',
            'from django.db.backends import util'
        ])
        self.assertRaises(FileDoesNotExist, self.repo.file_contents, 'django/db/models/jesus.py')
self.assertRaises(FileDoesNotExist, self.repo.file_contents, 'django/db/models/jesus.py')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
andmos/ansible | lib/ansible/playbook/role/metadata.py | Python | gpl-3.0 | 4,362 | 0.001834 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError, AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems, string_types
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role.requirement import RoleRequirement
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
    '''
    This class wraps the parsing and validation of the optional metadata
    within each Role (meta/main.yml).
    '''

    _allow_duplicates = FieldAttribute(isa='bool', default=False)
    _dependencies = FieldAttribute(isa='list', default=list)
    _galaxy_info = FieldAttribute(isa='GalaxyInfo')

    def __init__(self, owner=None):
        self._owner = owner
        super(RoleMetadata, self).__init__()

    @staticmethod
    def load(data, owner, variable_manager=None, loader=None):
        '''
        Returns a new RoleMetadata object based on the datastructure passed in.
        '''
        if not isinstance(data, dict):
            raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())

        m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
        return m

    def _load_dependencies(self, attr, ds):
        '''
        This is a helper loading function for the dependencies list,
        which returns a list of RoleInclude objects
        '''
        roles = []
        if ds:
            if not isinstance(ds, list):
                raise AnsibleParserError("Expected role dependencies to be a list.", obj=self._ds)

            for role_def in ds:
                # Plain strings and old-style dicts pass through untouched.
                if isinstance(role_def, string_types) or 'role' in role_def or 'name' in role_def:
                    roles.append(role_def)
                    continue
                try:
                    # role_def is new style: { src: 'galaxy.role,version,name', other_vars: "here" }
                    def_parsed = RoleRequirement.role_yaml_parse(role_def)
                    if def_parsed.get('name'):
                        role_def['name'] = def_parsed['name']
                    roles.append(role_def)
                except AnsibleError as exc:
                    raise AnsibleParserError(to_native(exc), obj=role_def, orig_exc=exc)

        # Resolve relative dependency paths against the owning role's parent dir.
        current_role_path = None
        if self._owner:
            current_role_path = os.path.dirname(self._owner._role_path)

        try:
            return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager,
                                      loader=self._loader)
        except AssertionError as e:
            raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds, orig_exc=e)

    def _load_galaxy_info(self, attr, ds):
        '''
        This is a helper loading function for the galaxy info entry
        in the metadata, which returns a GalaxyInfo object rather than
        a simple dictionary.
        '''
        return ds

    def serialize(self):
        # NOTE(review): galaxy_info is intentionally (?) absent here — confirm.
        return dict(
            allow_duplicates=self._allow_duplicates,
            dependencies=self._dependencies,
        )

    def deserialize(self, data):
        setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
        setattr(self, 'dependencies', data.get('dependencies', []))
|
pratikmallya/heat | heat/engine/lifecycle_plugin.py | Python | apache-2.0 | 2,141 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class LifecyclePlugin(object):
    """Base class for pre-op and post-op work on a stack.

    Implementations should extend this class and override the methods.
    """

    def do_pre_op(self, cnxt, stack, current_stack=None, action=None):
        """Method to be run by heat before stack operations."""
        pass

    def do_post_op(self, cnxt, stack, current_stack=None, action=None,
                   is_stack_failure=False):
        """Method to be run by heat after stack operations, including failures.

        On failure to execute all the registered pre_ops, this method will be
        called if and only if the corresponding pre_op was successfully called.
        On failures of the actual stack operation, this method will
        be called if all the pre operations were successfully called.
        """
        pass

    def get_ordinal(self):
        """Order class instances for pre and post operation execution.

        The values returned by get_ordinal are used to create a partial order
        for pre and post operation method invocations. The default ordinal
        value of 100 may be overridden.
        If class1inst.ordinal() < class2inst.ordinal(), then the method on
        class1inst will be executed before the method on class2inst.
        If class1inst.ordinal() > class2inst.ordinal(), then the method on
        class1inst will be executed after the method on class2inst.
        If class1inst.ordinal() == class2inst.ordinal(), then the order of
        method invocation is indeterminate.
        """
        return 100
|
openweave/openweave-core | src/test-apps/happy/tests/service/wdmNext/test_weave_wdm_next_service_mutual_subscribe_08.py | Python | apache-2.0 | 2,898 | 0.010697 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between mock device and real service.
# F04: Mutual Subscribe: Root path. Null Version. Idle. Publisher in initiator aborts
# M08: Stress Mutual Subscribe: Root path. Null Version. Idle. Publisher in initiator aborts
#
from __future__ import absolute_import
from __future__ import print_function

import unittest

from weave_wdm_next_test_service_base import weave_wdm_next_test_service_base
class test_weave_wdm_next_service_mutual_subscribe_08(weave_wdm_next_test_service_base):
    """F04/M08: mutual subscribe, root path, null version, idle;
    publisher in initiator aborts (single-run and stress variants)."""

    def test_weave_wdm_next_service_mutual_subscribe_08(self):
        wdm_next_args = {}

        wdm_next_args['wdm_option'] = "mutual_subscribe"

        wdm_next_args['final_client_status'] = 3
        wdm_next_args['enable_client_flip'] = 0
        wdm_next_args['test_client_iterations'] = 10
        wdm_next_args['client_clear_state_between_iterations'] = True

        # Each expected log line must appear once per client iteration.
        wdm_next_args['client_log_check'] = [('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
                                             ('Handler\[0\] \[(ALIVE|CONFM)\] AbortSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations']),
                                             ('Client->kEvent_OnNotificationProcessed', wdm_next_args['test_client_iterations']),
                                             ('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
                                             ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]

        wdm_next_args['test_tag'] = self.__class__.__name__
        wdm_next_args['test_case_name'] = ['Wdm-NestService-F04: Mutual Subscribe: Root path. Null Version. Idle. Publisher in initiator aborts',
                                           'Wdm-NestService-M08: Stress Mutual Subscribe: Root path. Null Version. Idle. Publisher in initiator aborts']
        print('test file: ' + self.__class__.__name__)
        print("weave-wdm-next test F04 and M08")

        super(test_weave_wdm_next_service_mutual_subscribe_08, self).weave_wdm_next_test_service_base(wdm_next_args)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
nixon/pretty | setup.py | Python | bsd-2-clause | 725 | 0 | from setuptoo | ls import find_packages
from distutils.core import setup
from pretty_times import VERSION
# Runtime dependencies (none) and the tooling needed to run the test suite.
REQUIREMENTS = []

TEST_REQUIREMENTS = [
    'coverage',
    'pep8',
    'pyflakes',
    'nose',
    'nosexcover',
]
def do_setup():
    """Configure and register the pretty-times distribution."""
    # Read the long description up front with a context manager so the
    # file handle is closed (the original open(...).read() leaked it).
    with open('README.txt', 'r') as readme:
        long_description = readme.read()
    setup(
        name="pretty-times",
        version=VERSION,
        author="nixon",
        description="pretty_times provides fixes for the py-pretty library.",
        long_description=long_description,
        url="https://github.com/nixon/pretty-times",
        packages=find_packages(exclude=['example']),
        install_requires=REQUIREMENTS,
        tests_require=TEST_REQUIREMENTS,
        zip_safe=False,
    )
# Run the packaging routine only when executed as a script.
if __name__ == '__main__':
    do_setup()
|
aldanor/SocketIO-Flask-Debug | app.py | Python | mit | 1,749 | 0.000572 | from gevent import monkey
from socketio.server import | SocketIOServer
from socketio import socketio_manage
from flask import Flask, request, render_template, Response
from werkzeug.serving import run_with_reloader
from socketio.namespace import BaseNamespace
from debu | gger import SocketIODebugger
monkey.patch_all()
class Namespace(BaseNamespace):
    # Demo namespace: logs every lifecycle hook so the debugger wrapper
    # can be observed end to end. (Python 2 print statements throughout.)
    def __init__(self, *args, **kwargs):
        print '\nNamespace.__init__(args=%s, kwargs=%s)' % (
            repr(args), repr(kwargs))
        super(Namespace, self).__init__(*args, **kwargs)
    def recv_connect(self):
        print '\nNamespace.recv_connect()'
    def recv_disconnect(self):
        # Explicit disconnect tears the socket down on the server side too.
        print '\nNamespace.recv_disconnect()'
        self.disconnect()
    def on_foo(self, msg=None):
        # Client 'foo' event -> reply with a 'bar' payload.
        print '\nNamespace.on_foo(): msg=%s' % repr(msg)
        self.emit('bar', {'data': 'some server data'})
    def on_debug(self, msg=None):
        # Raises on purpose so the in-namespace error path is exercised.
        print '\nNamespace.on_debug()'
        raise Exception('in-namespace exception')
def index():
    # Serve the demo page.
    print '\nindex()'
    return render_template('index.html')
def debug():
    # Deliberately raise to exercise the Flask-side debugger path.
    print '\ndebug()'
    raise Exception('in-flask exception')
def run_socketio(path):
    # Hand socket.io requests off to gevent-socketio's dispatcher.
    print '\nrun_socketio(path=%s)' % repr(path)
    socketio_manage(request.environ, {'/api': Namespace}, request)
    return Response()
if __name__ == '__main__':
    # Build the Flask app, wrap it with the SocketIO-aware debugger, and
    # serve it under gevent-socketio with werkzeug's auto-reloader.
    app = Flask(__name__)
    app.route('/')(index)
    app.route('/debug')(debug)
    app.route('/socket.io/<path:path>')(run_socketio)
    app.debug = True
    app = SocketIODebugger(app, evalex=True, namespace=Namespace)
    server = SocketIOServer(('', 8080), app,
                            resource='socket.io', policy_server=False)
    print '\nserver.serve_forever()'
    run_with_reloader(server.serve_forever)()
|
kostya0shift/SyncToGit | synctogit/Config.py | Python | mit | 1,728 | 0.001157 | from __future__ import absolute_import
try:
import configparser
except:
import ConfigParser as configparser
class _NotSet(object):
    """Sentinel type marking 'no default supplied' (None is a legal value)."""
    pass


class ConfigException(Exception):
    """Raised when a requested section/key is absent and no default given."""
    pass


class Config:
    """Thin typed wrapper around an INI file that writes changes back to disk."""

    def __init__(self, conffile):
        self.conffile = conffile
        self.conf = configparser.ConfigParser()
        with open(self.conffile, 'r') as f:
            # Prefer read_file() (readfp was removed in Python 3.12);
            # fall back to readfp() for the Python 2 ConfigParser.
            reader = getattr(self.conf, 'read_file', None) or self.conf.readfp
            reader(f)

    def _get(self, section, key, getter, default=_NotSet()):
        """Fetch section/key via `getter`; use `default` or raise if missing."""
        if not self.conf.has_section(section):
            if isinstance(default, _NotSet):
                raise ConfigException('Section %s is missing' % section)
            else:
                return default
        if not self.conf.has_option(section, key):
            if isinstance(default, _NotSet):
                raise ConfigException('Key %s from section %s is missing' % (key, section))
            else:
                v = default
        else:
            v = getter(section, key)
        return v

    def get_int(self, section, key, default=_NotSet()):
        v = self._get(section, key, self.conf.getint, default)
        return int(v)

    def get_string(self, section, key, default=_NotSet()):
        v = self._get(section, key, self.conf.get, default)
        return "" + v

    def get_boolean(self, section, key, default=_NotSet()):
        v = self._get(section, key, self.conf.getboolean, default)
        return bool(v)

    def _write(self):
        # Persist the in-memory state back to the original file.
        with open(self.conffile, 'w') as f:
            self.conf.write(f)

    def set(self, section, key, value):
        self.conf.set(section, key, value)
        self._write()

    def unset(self, section, key):
        self.conf.remove_option(section, key)
        self._write()
|
denfromufa/clrmagic | clrmagic.py | Python | mit | 2,540 | 0.014567 |
import clr
def create_cs_function(name, code, dependencies = None):
    """Compile C# ``code`` into a named function and wrap it as a
    Python callable that marshals its arguments through the CLR."""
    clr.AddReference("clrmagic")
    from MagicIPython import MagicCS
    from System import String
    from System.Collections.Generic import List
    # Marshal the (possibly empty) dependency list into a CLR string array.
    deps = List[String]()
    if dependencies:
        for dependency in dependencies:
            deps.Add(dependency)
    compiled = MagicCS.CreateFunction(name, code, deps.ToArray())
    return lambda *params: run_cs_function(compiled, params)
def run_cs_function(func, params):
    """Invoke a compiled C# function, marshalling ``params`` to a CLR array."""
    clr.AddReference("clrmagic")
    from MagicIPython import MagicCS
    from System.Collections.Generic import List
    from System import Object
    arguments = List[Object]()
    for value in params:
        arguments.Add(value)
    return MagicCS.RunFunction(func, arguments.ToArray())
import sys
from IPython.core.magic import Magics, magics_class, line_magic, cell_magic
from IPython.core.magic import line_cell_magic
from IPython.core.display import HTML
from IPython.core import display
@magics_class
class CustomMagics(Magics):
    """IPython magics exposing the ``%%CS`` cell magic for inline C#."""

    @cell_magic
    def CS(self, line, cell):
        """
        Defines command ``%%CS``.

        The first token of ``line`` is the function name; the remainder is
        a ``;``-separated list of assembly dependencies. ``cell`` holds the
        C# source of the function body.
        """
        #if not sys.platform.startswith("win"):
        #    raise Exception("Works only on Windows.")
        #from clrfunction import create_cs_function
        if line is not None:
            spl = line.strip().split(" ")
            name = spl[0]
            deps = " ".join(spl[1:]) if len(spl) > 1 else ""
            deps = deps.split(";")
            if name == "-h":
                print( "Usage: "
                       " %%CS function_name dependency1;dependency2"
                       " function code")
            else :
                try:
                    f = create_cs_function(name, cell, deps)
                except Exception as e :
                    print(e)
                    return
                # Publish the compiled function into the interactive namespace.
                if self.shell is not None:
                    self.shell.user_ns[name] = f
                return f
def load_ipython_extension(ip):
    """
    register magics function, can be called from a notebook
    """
    #ip = get_ipython()
    ip.register_magics(CustomMagics)
    # enable C# (CSHARP) highlight
    patch = ("IPython.config.cell_magic_highlight['clrmagic'] = "
             "{'reg':[/^%%CS/]};")
    # NOTE(review): `js` is constructed but never displayed (the call below
    # is commented out), so the highlight patch likely never reaches the
    # front end — confirm whether this is intentional.
    js = display.Javascript(data=patch,
                            lib=["https://github.com/codemirror/CodeMirror/blob/master/mode/clike/clike.js"])
    #register_magics()
|
praekelt/molo-gem | gem/admin.py | Python | bsd-2-clause | 3,379 | 0 | from collections import Counter
from django.contrib import admin
from django.contrib.auth.models import User
from gem.models import GemCommentReport, Invite
from gem.rules import ProfileDataRule, CommentCountRule
from molo.commenting.admin import MoloCommentAdmin, MoloCommentsModelAdmin
from molo.commenting.models import MoloComment
from molo.profiles.models import UserProfile
from molo.forms.models import FormsSegmentUserGroup
from wagtail.contrib.modeladmin.helpers import PermissionHelper
from wagtail.contrib.modeladmin.options import (
ModelAdmin as WagtailModelAdmin, modeladmin_register)
from wagtail.contrib.modeladmin.views import CreateView
class InviteAdmin(WagtailModelAdmin):
    """Wagtail settings-menu admin for Invite records."""
    model = Invite
    menu_order = 600
    menu_icon = 'mail'
    menu_label = 'Invites'
    add_to_settings_menu = True
    search_fields = ['email']
    list_filter = ['is_accepted', 'created_at']
    list_display = [
        'email', 'created_at', 'modified_at', 'is_accepted', 'user',
    ]

    class InviteCreateView(CreateView):
        def form_valid(self, form):
            # Default the inviting user and the site from the current
            # request when the form leaves them unset.
            site = self.request._wagtail_site
            if not form.instance.user:
                form.instance.user = self.request.user
            if not form.instance.site:
                form.instance.site = site
            return super().form_valid(form)

    create_view_class = InviteCreateView
# Register the Invite admin with Wagtail's modeladmin machinery.
modeladmin_register(InviteAdmin)
class UserProfileInlineModelAdmin(admin.StackedInline):
    """Show the molo UserProfile inline on the user admin page."""
    model = UserProfile
    can_delete = False


class GemCommentReportModelAdmin(admin.StackedInline):
    """Read-only inline listing who reported a comment and why."""
    model = GemCommentReport
    can_delete = True
    max_num = 0  # no "add" rows: reports are created by end users
    actions = None
    readonly_fields = ["user", "reported_reason", ]


class FormsSegementUserPermissionHelper(PermissionHelper):
    """Permission helper pinned to FormsSegmentUserGroup.

    The `model` argument is deliberately overridden so the helper always
    operates on FormsSegmentUserGroup. (The misspelled class name is
    public API and therefore kept.)
    """
    def __init__(self, model, inspect_view_enabled=False):
        model = FormsSegmentUserGroup
        super(FormsSegementUserPermissionHelper, self).__init__(
            model, inspect_view_enabled
        )
class GemCommentModelAdmin(MoloCommentsModelAdmin):
    """Comment admin exposing per-comment report counts and reason breakdowns."""

    list_display = (
        'comment', 'parent_comment', 'moderator_reply', 'content', '_user',
        'is_removed', 'is_reported', 'reported_count', 'reported_reason',
        'submit_date', 'country')

    def reported_reason(self, obj):
        """List each distinct reported reason with its frequency."""
        reasons = GemCommentReport.objects.filter(comment=obj.pk).values_list(
            'reported_reason', flat=True)
        counted = Counter(list(reasons)).most_common()
        return ['%s, (%s)' % (reason, total) for reason, total in counted]

    def reported_count(self, obj):
        """Number of reports filed against this comment."""
        return GemCommentReport.objects.filter(comment=obj.pk).count()
class GemCommentReportAdmin(MoloCommentAdmin):
    # Comment admin that also shows the report inline defined above.
    inlines = (GemCommentReportModelAdmin,)
class ProfileDataRuleAdminInline(admin.TabularInline):
    """
    Inline the ProfileDataRule into the administration
    interface for segments.
    """
    model = ProfileDataRule
class CommentCountRuleAdminInline(admin.TabularInline):
    """
    Inline the CommentCountRule into the administration
    interface for segments.
    """
    model = CommentCountRule
# Swap the stock User/MoloComment admins for the report-aware versions.
admin.site.unregister(User)
admin.site.unregister(MoloComment)
admin.site.register(MoloComment, GemCommentReportAdmin)
|
benjamincongdon/adept | inventory.py | Python | mit | 6,093 | 0.005744 | from item import Item
import random
from floatingText import FloatingText,FloatingTextManager
from playerConsole import PlayerConsole
from serializable import Serializable
from eventRegistry import Event
from eventRegistry import EventRegistry
class Inventory(Serializable):
    """Player inventory: an INV_SIZE_X x INV_SIZE_Y grid of Item stacks
    plus an INV_SIZE_X-slot hotbar. Empty slots hold None."""

    INV_SIZE_X = 10
    INV_SIZE_Y = 3
    BASE_EVENT_TYPE = 'inv_'

    def __init__(self, **kwargs):
        # Build the default containers from the class constants instead of
        # re-hard-coding 10 and 3.
        self.items = kwargs.get(
            "items",
            [[None] * Inventory.INV_SIZE_Y for _ in range(Inventory.INV_SIZE_X)])
        self.hotbar = kwargs.get("hotbar", [None] * Inventory.INV_SIZE_X)
        self.hotbarSelection = kwargs.get("hotbarSelection", 0)
        self.update()

    def addItem(self, item):
        """Add `item`, stacking onto the first same-named stack or filling
        the first empty slot (hotbar first, then the grid)."""
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'add',
            {'item': item}
        ))
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] is not None and self.hotbar[x].name == item.name:
                self.hotbar[x].quantity += item.quantity
                return
            if self.hotbar[x] is None and isinstance(item, Item):
                self.hotbar[x] = item
                return
        for x in range(Inventory.INV_SIZE_X):
            for y in range(Inventory.INV_SIZE_Y):
                if self.items[x][y] is not None and self.items[x][y].name == item.name:
                    self.items[x][y].quantity += item.quantity
                    return
                if self.items[x][y] is None and isinstance(item, Item):
                    self.items[x][y] = item
                    return

    def removeItem(self, item):
        """Remove the first slot holding `item` (compared with ==)."""
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'remove',
            {'item': item}
        ))
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] == item:
                self.hotbar[x] = None
                return
        for x in range(Inventory.INV_SIZE_X):
            for y in range(Inventory.INV_SIZE_Y):
                if self.items[x][y] == item:
                    self.items[x][y] = None
                    return

    def removeItemQuantity(self, item, quantity):
        """
        Remove up to `quantity` units of the named item across all stacks.
        NOTE: Takes an item NAME as the 'item' param, not an Item object.
        """
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'remove_quantity',
            {'item_name': item,
             'quantity': quantity}
        ))
        # Track the amount still to remove. Comparing each stack against the
        # *remaining* amount (not the original `quantity`) fixes an
        # over-removal bug when the total spans multiple stacks.
        remaining = quantity
        for x in range(Inventory.INV_SIZE_X):
            if remaining <= 0:
                return
            stack = self.hotbar[x]
            if stack is not None and stack.name == item:
                if stack.quantity > remaining:
                    stack.quantity -= remaining
                    return
                remaining -= stack.quantity
                self.hotbar[x] = None
        for x in range(Inventory.INV_SIZE_X):
            for y in range(Inventory.INV_SIZE_Y):
                if remaining <= 0:
                    return
                stack = self.items[x][y]
                if stack is not None and stack.name == item:
                    if stack.quantity > remaining:
                        stack.quantity -= remaining
                        return
                    remaining -= stack.quantity
                    self.items[x][y] = None

    def removeHotbarItem(self, item):
        """Remove `item` from the hotbar only."""
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'remove',
            {'item': item}
        ))
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] == item:
                self.hotbar[x] = None
                return

    def placeItem(self, item, pos):
        """Put `item` at grid position `pos`; return the displaced stack
        (or None). `pos` is an (x, y) pair, coerced to int indices."""
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'add',
            {'item': item}
        ))
        if isinstance(item, Item):
            oldItem = self.items[int(pos[0])][int(pos[1])]
            self.items[int(pos[0])][int(pos[1])] = item
            return oldItem

    def placeItemInHotbar(self, item, pos):
        """Put `item` at hotbar slot `pos[0]`; return the displaced stack."""
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'add',
            {'item': item}
        ))
        if isinstance(item, Item):
            oldItem = self.hotbar[pos[0]]
            self.hotbar[pos[0]] = item
            return oldItem

    def getTotalItemQuantity(self, item):
        """
        Gets total quantity held of a specific item (by NAME) across all
        stacks within the inventory and hotbar.
        """
        quantity = 0
        for x in range(Inventory.INV_SIZE_X):
            for y in range(Inventory.INV_SIZE_Y):
                if self.items[x][y] is not None:
                    if self.items[x][y].name == item:
                        quantity += self.items[x][y].quantity
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] is not None:
                if self.hotbar[x].name == item:
                    quantity += self.hotbar[x].quantity
        return quantity

    def addItemToHotbar(self, item):
        """Place `item` in the first empty hotbar slot.

        Fixed: the original signature omitted `self` and referenced the
        undefined module-level names `hotbar`/`INV_SIZE_X`, so any call
        raised an error.
        """
        EventRegistry.registerEvent(Event(
            Inventory.BASE_EVENT_TYPE + 'add',
            {'item': item}
        ))
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] is None:
                self.hotbar[x] = item
                return

    def update(self):
        """Prune empty stacks (quantity <= 0) and tick every live Item."""
        for x in range(Inventory.INV_SIZE_X):
            for y in range(Inventory.INV_SIZE_Y):
                if self.items[x][y] is not None and self.items[x][y].quantity <= 0:
                    self.items[x][y] = None
                if self.items[x][y] is not None:
                    self.items[x][y].update()
        for x in range(Inventory.INV_SIZE_X):
            if self.hotbar[x] is not None and self.hotbar[x].quantity <= 0:
                self.hotbar[x] = None
            if self.hotbar[x] is not None:
                self.hotbar[x].update()
|
CompassionCH/compassion-modules | crm_compassion/models/event_compassion.py | Python | agpl-3.0 | 22,387 | 0 | ##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import math
from datetime import datetime, timedelta
from odoo import api, models, fields, exceptions, _
from odoo.exceptions import ValidationError
class EventCompassion(models.Model):
"""A Compassion event. """
_name = "crm.event.compassion"
_description = "Compassion event"
_order = "start_date desc"
_inherit = ["mail.thread", "mail.activity.mixin"]
##########################################################################
# FIELDS #
##########################################################################
name = fields.Char(size=128, required=True, track_visibility="onchange")
full_name = fields.Char(compute="_compute_full_name")
type = fields.Selection(
[
("stand", _("Stand")),
("concert", _("Concert")),
("presentation", _("Presentation")),
("meeting", _("Meeting")),
("sport", _("Sport event")),
("tour", _("Sponsor tour")),
],
required=True,
track_visibility="onchange",
)
start_date = fields.Datetime(required=True)
year = fields.Char(compute="_compute_year", store=True)
end_date = fields.Datetime(required=True)
partner_id = fields.Many2one(
"res.partner", "Customer", track_visibility="onchange", readonly=False
)
zip_id = fields.Many2one("res.city.zip", "Address", readonly=False)
street = fields.Char(size=128)
street2 = fields.Char(size=128)
city = fields.Char(size=128)
state_id = fields.Many2one("res.country.state", "State", readonly=False)
zip = fields.Char(size=24)
country_id = fields.Many2one("res.country", "Country", readonly=False)
user_id = fields.Many2one(
"res.users", "Ambassador", track_visibility="onchange", readonly=False
)
hold_ids = fields.One2many("compassion.hold", "event_id", readonly=True)
allocate_child_ids = fields.One2many(
"compassion.child",
compute="_compute_allocate_children",
string="Allocated children",
readonly=False,
)
effective_allocated = fields.Integer(compute="_compute_allocate_children")
staff_ids = fields.Many2many(
"res.partner",
"partners_to_staff_event",
"event_id",
"partner_id",
"Staff",
track_visibility="onchange",
readonly=False,
)
user_ids = fields.Many2many(
"res.users",
compute="_compute_users",
track_visibility="onchange",
readonly=False,
)
description = fields.Text()
analytic_id = fields.Many2one(
"account.analytic.account", "Analytic Account",
copy=False, readonly=False
)
origin_id = fields.Many2one(
"recurring.contract.origin", "Origin", copy=False, readonly=False
)
contract_ids = fields.One2many(
"recurring.contract", related="origin_id.contract_ids", readonly=True
)
expense_line_ids = fields.One2many(
"account.analytic.line",
compute="_compute_expense_lines",
string="Expenses",
readonly=False,
)
invoice_line_ids = fields.One2many(
"account.invoice.line", "event_id", readonly=True
)
income_line_ids = fields.One2many(
"account.invoice.line",
compute="_compute_income_lines",
string="Income",
readonly=False,
)
total_expense = fields.Float(
compute="_compute_expense", readonly=True, store=True)
total_income = fields.Float(
compute="_compute_income", readonly=True, store=True)
balance = fields.Float(
compute="_compute_balance", readonly=True, store=True)
number_allocate_children = fields.Integer(
"Number of children to allocate",
track_visibility="onchange",
required=True,
default=0,
)
planned_sponsorships = fields.Integer(
"Expected sponsorships", track_visibility="onchange",
required=True, default=0
)
lead_id = fields.Many2one(
"crm.lead", "Opportunity", track_visibility="onchange", readonly=False
)
won_sponsorships = fields.Integer(
related="origin_id.won_sponsorships", store=True)
conversion_rate = fields.Float(
related="origin_id.conversion_rate", store=True)
calendar_event_id = fields.Many2one("calendar.event", readonly=False)
hold_start_date = fields.Date(required=True)
hold_end_date = fields.Date()
campaign_id = fields.Many2one("utm.campaign", "Campaign", readonly=False)
# Multi-company
company_id = fields.Many2one(
"res.company",
"Company",
required=True,
index=True,
default=lambda self: self.env.user.company_id.id,
readonly=False,
)
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.multi
def _compute_expense_lines(self):
for event in self:
event.expense_line_ids = event.analytic_id.line_ids.filtered(
lambda l: l.amount < 0.0
)
@api.multi
def _compute_income_lines(self):
for event in self:
event.income_line_ids = event.invoice_line_ids.filtered(
lambda l: l.state == "paid"
and not l.contract_id
and l.invoice_id.type == "out_invoice"
)
@api.multi
@api.depends("analytic_id.line_ids")
def _compute_expense(self):
for event in self:
expenses = event.expense_line_ids.filtered(lambda l: l.amount < 0)
event.total_expense = abs(sum(expenses.mapped("amount") or [0]))
@api.multi
@api.depends("invoice_line_ids.state")
def _compute_income(self):
for event in self:
incomes = event.income_line_ids
event.total_income = sum(incomes.mapped("price_subtotal") or [0])
@api.multi
@api.depends("total_income", "total_expense")
def _compute_balance(self):
for event in self:
if event.total_expense and event.total_income:
| event.balance = event.total_income / float(event.total_expense) |
else:
event.balance = 0.0
@api.multi
@api.depends("start_date")
def _compute_year(self):
for event in self.filtered("start_date"):
event.year = str(event.start_date.year)
@api.multi
def _compute_full_name(self):
for event in self:
event.full_name = \
event.type.title() + " " + event.name + " " + event.year
@api.multi
@api.depends("hold_ids")
def _compute_allocate_children(self):
for event in self:
children = event.hold_ids.mapped("child_id")
event.allocate_child_ids = children
nb_child = 0
for child in children:
if child.state in ("N", "I"):
nb_child += 1
event.effective_allocated = nb_child
@api.constrains("hold_start_date", "start_date")
def _check_hold_start_date(self):
for event in self:
if event.hold_start_date > event.start_date.date():
raise ValidationError(
_("The hold start date must "
"be before the event starting date !")
)
def compute_hold_start_date(self, start=None):
delta = self.env["res.config.settings"].sudo().get_param(
"days_allocate_before_event")
return (start if start else self.start_date.date()) - \
timedelta(days=delta)
@api.multi
@api.depends("staff_ids")
def _compute_use |
bluefish/kdi | src/python/kdi/splits.py | Python | gpl-2.0 | 4,429 | 0.01445 | #!/usr/bin/env python
#
# Copyright (C) 2008 Josh Taylor (Kosmix Corporation)
# Created 2008-06-15
#
# This file is part of KDI.
#
# KDI is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the | License, or any later version.
#
# KDI is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU | General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
def iterSplitRanges(splitPoints, maxKey):
    """Yield (loT, hiT, loKey, hiKey) ranges covering fractions [0, 1] and
    keys ['' .. maxKey], one range per gap between consecutive split points."""
    lowT, lowKey = 0.0, ''
    for hiT, hiKey in splitPoints:
        yield (lowT, hiT, lowKey, hiKey)
        lowT, lowKey = hiT, hiKey
    # Final range closes out the fraction and key space.
    yield (lowT, 1.0, lowKey, maxKey)
def iterSplits(nPartitions, splitPoints, alphabet):
    """Yield the nPartitions-1 boundary keys that cut the key space into
    nPartitions roughly equal partitions.

    splitPoints is a sorted sequence of (fraction, key) samples; keys between
    samples are estimated by string interpolation over `alphabet`.
    """
    if nPartitions < 2:
        return
    # Largest representable key: ten copies of the alphabet's last character.
    maxKey = alphabet.chr(alphabet.size()-1) * 10
    splitRanges = iterSplitRanges(splitPoints, maxKey)
    # next()/range() instead of Py2-only .next()/xrange for Py2/Py3 portability.
    minT,maxT,minStr,maxStr = next(splitRanges)
    from util.string_interpolate import interpolate_string
    for i in range(1, nPartitions):
        t = float(i) / nPartitions
        # Advance to the sampled range that contains fraction t.
        while t > maxT:
            minT,maxT,minStr,maxStr = next(splitRanges)
        pt = (t - minT) / (maxT - minT)
        s = interpolate_string(pt, minStr, maxStr, 8, alphabet)
        yield s
def iterRowPredicates(nParts, splitPoints, alphabet):
    """Yield one row-range predicate string per partition.

    The first/last partitions are open-ended; a single partition yields the
    empty predicate (matches all rows).
    """
    if nParts < 2:
        yield ''
    else:
        splits = iterSplits(nParts, splitPoints, alphabet)
        # next() instead of Py2-only .next() for Py2/Py3 portability.
        hi = next(splits)
        yield 'row < %r' % hi
        lo = hi
        for hi in splits:
            yield '%r <= row < %r' % (lo, hi)
            lo = hi
        yield 'row >= %r' % lo
def loadSplitPointsFromFile(fn):
    """Load sorted (fraction, key) split points from a whitespace-delimited file.

    Each line is "<t> <key>" with 0 <= t <= 1; raises ValueError for any
    fraction outside that range.  Returns the points sorted by fraction.
    """
    pts = []
    # `with open(...)` replaces the Py2-only file() builtin and guarantees
    # the handle is closed on any exit path.
    with open(fn) as f:
        for line in f:
            t,s = line.strip().split()
            t = float(t)
            if 0 <= t <= 1:
                pts.append((t,s))
            else:
                raise ValueError('split out of range: %f %r' % (t,s))
    pts.sort()
    return pts
def loadSplitPointsFromMeta(metaUri):
    """Derive split points from a KDI meta-table URI of the form
    'meta+<tableUri>?name=<tableName>'.

    Scans the meta table for the named table's "location" cells and assigns
    evenly spaced fractions to the boundary rows found.  Returns a list of
    (fraction, row) pairs.
    """
    if not metaUri.startswith('meta+'):
        raise ValueError('not a meta table: %r' % metaUri)
    # Strip the 'meta+' scheme prefix; the remainder is the real table URI.
    metaUri = metaUri[5:]
    import urllib,cgi
    # NOTE(review): urllib.splitquery and cgi.parse_qs are Python 2 only.
    p,q = urllib.splitquery(metaUri)
    q = cgi.parse_qs(q or '')
    tableName = q.get('name',[''])[0]
    if not tableName:
        raise ValueError('meta uri needs table name')
    from util.zero import zeroEncode,zeroDecode
    # Row-key range bracketing the table's rows ('\x01' / '\x02' markers),
    # restricted to the "location" column.
    pred = '%r <= row < %r and column = "location"' % \
           (zeroEncode(tableName, '\x01', ''),
            zeroEncode(tableName, '\x02', ''))
    rows = []
    import pykdi
    for r,c,t,v in pykdi.Table(metaUri).scan(pred):
        # zeroDecode splits the composite key; only the row part is kept.
        n,x,r = zeroDecode(r)
        rows.append(r)
    # Spread the boundary rows evenly over the open interval (0, 1).
    f = 1.0 / (len(rows) + 1)
    return [((i+1)*f, r) for i,r in enumerate(rows)]
def main():
    """Command-line driver: load split points from a file or meta table and
    dump split points and/or row predicates (Python 2 script)."""
    import optparse
    op = optparse.OptionParser()
    op.add_option('-m','--meta',help='Load splits from a meta-table')
    op.add_option('-f','--file',help='Load splits from a file')
    op.add_option('-s','--splits',action='store_true',help='Dump split points')
    op.add_option('-n','--num',type='int',default=1,help='Number of partitions to use')
    op.add_option('-i','--index',type='int',help='Emit row predicate for single partition')
    op.add_option('-r','--rows',action='store_true',help='Dump row predicates for all partitions')
    op.add_option('-b','--binary',action='store_true',help='Generate splits for binary strings')
    opt,args = op.parse_args()
    from util.string_interpolate import Alphabet,chr_range
    # Choose the alphabet interpolated split keys are drawn from.
    if opt.binary:
        alpha = Alphabet(chr_range('\x00', '\xff'))
    else:
        alpha = Alphabet(chr_range(' ', '~'))
    pts = []
    # When both sources are given, --file overrides --meta.
    if opt.meta:
        pts = loadSplitPointsFromMeta(opt.meta)
    if opt.file:
        pts = loadSplitPointsFromFile(opt.file)
    if opt.splits:
        for t,s in pts:
            print t,s
    rows = list(iterRowPredicates(opt.num, pts, alpha))
    if opt.rows:
        for r in rows:
            print r
    if opt.index is not None:
        print rows[opt.index]
if __name__ == '__main__':
    main()
|
SWRG/semanticsdata | RDFTypeSummary.py | Python | gpl-3.0 | 1,493 | 0.013396 | # -*- coding: utf-8 -*-
"""
This class can be used to load an RDF-type summary graph.
:author: Spyridon Kazanas
:contact: s.kazanas@gmail.com
"""
import networkx as nx
import cPickle,csv
from os import p | ath
class RDFTypeSummary():
    # db info -- class-level defaults, shadowed by instance assignments in
    # loaddb()/unloaddb().
    db_graph = None  # networkx.MultiDiGraph holding the loaded summary graph
    db_file = None  # absolute path of the file the graph was loaded from
    def __init__(self,inputfile=None):
        # Optionally load a summary graph straight away.
        if inputfile is not None:
            self.loaddb(inputfile)
    def loaddb(self,inputfile):
        """
        Load an RDF-type summary graph from `inputfile`.
        Each line of the file is a space-delimited CSV row whose first field
        is a cPickle-serialized edge record (src, dst, edge-data).
        :param inputfile: Path of the file containing the pickled edge rows.
        """
        inputfile = path.abspath(path.expanduser(inputfile))
        print "Loading RDF-type summary graph: ",inputfile
        print "(please wait)"
        if path.isfile(inputfile):
            # Replace any previously loaded graph.
            self.unloaddb()
            self.db_graph=nx.MultiDiGraph()
            with open(inputfile,"rb") as f:
                csvreader=csv.reader(f,delimiter=' ')
                for row in csvreader:
                    r=cPickle.loads(row[0])
                    # r = (src, dst, edge-data); r[2][3] doubles as the edge
                    # key -- TODO confirm meaning of edge-data indices 2 and 3.
                    self.db_graph.add_edge(r[0],r[1],r[2][3],{2:r[2][2],3:r[2][3]})
        else:
            print "File not found."
            return
        self.db_file = inputfile
        print "Done loading."
        return
    def unloaddb(self):
        # Drop the loaded graph and forget its source file.
        self.db_graph = None
        self.db_file = None
    def dbinfo(self):
        # Print a short summary of the currently loaded database.
        print "RDF Type Summary Graph information:"
        print " File : ",self.db_file
        return
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/siggraph_asia/chair/bezier_traj.py | Python | lgpl-3.0 | 22,357 | 0.028984 | from gen_data_from_rbprm import *
from hpp.corbaserver.rbprm.tools.com_constraints import get_com_constraint
from hpp.gepetto import PathPla | yer
from hpp.corbaserver.rbprm.s | tate_alg import computeIntermediateState, isContactCreated
from numpy import matrix, asarray
from numpy.linalg import norm
from spline import bezier
def __curveToWps(curve):
    """Return the curve's waypoints as a plain nested Python list."""
    wps_matrix = curve.waypoints().transpose()
    return asarray(wps_matrix).tolist()
def __Bezier(wps, init_acc = [0.,0.,0.], end_acc = [0.,0.,0.], init_vel = [0.,0.,0.], end_vel = [0.,0.,0.]):
    """Build a bezier curve through waypoints `wps` with the given boundary
    velocity/acceleration constraints.

    Returns (waypoint list, curve object).
    """
    constraints = curve_constraints()
    constraints.init_vel = matrix(init_vel)
    constraints.end_vel = matrix(end_vel)
    constraints.init_acc = matrix(init_acc)
    constraints.end_acc = matrix(end_acc)
    control_points = matrix(wps).transpose()
    curve = bezier(control_points, constraints)
    return __curveToWps(curve), curve
# Global accumulator of path ids produced by test(); replayed by play_all_*.
allpaths = []
def play_all_paths():
    """Replay every stored path."""
    for pid in allpaths:
        ppl(pid)
def play_all_paths_smooth():
    """Replay only the odd-indexed (smoothed) paths."""
    for pid in allpaths[1::2]:
        ppl(pid)
def play_all_paths_qs():
    """Replay only the even-indexed (quasi-static) paths."""
    for pid in allpaths[0::2]:
        ppl(pid)
def test(s1,s2, path = False, use_rand = False, just_one_curve = False, num_optim = 0, effector = False, mu=0.5, use_Kin = True) :
    """Solve a quasi-static CoM transition between states s1 and s2 and,
    optionally, generate and display the connecting trajectories.

    Returns (success, c_mid_1, c_mid_2, paths_ids).  Relies on module globals
    fullBody, viewer, ppl and limbsCOMConstraints set up by the scenario
    scripts (Python 2).
    """
    q1 = s1.q()
    q2 = s2.q()
    stateid = s1.sId
    stateid1 = s2.sId
    sInt = computeIntermediateState(s1,s2)
    com_1 = s1.getCenterOfMass()
    com_2 = s2.getCenterOfMass()
    # Visualize the start/goal CoM positions.
    createPtBox(viewer.client.gui, 0, com_1, 0.01, [0,1,1,1.])
    createPtBox(viewer.client.gui, 0, com_2, 0.01, [0,1,1,1.])
    #~ isContactCreated_= isContactCreated(s1,s2)
    isContactCreated_ = True
    data = gen_sequence_data_from_state_objects(s1,s2,sInt,mu = mu, isContactCreated = isContactCreated_)
    c_bounds_1 = s1.getComConstraint(limbsCOMConstraints)
    c_bounds_mid = sInt.getComConstraint(limbsCOMConstraints)
    c_bounds_2 = s2.getComConstraint(limbsCOMConstraints)
    # Two intermediate CoM targets (position, acceleration) between s1 and s2.
    success, c_mid_1, c_mid_2 = solve_quasi_static(data, c_bounds = [c_bounds_1, c_bounds_2, c_bounds_mid], use_rand = use_rand, mu = mu, use_Kin = use_Kin)
    print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ calling effector", effector
    paths_ids = []
    if path and success:
        #~ fullBody.straightPath([c_mid_1[0].tolist(),c_mid_2[0].tolist()])
        #~ fullBody.straightPath([c_mid_2[0].tolist(),com_2])
        if just_one_curve:
            # Single bezier through start, both intermediate targets, and goal.
            bezier_0, curve = __Bezier([com_1,c_mid_1[0].tolist(),c_mid_2[0].tolist(),com_2])
            createPtBox(viewer.client.gui, 0, c_mid_1[0].tolist(), 0.01, [0,1,0,1.])
            createPtBox(viewer.client.gui, 0, c_mid_2[0].tolist(), 0.01, [0,1,0,1.])
            #testing intermediary configurations
            partions = [0.,0.3,0.8,1.]
            #~ if(not isContactCreated_):
                #~ partions = [0.,0.6,0.8,1.]
            print 'paritions:', partions[1], " "
            com_interm2 = curve(partions[2])
            #~ print "com_1", com_1
            #~ print "com_1", curve(partions[0])
            #~ print "com_interm2", com_interm2
            #~ print "com_2", com_2
            #~ print "com_2", curve(partions[-1])
            success_proj1 = False;
            success_proj2 = False
            # Retry projection of the first intermediate CoM, nudging the
            # curve parameter earlier on each failure.
            for _ in range(7):
                print "WRTFF", partions[1]
                com_interm1 = curve(partions[1])
                print "com_interm1", com_interm1
                success_proj1 = project_com_colfree(fullBody, stateid , asarray((com_interm1).transpose()).tolist()[0])
                if success_proj1:
                    break
                else:
                    print "decreasing com"
                    partions[1] -= 0.04
            # Same retry loop for the second intermediate CoM, nudging later.
            for _ in range(7):
                print "WRTFF", partions[-2]
                com_interm2 = curve(partions[-2])
                print "com_interm2", com_interm2
                success_proj2 = project_com_colfree(fullBody, stateid1 , asarray((com_interm2).transpose()).tolist()[0])
                if success_proj2:
                    break
                else:
                    print "decreasing com"
                    partions[-2] += 0.039
            #~ success_proj2 = project_com_colfree(fullBody, stateid1 , asarray((com_interm2).transpose()).tolist()[0])
            #~ if success_proj1:
                #~ q_1 = fullBody.projectToCom(stateid, asarray((com_interm1).transpose()).tolist()[0])
                #~ viewer(q_1)
            if not success_proj1:
                print "proj 1 failed"
                return False, c_mid_1, c_mid_2, paths_ids
            if not success_proj2:
                print "proj 2 failed"
                return False, c_mid_1, c_mid_2, paths_ids
            # Split the single curve into sub-paths at the chosen parameters.
            p0 = fullBody.generateCurveTrajParts(bezier_0,partions)
            #~ pp.displayPath(p0+1)
            #~ pp.displayPath(p0+2)
            ppl.displayPath(p0)
            #~ ppl.displayPath(p0+1)
            #~ ppl.displayPath(p0+2)
            #~ ppl.displayPath(p0+3)
            if(effector):
                #~ assert False, "Cant deal with effectors right now"
                paths_ids = [int(el) for el in fullBody.effectorRRT(stateid,p0+1,p0+2,p0+3,num_optim)]
            else:
                paths_ids = [int(el) for el in fullBody.comRRTFromPosBetweenState(stateid,stateid1,p0+1,p0+2,p0+3,num_optim)]
        else:
            # Three chained bezier segments with matched boundary accelerations.
            success_proj1 = project_com_colfree(fullBody, stateid , c_mid_1[0].tolist())
            success_proj2 = project_com_colfree(fullBody, stateid1 , c_mid_2[0].tolist())
            if not success_proj1:
                print "proj 1 failed"
                return False, c_mid_1, c_mid_2, paths_ids
            if not success_proj2:
                print "proj 2 failed"
                return False, c_mid_1, c_mid_2, paths_ids
            bezier_0, curve = __Bezier([com_1,c_mid_1[0].tolist()] , end_acc = c_mid_1[1].tolist() , end_vel = [0.,0.,0.])
            bezier_1, curve = __Bezier([c_mid_1[0].tolist(),c_mid_2[0].tolist()], end_acc = c_mid_2[1].tolist(), init_acc = c_mid_1[1].tolist(), init_vel = [0.,0.,0.], end_vel = [0.,0.,0.])
            bezier_2, curve = __Bezier([c_mid_2[0].tolist(),com_2] , init_acc = c_mid_2[1].tolist(), init_vel = [0.,0.,0.])
            p0 = fullBody.generateCurveTraj(bezier_0)
            fullBody.generateCurveTraj(bezier_1)
            fullBody.generateCurveTraj(bezier_2)
            ppl.displayPath(p0)
            #~ ppl.displayPath(p0+1)
            #~ ppl.displayPath(p0+2)
            paths_ids = [int(el) for el in fullBody.comRRTFromPosBetweenState(stateid,stateid1, p0,p0+1,p0+2,num_optim)]
        #~ paths_ids = []
        global allpaths
        # Keep all but the last path id for later replay.
        allpaths += paths_ids[:-1]
        #~ allpaths += [paths_ids[-1]]
        #~ pp(paths_ids[-1])
    #~ return success, paths_ids, c_mid_1, c_mid_2
    return success, c_mid_1, c_mid_2, paths_ids
#~ data = gen_sequence_data_from_state(fullBody,3,configs)
#~ pp(29),pp(9),pp(17)
from hpp.corbaserver.rbprm.tools.path_to_trajectory import *
def createPtBox(gui, winId, config, res = 0.01, color = [1,1,1,0.3]):
print "plottiun ", config
#~ resolution = res
#~ global scene
#~ global b_id
#~ boxname = scene+"/"+str(b_id)
#~ b_id += 1
#~ gui.addBox(boxname,resolution,resolution,resolution, color)
#~ gui.applyConfiguration(boxname,[config[0],config[1],config[2],1,0,0,0])
#~ gui.addSceneToWindow(scene,winId)
#~ gui.refresh()
def test_ineq(stateid, constraints, n_samples = 10, color=[1,1,1,1.]):
    """Sample random CoM positions around the state's CoM and display those
    that satisfy the kinematic inequality Kin[0] . c <= Kin[1]."""
    Kin = get_com_constraint(fullBody, stateid, fullBody.getConfigAtState(stateid), constraints, interm = False)
    #~ print "kin ", Kin
    #create box around current com
    fullBody.setCurrentConfig(fullBody.getConfigAtState(stateid))
    com = fullBody.getCenterOfMass()
    bounds_c = flatten([[com[i]-1., com[i]+1.] for i in range(3)]) # arbitrary
    # NOTE(review): the inner comprehension reuses `i`, shadowing the outer
    # sample index (harmless under Py2 leak semantics but confusing).
    for i in range(n_samples):
        c = array([uniform(bounds_c[2*i], bounds_c[2*i+1]) for i in range(3)])
        print "c: ", c
        if(Kin[0].dot(c)<=Kin[1]).all():
            print "boundaries satisfied"
            createPtBox(viewer.client.gui, 0, c, 0.01, color)
#~ test_ineq(0,{ rLegId : {'file' |
ioos/catalog-harvesting | catalog_harvesting/__init__.py | Python | mit | 762 | 0 | #!/usr/bin/env python
'''
catalog_harvesting/__init__.py
'''
import logging
import os
__version__ = '1.2.0'
LOGGER = None


def get_logger():
    '''
    Return the module logger, creating it lazily on first use.
    '''
    global LOGGER
    if LOGGER is not None:
        return LOGGER
    LOGGER = logging.getLogger(__name__)
    return LOGGER
def get_r | edis_connection():
redis_url = os.environ.get('REDIS_URL', 'redis://localhost:6379/0')
protocol, address = redis_url.split('://')
if protocol != 'redis':
raise ValueError('REDIS_URL must be protocol redis')
connection_str, path = address.split('/')
if ':' in connection_str:
host, port = connection_str.split(':')
else:
port = 6379
host = connection_str
db = path
return host, port, db
|
duplocloud/duploiotagent | samples/basicShadow/basicShadowUpdater.py | Python | apache-2.0 | 5,041 | 0.004166 | '''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache Licens | e, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the L | icense for the specific language governing
* permissions and limitations under the License.
*/
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import logging
import time
import json
import argparse
# Shadow JSON schema:
#
# Name: Bot
# {
# "state": {
# "desired":{
# "property":<INT VALUE>
# }
# }
# }
# Custom Shadow callback
def customShadowCallback_Update(payload, responseStatus, token):
    """Shadow-update callback: report the outcome of an update request.

    `payload` is a JSON document string; it is only parsed when the request
    was accepted, to echo the desired "property" value.
    """
    if responseStatus == "timeout":
        print("Update request " + token + " time out!")
    elif responseStatus == "accepted":
        shadow_doc = json.loads(payload)
        banner = "~~~~~~~~~~~~~~~~~~~~~~~"
        print(banner)
        print("Update request with token: " + token + " accepted!")
        print("property: " + str(shadow_doc["state"]["desired"]["property"]))
        print(banner + "\n\n")
    elif responseStatus == "rejected":
        print("Update request " + token + " rejected!")
def customShadowCallback_Delete(payload, responseStatus, token):
    """Shadow-delete callback: report the outcome of a delete request."""
    if responseStatus == "timeout":
        print("Delete request " + token + " time out!")
    elif responseStatus == "accepted":
        banner = "~~~~~~~~~~~~~~~~~~~~~~~"
        print(banner)
        print("Delete request with token: " + token + " accepted!")
        print(banner + "\n\n")
    elif responseStatus == "rejected":
        print("Delete request " + token + " rejected!")
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint")
parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path")
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path")
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
                    help="Use MQTT over WebSocket")
parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicShadowUpdater", help="Targeted client id")
args = parser.parse_args()
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
useWebsocket = args.useWebsocket
thingName = args.thingName
clientId = args.clientId
# Validate the mutually exclusive auth options.
# NOTE: parser.error() already exits with status 2, so the exit(2) calls
# below are unreachable (kept as defensive code).
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
    parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
    exit(2)
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
    parser.error("Missing credentials for authentication.")
    exit(2)
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTShadowClient
# WebSocket uses port 443 with CA-only credentials; X.509 uses port 8883
# with the full cert/key pair.
myAWSIoTMQTTShadowClient = None
if useWebsocket:
    myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True)
    myAWSIoTMQTTShadowClient.configureEndpoint(host, 443)
    myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath)
else:
    myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId)
    myAWSIoTMQTTShadowClient.configureEndpoint(host, 8883)
    myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# AWSIoTMQTTShadowClient configuration
myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5)  # 5 sec
# Connect to AWS IoT
myAWSIoTMQTTShadowClient.connect()
# Create a deviceShadow with persistent subscription
deviceShadowHandler = myAWSIoTMQTTShadowClient.createShadowHandlerWithName(thingName, True)
# Delete shadow JSON doc
deviceShadowHandler.shadowDelete(customShadowCallback_Delete, 5)
# Update shadow in a loop
# Publishes an increasing desired "property" value once per second, forever.
loopCount = 0
while True:
    JSONPayload = '{"state":{"desired":{"property":' + str(loopCount) + '}}}'
    deviceShadowHandler.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
    loopCount += 1
    time.sleep(1)
lightbase/LBApp | lbapp/config/routing.py | Python | gpl-2.0 | 743 | 0.001346 |
def make_routes(config):
    """Register static views and URL routes on the Pyramid configurator.

    Registration order is preserved from the original since route order
    affects URL-dispatch matching.
    """
    from lbapp.config.routes.base import make_base_routes
    from lbapp.config.routes.user import make_user_routes

    # Static asset views.
    config.add_static_view('static', 'static', cache_max_age=3600)
    config.add_static_view('templates', 'templates', cache_max_age=3600)

    # Landing pages.
    config.add_route('home', '/')
    config.add_route('master', 'master')

    # Base and user route groups.
    make_base_routes(config)
    make_user_routes(config)

    # Temporary-storage endpoints (delete route registered first).
    config.add_route('delete_tmp_storage', 'base/{id}/tmp-storage/{storage}')
    config.add_route('tmp_storage', 'base/{id}/tmp-storage')

    # Error pages.
    for error_route in ('error-404', 'error-500'):
        config.add_route(error_route, error_route)
593141477/pyA13_swiftboard | source/A13_SPI.py | Python | mit | 6,079 | 0.01168 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A13_SPI.py
#
# Copyright 2013 Stefan Mavrodiev <support@olimex.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import A13_GPIO as GPIO
import sys
import time
MODE = 0
MOSI = GPIO.MOSI
MISO = GPIO.MISO
SCK = GPIO.SCK
CS = GPIO.CS
OUT = GPIO.OUT
IN = GPIO.INP
HIGH = GPIO.HIGH
LOW = GPIO.LOW
class ModeError(Exception):
    """Raised when an SPI mode outside the valid range 0..3 is selected."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return 'Invalid mode selected: %s' % repr(self.value)
def init(mode):
    """Configure the bit-banged SPI pins for the requested SPI mode (0-3).

    Stores the mode in the module-global MODE, sets pin directions, and
    drives the idle levels: CS high, SCK/MOSI low for modes 0/1 (CPOL=0)
    and high for modes 2/3 (CPOL=1).  Raises ModeError for invalid modes.
    """
    print ("Selected mode: " + str(mode))
    if not 0 <= mode <= 3:
        raise ModeError(mode)
    global MODE
    MODE = mode
    GPIO.init()
    GPIO.setcfg(MOSI, OUT)
    GPIO.setcfg(MISO, IN)
    GPIO.setcfg(SCK, OUT)
    GPIO.setcfg(CS, OUT)
    # CPOL determines the idle level of the clock and data lines.
    idle_level = LOW if mode in (0, 1) else HIGH
    GPIO.output(CS, HIGH)
    GPIO.output(SCK, idle_level)
    GPIO.output(MOSI, idle_level)
    return
def read(address, n):
    """Bit-banged SPI read: select the chip, clock out `address`, then clock
    in `n` bytes and return them as a list of ints.

    Clock polarity/phase come from the module-global MODE set by init()
    (bit 1 = CPOL, bit 0 = CPHA).

    NOTE(review): SendByte here shifts the address out LSB-first
    (byte & 1 / byte >>= 1) while write() sends MSB-first -- confirm which
    bit order the attached device expects.
    """
    pol = (MODE >> 1) & 1
    pha = MODE & 1
    def SendByte(byte):
        # Shift one byte out on MOSI, least-significant bit first.
        for i in range(8):
            if pha == 0:
                if byte & 1:
                    if pol == 0:
                        GPIO.output(MOSI, HIGH)
                    else:
                        GPIO.output(MOSI, LOW)
                else:
                    if pol == 0:
                        GPIO.output(MOSI, LOW)
                    else:
                        GPIO.output(MOSI, HIGH)
                time.sleep(0.000001)
                if pol == 0:
                    GPIO.output(SCK, HIGH)
                else:
                    GPIO.output(SCK, LOW)
            if pha == 1:
                if byte & 1:
                    if pol == 0:
                        GPIO.output(MOSI, HIGH)
                    else:
                        GPIO.output(MOSI, LOW)
                else:
                    if pol == 0:
                        GPIO.output(MOSI, LOW)
                    else:
                        GPIO.output(MOSI, HIGH)
                time.sleep(0.000001)
                if pol == 0:
                    GPIO.output(SCK, LOW)
                else:
                    GPIO.output(SCK, HIGH)
            byte >>= 1
    def ReadByte():
        # Shift one byte in from MISO, most-significant bit first.
        byte = 0
        for i in range(8):
            time.sleep(0.000001)
            if pol == 0:
                GPIO.output(SCK, HIGH)
            else:
                GPIO.output(SCK, LOW)
            if pha == 0:
                if GPIO.input(MISO) == 1:
                    if pol == 0:
                        byte |= 1
                    else:
                        byte |= 0
                else:
                    if pol == 0:
                        byte |= 0
                    else:
                        byte |= 1
                if i != 7:
                    byte <<= 1
            # NOTE(review): this SCK LOW is applied for every mode; for
            # CPOL=1 modes (2/3) the idle clock level is HIGH -- verify the
            # clock waveform on hardware.
            time.sleep(0.000001)
            GPIO.output(SCK, LOW)
            if pha == 1:
                if GPIO.input(MISO) == 1:
                    if pol == 0:
                        byte |= 1
                    else:
                        byte |= 0
                else:
                    if pol == 0:
                        byte |= 0
                    else:
                        byte |= 1
                if i != 7:
                    byte <<= 1
        return byte;
    # Assert chip-select, send the address, read n bytes, release the chip.
    GPIO.output(CS, LOW)
    time.sleep(0.000001)
    SendByte(address)
    args = []
    for i in range(n):
        args.append(ReadByte())
    time.sleep(0.000001)
    GPIO.output(CS, HIGH)
    return args
def write(*args):
    """Bit-banged SPI write: select the chip and clock out each byte in
    `args`, most-significant bit first.

    Clock polarity/phase come from the module-global MODE set by init()
    (bit 1 = CPOL, bit 0 = CPHA).
    """
    pol = (MODE >> 1) & 1
    pha = MODE & 1
    def SendByte(byte):
        # Shift one byte out on MOSI, most-significant bit first.
        for i in range(8):
            if pha == 0:
                if byte & 0x80:
                    if pol == 0:
                        GPIO.output(MOSI, HIGH)
                    else:
                        GPIO.output(MOSI, LOW)
                else:
                    if pol == 0:
                        GPIO.output(MOSI, LOW)
                    else:
                        GPIO.output(MOSI, HIGH)
                time.sleep(0.000001)
                if pol == 0:
                    GPIO.output(SCK, HIGH)
                else:
                    GPIO.output(SCK, LOW)
            if pha == 1:
                if byte & 0x80:
                    if pol == 0:
                        GPIO.output(MOSI, HIGH)
                    else:
                        GPIO.output(MOSI, LOW)
                else:
                    if pol == 0:
                        GPIO.output(MOSI, LOW)
                    else:
                        GPIO.output(MOSI, HIGH)
                time.sleep(0.000001)
                if pol == 0:
                    GPIO.output(SCK, LOW)
                else:
                    GPIO.output(SCK, HIGH)
            byte <<= 1
    # Assert chip-select, send every byte, then release the chip.
    GPIO.output(CS, LOW)
    time.sleep(0.000001)
    for i in range(len(args)):
        SendByte(args[i])
    time.sleep(0.000001)
    GPIO.output(CS, HIGH)
    return
|
JoelBender/bacpypes | py27/bacpypes/service/object.py | Python | mit | 15,660 | 0.007088 | #!/usr/bin/env python
from ..debugging import bacpypes_debugging, ModuleLogger
from ..capability import Capability
from ..basetypes import ErrorType, PropertyIdentifier
from ..primitivedata import Atomic, Null, Unsigned
from ..constructeddata import Any, Array, ArrayOf, List
from ..apdu import \
SimpleAckPDU, ReadPropertyACK, ReadPropertyMultipleACK, \
ReadAccessResult, ReadAccessResultElement, ReadAccessResultElementChoice
from ..errors import ExecutionError
from ..object import PropertyError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# handy reference
ArrayOfPropertyIdentifier = ArrayOf(PropertyIdentifier)
#
# ReadProperty and WriteProperty Services
#
@bacpypes_debugging
class ReadWritePropertyServices(Capability):
    """Capability mixin implementing the BACnet ReadProperty and
    WriteProperty services against this application's local objects."""
    def __init__(self):
        if _debug: ReadWritePropertyServices._debug("__init__")
        Capability.__init__(self)
    def do_ReadPropertyRequest(self, apdu):
        """Return the value of some property of one of our objects."""
        if _debug: ReadWritePropertyServices._debug("do_ReadPropertyRequest %r", apdu)
        # extract the object identifier
        objId = apdu.objectIdentifier
        # check for wildcard (4194303 is the unassigned device instance;
        # it is mapped to our own device object)
        if (objId == ('device', 4194303)) and self.localDevice is not None:
            if _debug: ReadWritePropertyServices._debug(" - wildcard device identifier")
            objId = self.localDevice.objectIdentifier
        # get the object
        obj = self.get_object_id(objId)
        if _debug: ReadWritePropertyServices._debug(" - object: %r", obj)
        if not obj:
            raise ExecutionError(errorClass='object', errorCode='unknownObject')
        try:
            # get the datatype
            datatype = obj.get_datatype(apdu.propertyIdentifier)
            if _debug: ReadWritePropertyServices._debug(" - datatype: %r", datatype)
            # get the value
            value = obj.ReadProperty(apdu.propertyIdentifier, apdu.propertyArrayIndex)
            if _debug: ReadWritePropertyServices._debug(" - value: %r", value)
            if value is None:
                raise PropertyError(apdu.propertyIdentifier)
            # change atomic values into something encodeable
            if issubclass(datatype, Atomic) or (issubclass(datatype, (Array, List)) and isinstance(value, list)):
                value = datatype(value)
            elif issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
                # array index 0 is the array length per the BACnet array model
                if apdu.propertyArrayIndex == 0:
                    value = Unsigned(value)
                elif issubclass(datatype.subtype, Atomic):
                    value = datatype.subtype(value)
                elif not isinstance(value, datatype.subtype):
                    raise TypeError("invalid result datatype, expecting {0} and got {1}" \
                        .format(datatype.subtype.__name__, type(value).__name__))
            elif issubclass(datatype, List):
                value = datatype(value)
            elif not isinstance(value, datatype):
                raise TypeError("invalid result datatype, expecting {0} and got {1}" \
                    .format(datatype.__name__, type(value).__name__))
            if _debug: ReadWritePropertyServices._debug(" - encodeable value: %r", value)
            # this is a ReadProperty ack
            resp = ReadPropertyACK(context=apdu)
            resp.objectIdentifier = objId
            resp.propertyIdentifier = apdu.propertyIdentifier
            resp.propertyArrayIndex = apdu.propertyArrayIndex
            # save the result in the property value
            resp.propertyValue = Any()
            resp.propertyValue.cast_in(value)
            if _debug: ReadWritePropertyServices._debug(" - resp: %r", resp)
        except PropertyError:
            raise ExecutionError(errorClass='property', errorCode='unknownProperty')
        # return the result
        self.response(resp)
    def do_WritePropertyRequest(self, apdu):
        """Change the value of some property of one of our objects."""
        if _debug: ReadWritePropertyServices._debug("do_WritePropertyRequest %r", apdu)
        # get the object
        obj = self.get_object_id(apdu.objectIdentifier)
        if _debug: ReadWritePropertyServices._debug(" - object: %r", obj)
        if not obj:
            raise ExecutionError(errorClass='object', errorCode='unknownObject')
        try:
            # check if the property exists
            if obj.ReadProperty(apdu.propertyIdentifier, apdu.propertyArrayIndex) is None:
                raise PropertyError(apdu.propertyIdentifier)
            # get the datatype, special case for null
            if apdu.propertyValue.is_application_class_null():
                datatype = Null
            else:
                datatype = obj.get_datatype(apdu.propertyIdentifier)
            if _debug: ReadWritePropertyServices._debug(" - datatype: %r", datatype)
            # special case for array parts, others are managed by cast_out
            if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
                # array index 0 writes the array length
                if apdu.propertyArrayIndex == 0:
                    value = apdu.propertyValue.cast_out(Unsigned)
                else:
                    value = apdu.propertyValue.cast_out(datatype.subtype)
            else:
                value = apdu.propertyValue.cast_out(datatype)
            if _debug: ReadWritePropertyServices._debug(" - value: %r", value)
            # change the value
            value = obj.WriteProperty(apdu.propertyIdentifier, value, apdu.propertyArrayIndex, apdu.priority)
            # success
            resp = SimpleAckPDU(context=apdu)
            if _debug: ReadWritePropertyServices._debug(" - resp: %r", resp)
        except PropertyError:
            raise ExecutionError(errorClass='property', errorCode='unknownProperty')
        # return the result
        self.response(resp)
#
# read_property_to_any
#
@bacpypes_debugging
def read_property_to_any(obj, propertyIdentifier, propertyArrayIndex=None):
    """Read the specified property of the object, with the optional array index,
    and cast the result into an Any object.

    Mirrors the casting logic of do_ReadPropertyRequest.  NOTE(review): unlike
    that method, there is no explicit List branch here -- confirm whether List
    properties are expected through this helper.
    """
    if _debug: read_property_to_any._debug("read_property_to_any %s %r %r", obj, propertyIdentifier, propertyArrayIndex)
    # get the datatype
    datatype = obj.get_datatype(propertyIdentifier)
    if _debug: read_property_to_any._debug(" - datatype: %r", datatype)
    if datatype is None:
        raise ExecutionError(errorClass='property', errorCode='datatypeNotSupported')
    # get the value
    value = obj.ReadProperty(propertyIdentifier, propertyArrayIndex)
    if _debug: read_property_to_any._debug(" - value: %r", value)
    if value is None:
        raise ExecutionError(errorClass='property', errorCode='unknownProperty')
    # change atomic values into something encodeable
    if issubclass(datatype, Atomic) or (issubclass(datatype, (Array, List)) and isinstance(value, list)):
        value = datatype(value)
    elif issubclass(datatype, Array) and (propertyArrayIndex is not None):
        # array index 0 reads the array length
        if propertyArrayIndex == 0:
            value = Unsigned(value)
        elif issubclass(datatype.subtype, Atomic):
            value = datatype.subtype(value)
        elif not isinstance(value, datatype.subtype):
            raise TypeError("invalid result datatype, expecting %s and got %s" \
                % (datatype.subtype.__name__, type(value).__name__))
    elif not isinstance(value, datatype):
        raise TypeError("invalid result datatype, expecting %s and got %s" \
            % (datatype.__name__, type(value).__name__))
    if _debug: read_property_to_any._debug(" - encodeable value: %r", value)
    # encode the value
    result = Any()
    result.cast_in(value)
    if _debug: read_property_to_any._debug(" - result: %r", result)
    # return the object
    return result
#
# read_property_to_result_element
#
@bacpypes_debugging
def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None):
"""Read the specified property of the object, with the optional array index,
and cast the result into an Any object."""
|
pshchelo/heat | heat/engine/resources/openstack/ceilometer/alarm.py | Python | apache-2.0 | 14,721 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
from heat.engine import watchrule
COMMON_PROPERTIES = (
ALARM_ACTIONS, OK_ACTIONS, REPEAT_ACTIONS, INSUFFICIENT_DATA_ACTIONS,
DESCRIPTION, ENABLED,
) = (
'alarm_actions', 'ok_actions', 'repeat_actions',
'insufficient_data_actions', 'description', 'enabled',
)
common_properties_schema = {
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for the alarm.'),
update_allowed=True
),
ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('True if alarm evaluation/actioning is enabled.'),
default='true',
update_allowed=True
),
ALARM_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'alarm.'),
update_allowed=True
),
OK_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'ok.'),
update_allowed=True
),
INSUFFICIENT_DATA_ACTIONS: properties.Schema(
properties.Schema.LIST,
_('A list of URLs (webhooks) to invoke when state transitions to '
'insufficient-data.'),
update_allowed=True
),
REPEAT_ACTIONS: properties.Schema(
properties.Schema.BOOLEAN,
_("False to trigger actions when the threshold is reached AND "
"the alarm's state has changed. By default, actions are called "
"each time the threshold is reached."),
default='true',
update_allowed=True
)
}
NOVA_METERS = ['instance', 'memory', 'memory.usage',
'cpu', 'cpu_util', 'vcpus',
'disk.read.requests', 'disk.read.requests.rate',
'disk.write.requests', 'disk.write.requests.rate',
'disk.read.bytes', 'disk.read.bytes.rate',
'disk.write.bytes', 'disk.write.bytes.rate',
'disk.device.read.requests', 'disk.device.read.requests.rate',
'disk.device.write.requests', 'disk.device.write.requests.rate',
'disk.device.read.bytes', 'disk.device.read.bytes.rate',
'disk.device.write.bytes', 'disk.device.write.bytes.rate',
'disk.root.size', 'disk.ephemeral.size',
'network.incoming.bytes', 'network.incoming.bytes.rate',
'network.outgoing.bytes', 'network.outgoing.bytes.rate',
'network.incoming.packets', 'network.incoming.packets.rate',
'network.outgoing.packets', 'network.outgoing.packets.rate']
def actions_to_urls(sta | ck, properties):
kwargs = {}
for k, v in iter(properties.items()):
if k in [ALARM_ACTIONS, OK_ACTIONS,
INSUFFICIENT_DATA_ACTIONS] and v is not None:
kwargs[k] = []
for act in v:
# if the action is a resource name
| # we ask the destination resource for an alarm url.
# the template writer should really do this in the
# template if possible with:
# {Fn::GetAtt: ['MyAction', 'AlarmUrl']}
if act in stack:
url = stack[act].FnGetAtt('AlarmUrl')
kwargs[k].append(url)
else:
if act:
kwargs[k].append(act)
else:
kwargs[k] = v
return kwargs
class CeilometerAlarm(resource.Resource):
PROPERTIES = (
COMPARISON_OPERATOR, EVALUATION_PERIODS, METER_NAME, PERIOD,
STATISTIC, THRESHOLD, MATCHING_METADATA, QUERY,
) = (
'comparison_operator', 'evaluation_periods', 'meter_name', 'period',
'statistic', 'threshold', 'matching_metadata', 'query',
)
QUERY_FACTOR_FIELDS = (
QF_FIELD, QF_OP, QF_VALUE,
) = (
'field', 'op', 'value',
)
QF_OP_VALS = constraints.AllowedValues(['le', 'ge', 'eq',
'lt', 'gt', 'ne'])
properties_schema = {
COMPARISON_OPERATOR: properties.Schema(
properties.Schema.STRING,
_('Operator used to compare specified statistic with threshold.'),
constraints=[
constraints.AllowedValues(['ge', 'gt', 'eq', 'ne', 'lt',
'le']),
],
update_allowed=True
),
EVALUATION_PERIODS: properties.Schema(
properties.Schema.INTEGER,
_('Number of periods to evaluate over.'),
update_allowed=True
),
METER_NAME: properties.Schema(
properties.Schema.STRING,
_('Meter name watched by the alarm.'),
required=True
),
PERIOD: properties.Schema(
properties.Schema.INTEGER,
_('Period (seconds) to evaluate over.'),
update_allowed=True
),
STATISTIC: properties.Schema(
properties.Schema.STRING,
_('Meter statistic to evaluate.'),
constraints=[
constraints.AllowedValues(['count', 'avg', 'sum', 'min',
'max']),
],
update_allowed=True
),
THRESHOLD: properties.Schema(
properties.Schema.NUMBER,
_('Threshold to evaluate against.'),
required=True,
update_allowed=True
),
MATCHING_METADATA: properties.Schema(
properties.Schema.MAP,
_('Meter should match this resource metadata (key=value) '
'additionally to the meter_name.'),
default={},
update_allowed=True
),
QUERY: properties.Schema(
properties.Schema.LIST,
_('A list of query factors, each comparing '
'a Sample attribute with a value. '
'Implicitly combined with matching_metadata, if any.'),
update_allowed=True,
support_status=support.SupportStatus(version='2015.1'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
QF_FIELD: properties.Schema(
properties.Schema.STRING,
_('Name of attribute to compare. '
'Names of the form metadata.user_metadata.X '
'or metadata.metering.X are equivalent to what '
'you can address through matching_metadata; '
'the former for Nova meters, '
'the latter for all others. '
'To see the attributes of your Samples, '
'use `ceilometer --debug sample-list`.')
),
QF_OP: properties.Schema(
properties.Schema.STRING,
_('Comparison operator'),
constraints=[QF_OP_VALS]
),
QF_VALUE: properties.Schema(
properties.Schema.STRING,
_('String value with which to compare')
)
}
)
)
}
properties_schema.update(common_properties_schema)
default_client_name = 'ceilometer'
def cfn_to_cei |
ONEcampaign/humanitarian-data-service | resources/data/raw/example/transform_scripts/parse_acled_all_africa.py | Python | mit | 1,542 | 0.003243 | import re
import pandas as pd
ACLED_FILE = 'ACLED-All-Africa-File_20170101-to-20170429.csv'
def clean_and_save():
encoding_key = 'iso-8859-1'
df = pd.read_csv(ACLED_FILE, encoding=encoding_key)
print df.head()
print df.columns
print df.describe()
cleaned_file = 'cleaned_{}'.format(ACLED_FILE)
df.to_csv(cleaned_file, encoding='utf-8', index=False)
return df, cleaned_file
# From a clean file, attempt to derive the country name
def derive_cols(cleaned_file):
df = pd.read_csv(cleaned_file, encoding='utf-8')
print df.head()
# string patterns to search for
_digits = re.compile('\d')
_parens = re.compile(r'^(.*?)(?: \((.*)\))?$')
def extract_country(actor):
country = None
results = re.findall(_parens, actor)
if results:
| descript, country = results[0]
if bool(_digits.search(country)):
# here it's probably a year, not a country
# try to get last word of first string as proxy for region
country = descript.split()[-1]
return country.strip()
df['extracted_country_or_region'] = df['ACTOR1'].apply(extract_country)
print df.head()
derived_file = 'derived_{}'. | format(ACLED_FILE)
df.to_csv(derived_file, encoding='utf-8', index=False)
return df, derived_file
def run():
print 'Transforming ACLED data...'
cleaned_df, cleaned_file = clean_and_save()
derived_df, derived_file = derive_cols(cleaned_file)
print 'Done!'
run()
|
dana-i2cat/felix | ofam/src/src/foam/sfa/methods/Start.py | Python | apache-2.0 | 402 | 0.014925 | from foa | m.sfa.util.xrn import urn_to_hrn
from foam.sfa.trust.credential import Credential
from foam.sfa.trust.auth import Auth
class Start:
def __init__(self, xrn, creds, **kwargs):
hrn, type = urn_to_hrn(xrn)
valid_creds = Auth().checkCredentials(creds, 'startslice', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
return | |
sairon/motoscrape | motoscrape/settings.py | Python | unlicense | 3,018 | 0.009609 | # -*- coding: utf-8 -*-
# Scrapy settings for motoscrape project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'motoscrape'
SPIDER_MODULES = ['motoscrape.spiders']
NEWSPIDER_MODULE = 'motoscrape.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'motoscrape (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
#COOKIES_ENABLED=False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = | {
# 'motoscr | ape.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'motoscrape.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'motoscrape.pipelines.NewAdsPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
|
jkeen/tracking_number_data | utils/gen_s10_countries.py | Python | mit | 4,361 | 0.000917 | #!/usr/bin/env python
"""Update the file s10_country_code.json with
the latest list of member countries
requirements:
requests
beautifulsoup
grequests
"""
import requests
import bs4
from os.path import join
import json
import grequests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
BASE_URL = "http://www.upu.int/"
URL = "http://www.upu.int/en/the-upu/member-countries.html"
OUT_FILE = "s10_countries.json"
def main():
out_dct = {
"_comment_s10_countries": (
"The country code is part of the S10 standard for international"
" mail. The official reference for this is here:"
" http://www.upu.int/uploads/tx_sbdownloader/"
"S10TechnicalStandard.pdf"),
"_comment_s10_countries": "Auto-generated file. Do not modify."
}
reqs = grequests.imap(
fetch_country_data(), size=15, exception_handler=exception_handler)
# (x for n, x in enumerate(fetch_country_data())
# if n < 200 and n > 190),
# size=3, exception_handler=exception_handler)
out_dct['s10_countries'] = sorted(
[gen_country_json_dct(req) for req in reqs], key=lambda x: x['country'])
with open(OUT_FILE, 'w') as fout:
json.dump(out_dct, fout, indent=2, ensure_ascii=False)
def exception_handler(request, exception):
print("WARNING: Likely missing countries from this list. Exception: %s"
% repr(exception))
def fetch_country_data():
r = requests.get(URL)
soup = bs4.BeautifulSoup(r.content, 'html.parser')
for tag in soup.find_all("strong", attrs={"itemprop": "member"}):
href = tag.nextSibling()[0]
url2 = href.attrs['href']
yield grequests.get(
join(BASE_URL, url2.lstrip('/')),
session=retry(3)
)
def gen_country_json_dct(request, *args, **kwargs):
soup = bs4.BeautifulSoup(request.content, 'html.parser')
attr_map = {
"name": "country",
"iso2": "country_code",
"operator": [], # special handling for this one
"postcodes": "co | urier_url"
}
countrydct = {}
for attr in attr_map:
if attr == 'operator':
value, child = _get_value(attr, soup)
if value is None: # try to get ministry
value, child = _get_value("ministry", soup)
| countrydct['courier'] = child.next_element.text \
if value is not None else None
countrydct['courier_url2'] = value
else:
value, child = _get_value(attr, soup)
countrydct[attr_map[attr]] = value
if countrydct['courier_url'] is None:
countrydct['courier_url'] = countrydct['courier_url2']
countrydct.pop('courier_url2')
countrydct['upu_reference_url'] = request.url
if countrydct['country_code']:
countrydct['regex'] = (
"(?<ApplicationIdentifier>[A-Z]{2})"
"(?<SerialNumber>[0-9]{8})"
"(?<CheckDigit>[0-9])"
"(?<CountryCode>%s{2})") % countrydct['country_code']
else:
countrydct['regex'] = None
globals().update(locals())
return countrydct
def _get_value(attr, soup):
_child = soup.find("div", class_=attr)
if _child is None:
return None, None
child = _child.findChild(class_="field")
if child.next_element.name == "a":
# special handling for links
value = child.next_element.attrs['href']
elif child.next_element.name == "span" and \
child.next_element.attrs['class'] == ["noLink"]:
# special handling for cases where we expect a link but UPU
# doesn't have one for us
value = None
else:
value = child.text.encode().decode('utf8')
if value == 'None':
value = None
return value, child
def retry(n):
s = requests.Session()
retries = Retry(
total=n, backoff_factor=0.2, status_forcelist=[500, 502, 503, 504],
raise_on_redirect=True, raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries))
s.mount('https://', HTTPAdapter(max_retries=retries))
return s
if __name__ == '__main__':
main()
# for testing:
# r = requests.get(
# "http://www.upu.int/en/the-upu/member-countries/americas/nicaragua.html")
# gen_country_json_dct(r)
|
mhbu50/erpnext | erpnext/templates/pages/material_request_info.py | Python | gpl-3.0 | 1,743 | 0.021801 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from frappe.utils import flt
def get_context(context):
context.no_cache = 1
context.show_sidebar = True
context.doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
if hasattr(context.doc, "set_indicator"):
context.doc.set_indicator()
context.parents = frappe.form_dict.parents
context.title = frappe.form_dict.name
if not frappe.has_website_permission(context.doc):
frappe.throw(_("Not Permitted"), frappe.PermissionError)
default_print_format = frappe.db.get_value('Property Setter', dict(property='default_print_format', doc_type=frappe.form_dict.doctype), "value")
if default_print_format:
context.print_format = default_print_format
else:
context.print_format = | "Standard"
context.doc.items = get_more_items_info(context.doc.items, context.doc.name)
def get_more_items_info(items, material_request):
for item in items:
item.customer_provided = frappe.get_value('I | tem', item.item_code, 'is_customer_provided_item')
item.work_orders = frappe.db.sql("""
select
wo.name, wo.status, wo_item.consumed_qty
from
`tabWork Order Item` wo_item, `tabWork Order` wo
where
wo_item.item_code=%s
and wo_item.consumed_qty=0
and wo_item.parent=wo.name
and wo.status not in ('Completed', 'Cancelled', 'Stopped')
order by
wo.name asc""", item.item_code, as_dict=1)
item.delivered_qty = flt(frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail` where material_request = %s
and item_code = %s and docstatus = 1""",
(material_request, item.item_code))[0][0])
return items
|
bartosz-kozak/Sample-script | python/seq_len.py | Python | mit | 544 | 0.014733 | #!usr/bin/python2.7
# coding: | utf-8
# date: 16-wrzesień-2016
# autor: B.Kozak
# Simple script giving length of sequences from fasta file
import Bio
from Bio import SeqIO
import sys
import os.path
filename = sys.argv[-1]
outname = filename.split('.')
outname1 = '.'.join([outname[0], 'txt'])
FastaFile = open(filename, 'rU')
f = open(outname1, 'w')
for rec in SeqIO.parse(FastaFile, 'fasta'):
name = rec.id
seq = rec.seq
seqLen = len(rec)
print name, seqLen
f.write("%s\t" % name)
f.write("%s\n" % seqLen)
f.close()
print 'Done | '
|
Fillll/reddit2telegram | reddit2telegram/channels/~inactive/r_technology/app.py | Python | mit | 143 | 0.006993 | #encoding | :utf-8
subreddit = 'technology'
t_channel = '@r_technology'
def send_post(submission, r2t):
| return r2t.send_simple(submission)
|
dorey/pyxform | pyxform/tests_v1/test_translations.py | Python | bsd-2-clause | 1,244 | 0.000804 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from pyxform_test_case import PyxformTestCase
class DoubleColonTranslations(PyxformTestCase):
def test_langs(self):
self.assertPyxformXform(
name='translations',
id_string='transl',
md="""
| survey | | | | |
| | type | name | label::english | label::french |
| | note | n1 | hello | bonjour |
""",
errored=False,
itext__contains=[
'<translation lang="french">',
'<text id="/translations/n1:label">',
'<value>bonjour</value>',
'</text>',
'</translation>',
'<translation lang="english">',
'<text id="/translations/n1:label">',
'<value>hello</value>',
'</text>',
'</translation>',
],
xml__contains=[
| """<label ref="jr:itext('/translations/n1:label')"/>""",
| ],
model__contains=[
'<bind nodeset="/translations/n1" readonly="true()" type="string"/>',
],
)
|
oppia/oppia-ml | core/classifiers/classifier_utils_test.py | Python | apache-2.0 | 9,247 | 0.001081 | # coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utility functions defined in classifier_utils."""
import json
import os
import re
from core.classifiers import algorithm_registry
from core.classifiers import classifier_utils
from core.tests import test_utils
import vmconf
import numpy as np
from sklearn import svm
class ClassifierUtilsTest(test_utils.GenericTestBase):
"""Tests for utility functions."""
def setUp(self):
super(ClassifierUtilsTest, self).setUp()
# Example training dataset.
self.data = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
self.labels = np.array([1, 1, 0, 0])
def test_that_svc_parameters_are_extracted_correctly(self):
"""Test that SVC classifier's parameters are extracted correctly."""
clf = svm.SVC()
# Train the model.
clf.fit(self.data, self.labels)
data = classifier_utils.extract_svm_parameters(clf)
expected_keys = [u'n_support', u'support_vectors', u'dual_coef',
| u'intercept', u'classes', u'kernel_params', u'probA',
u'probB']
self.assertListEqual(sorted(expected_keys), sorted(data.keys()))
# Make s | ure that all of the values are of serializable type.
self.assertEqual(type(data[u'n_support']), list)
self.assertEqual(type(data[u'support_vectors']), list)
self.assertEqual(type(data[u'dual_coef']), list)
self.assertEqual(type(data[u'intercept']), list)
self.assertEqual(type(data[u'classes']), list)
self.assertEqual(type(data[u'probA']), list)
self.assertEqual(type(data[u'probB']), list)
self.assertEqual(type(data[u'kernel_params']), dict)
self.assertEqual(type(data[u'kernel_params'][u'kernel']), unicode)
self.assertEqual(type(data[u'kernel_params'][u'gamma']), float)
self.assertEqual(type(data[u'kernel_params'][u'degree']), int)
self.assertEqual(type(data[u'kernel_params'][u'coef0']), float)
def check_that_unicode_validator_works_as_expected(self):
"""Make sure that unicode validator function works as expected."""
test_dict = {
'a': u'b',
u'c': {
u'abc': 20,
u'cdf': [u'j', u'k']
},
u'x': [{u'm': u'n'}, {u'e': u'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'a\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
test_dict = {
u'a': 'b',
u'c': {
u'abc': 20,
u'cdf': [u'j', u'k']
},
u'x': [{u'm': u'n'}, {u'e': u'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'b\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
test_dict = {
u'a': u'b',
u'c': {
'abc': 20,
u'cdf': [u'j', u'k']
},
u'x': [{u'm': u'n'}, {u'e': u'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'abc\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
test_dict = {
u'a': u'b',
u'c': {
u'abc': 20,
u'cdf': ['j', u'k']
},
u'x': [{u'm': u'n'}, {u'e': u'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'j\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
test_dict = {
u'a': u'b',
u'c': {
u'abc': 20,
u'cdf': [u'j', u'k']
},
u'x': [{'m': u'n'}, {u'e': u'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'m\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
test_dict = {
u'a': u'b',
u'c': {
u'abc': 20,
u'cdf': [u'j', u'k']
},
u'x': [{u'm': u'n'}, {u'e': 'f'}]
}
with self.assertRaisesRegexp(
Exception, 'Expected \'f\' to be unicode but found str.'):
classifier_utils.unicode_validator_for_classifier_data(test_dict)
def test_that_float_verifier_regex_works_correctly(self):
"""Test that float verifier regex correctly identifies float values."""
test_list = [
'0.123', '+0.123', '-0.123', '.123', '+.123', '-.123',
'0000003.1']
for test in test_list:
self.assertEqual(
re.match(vmconf.FLOAT_VERIFIER_REGEX, test).groups()[0], test)
test_list = [
'3e10', '3e-10', '3e+10', '+3e10', '-3e10', '+3e+10', '+3e-10',
'-3e-10', '-3e+10', '0.3e10', '-0.3e10', '+0.3e10', '-0.3e-10',
'-0.3e+10', '+0.3e+10', '+0.3e-10', '.3e+10', '-.3e10']
for test in test_list:
self.assertEqual(
re.match(vmconf.FLOAT_VERIFIER_REGEX, test).groups()[1], test)
test_list = ['123', '0000', '123.']
for test in test_list:
self.assertIsNone(re.match(vmconf.FLOAT_VERIFIER_REGEX, test))
def test_encode_floats_in_classifier_data(self):
"""Make sure that all values are converted correctly."""
test_dict = {
'x': ['123', 'abc', 0.123]
}
expected_dict = {
'x': ['123', 'abc', '0.123'],
}
output_dict = (
classifier_utils.encode_floats_in_classifier_data(
test_dict))
self.assertDictEqual(expected_dict, output_dict)
test_dict = {
'x': '-0.123'
}
with self.assertRaisesRegexp(
Exception,
'Float values should not be stored as strings.'):
classifier_utils.encode_floats_in_classifier_data(
test_dict)
test_dict = {
'x': ['+0.123', 0.456]
}
with self.assertRaisesRegexp(
Exception,
'Float values should not be stored as strings.'):
classifier_utils.encode_floats_in_classifier_data(
test_dict)
test_dict = {
'a': {
'ab': 'abcd',
'ad': {
'ada': 'abcdef',
'adc': [{
'adca': 'abcd',
'adcb': 0.1234,
'adcc': ['ade', 'afd']
}]
},
'ae': [['123', 0.123], ['abc']],
},
'b': {
'bd': [-2.48521656693, -2.48521656693, -2.48521656693],
'bg': ['abc', 'def', 'ghi'],
'bh': ['abc', '123'],
},
'c': 1.123432,
}
expected_dict = {
'a': {
'ab': 'abcd',
'ad': {
'ada': 'abcdef',
'adc': [{
'adca': 'abcd',
'adcb': '0.1234',
'adcc': ['ade', 'afd'],
}],
},
'ae': [['123', '0.123'], ['abc']],
},
'b': {
'bd': ['-2.48521656693', '-2.48521656693', '-2.48521656693'],
'bg': ['abc', 'def', 'ghi'],
|
antoinecarme/pyaf | tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_ConstantTrend_BestCycle_AR.py | Python | bsd-3-clause | 164 | 0.04878 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['ConstantTrend' | ] , ['BestCycle'] , ['AR | '] ); |
adblockplus/abpbot | beanbot-client.py | Python | gpl-2.0 | 843 | 0.020166 | #!/usr/bin/python
import sys, re
from socket import *
serve_addr = ('localhost', 47701)
if __name__ == '__main__':
IRC_BOLD = '\x02'
IRC_ULINE = '\x1f'
IRC_NORMAL = '\x0f'
IRC_RED = '\x034'
IRC_LIME = '\x039'
IRC_BLUE = '\x0312'
repo, branch, author, rev, description = sys | .argv[1:6]
match = re.search(r'^\s*(.*?)\s*<.*>\s*$', author)
if match:
author = match.group(1)
data = (
"%(IRC_RED)s%(repo)s"
"%(IRC_NORMAL)s[%(branch)s] "
"%(IRC_NORMAL)s%(IRC_BOLD)s%(author)s "
"%(IRC_NORMAL)s%(IRC_ULINE)s%(rev)s%(IRC_NORMAL)s "
"%(description)s" % | locals()
)
if len(data) > 400:
data = data[:400] + "..."
sock = socket(AF_INET, SOCK_DGRAM)
sock.sendto(data, serve_addr)
sock.sendto("https://hg.adblockplus.org/%(repo)s/rev/%(rev)s" % locals(), serve_addr)
sock.close()
|
Connexions/cnx-epub | cnxepub/formatters.py | Python | agpl-3.0 | 33,743 | 0 | # -*- coding: ut | f-8 -*-
# ###
# Copyright (c) 2016, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
fr | om __future__ import unicode_literals
import hashlib
import json
import logging
import sys
from io import BytesIO
import re
import jinja2
import lxml.html
from lxml import etree
from copy import deepcopy
import requests
from .models import (
model_to_tree, content_to_etree, etree_to_content,
flatten_to_documents,
Binder, TranslucentBinder,
Document, DocumentPointer, CompositeDocument, utf8)
from .html_parsers import HTML_DOCUMENT_NAMESPACES
from .utils import ThreadPoolExecutor
logger = logging.getLogger('cnxepub')
IS_PY3 = sys.version_info.major == 3
__all__ = (
'DocumentContentFormatter',
'DocumentSummaryFormatter',
'HTMLFormatter',
'SingleHTMLFormatter',
)
class DocumentContentFormatter(object):
def __init__(self, document):
self.document = document
def __unicode__(self):
return self.__bytes__().decode('utf-8')
def __str__(self):
if IS_PY3:
return self.__bytes__().decode('utf-8')
return self.__bytes__()
def __bytes__(self):
html = """\
<html xmlns="http://www.w3.org/1999/xhtml">
{}
</html>""".format(utf8(self.document.content))
html = _fix_namespaces(html.encode('utf-8'))
et = etree.HTML(html.decode('utf-8'))
return etree.tostring(et, pretty_print=True, encoding='utf-8')
class DocumentSummaryFormatter(object):
def __init__(self, document):
self.document = document
def __unicode__(self):
return self.__bytes__().decode('utf-8')
def __str__(self):
if IS_PY3:
return self.__bytes__().decode('utf-8')
return self.__bytes__()
def __bytes__(self):
# try to make sure summary is wrapped in a tag
summary = self.document.metadata.get('summary', '') or ''
try:
etree.fromstring(summary)
html = '{}'.format(summary)
except etree.XMLSyntaxError:
html = """\
<div class="description" data-type="description"\
xmlns="http://www.w3.org/1999/xhtml">
{}
</div>""".format(summary)
return html.encode('utf-8')
class HTMLFormatter(object):
def __init__(self, model, extensions=None, generate_ids=False):
self.model = model
self.extensions = extensions
self.generate_ids = generate_ids
def _generate_ids(self, document, content):
"""Generate unique ids for html elements in page content so that it's
possible to link to them.
"""
document_id = document.id.replace('_', '')
# Step 1: prefix existing ids
elements_with_ids = content.xpath('//*[@id]')
existing_ids = set([el.attrib['id'] for el in elements_with_ids])
def next_free_auto_id_generator():
next_free_auto_id_generator.new_id_count = 0
def inner():
auto_id = 'auto_{}_{}'.format(
document_id,
next_free_auto_id_generator.new_id_count)
next_free_auto_id_generator.new_id_count += 1
while auto_id in existing_ids:
auto_id = 'auto_{}_{}'.format(
document_id,
next_free_auto_id_generator.new_id_count)
next_free_auto_id_generator.new_id_count += 1
return auto_id
return inner
next_auto_id = next_free_auto_id_generator()
old_id_to_new_id = {}
for node in elements_with_ids:
old_id = node.attrib['id']
if len(old_id) > 0:
new_id = 'auto_{}_{}'.format(document_id, old_id)
else:
new_id = next_auto_id()
node.attrib['id'] = new_id
old_id_to_new_id[old_id] = new_id
existing_ids.add(new_id)
# Step 2: give ids to elements that need them
elements_need_ids = [
'p', 'dl', 'dt', 'dd', 'table', 'div', 'section', 'figure',
'blockquote', 'q', 'code', 'pre', 'object', 'img', 'audio',
'video',
]
elements_xpath = '|'.join([
'.//*[local-name() = "{}"]'.format(elem)
for elem in elements_need_ids
])
data_types_need_ids = [
'equation', 'list', 'exercise', 'rule', 'example', 'note',
'footnote-number', 'footnote-ref', 'problem', 'solution', 'media',
'proof', 'statement', 'commentary'
]
data_types_xpath = '|'.join(['.//*[@data-type="{}"]'.format(data_type)
for data_type in data_types_need_ids])
xpath = '|'.join([elements_xpath, data_types_xpath])
for node in content.xpath(xpath, namespaces=HTML_DOCUMENT_NAMESPACES):
node_id = node.attrib.get('id')
if node_id:
continue
new_id = next_auto_id()
node.attrib['id'] = new_id
existing_ids.add(new_id)
# Step 3: redirect links to elements with now prefixed ids
for a in content.xpath('//a[@href]|//xhtml:a[@href]',
namespaces=HTML_DOCUMENT_NAMESPACES):
href = a.attrib['href']
if href.startswith('#') and href[1:] in old_id_to_new_id:
a.attrib['href'] = '#{}'.format(old_id_to_new_id[href[1:]])
@property
def _content(self):
if isinstance(self.model, TranslucentBinder):
if not self.extensions:
from .adapters import get_model_extensions
self.extensions = get_model_extensions(self.model)
return tree_to_html(
model_to_tree(self.model), self.extensions).decode('utf-8')
elif isinstance(self.model, Document):
_html = content_to_etree(self.model.content)
if self.generate_ids:
self._generate_ids(self.model, _html)
return etree_to_content(_html, strip_root_node=True)
@property
def _template(self):
if isinstance(self.model, DocumentPointer):
return jinja2.Template(DOCUMENT_POINTER_TEMPLATE,
trim_blocks=True, lstrip_blocks=True)
def isdict(v):
return isinstance(v, dict)
template_env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True)
return template_env.from_string(HTML_DOCUMENT,
globals={'isdict': isdict})
@property
def _template_args(self):
if isinstance(self.model, Document):
root = self.model._xml
else:
root = {}
return {
'metadata': self.model.metadata,
'content': self._content,
'is_translucent': getattr(self.model, 'is_translucent', False),
'resources': getattr(self.model, 'resources', []),
'root_attrs': {k: root.get(k) for k in root.keys()}
}
def __unicode__(self):
return self.__bytes().decode('utf-8')
def __str__(self):
if IS_PY3:
return self.__bytes__().decode('utf-8')
return self.__bytes__()
def __bytes__(self):
html = self._template.render(self._template_args)
return _fix_namespaces(html.encode('utf-8'))
class SingleHTMLFormatter(object):
def __init__(self, binder, includes=None, threads=1):
self.binder = binder
self.root = etree.fromstring(bytes(HTMLFormatter(self.binder)))
self.head = self.xpath('//xhtml:head')[0]
self.body = self.xpath('//xhtml:body')[0]
self.built = False
self.includes = includes
self.included = False
self.threads = threads
def xpath(self, path, elem=None):
if elem is None:
elem = self.root
return elem.xpath(path, namespaces=HTML_DOCUMENT_NAMESPACES)
def get_node_type(self, node, parent=None):
"""If node is a document, the type is page.
If node is a binder with |
mattbrowley/PSim | simplex.py | Python | mit | 11,438 | 0.003759 | #!/usr/bin/env python
#
# -*- Mode: python -*-
#
# $Id: Simplex.py,v 1.2 2004/05/31 14:01:06 vivake Exp $
#
# Copyright (c) 2002-2004 Vivake Gupta (vivakeATlab49.com). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATlab49.com) and is available at:
# http://shell.lab49.com/~vivake/python/Simplex.py
#
# 1.2 ( 5/2004) - Fixed a bug found by Noboru Yamamoto <noboru.yamamotoATkek.jp>
# which caused minimize() not to converge, and reach the maxiter
# limit under some conditions.
# ( 1/2011) - Added **kwargs where appropriate to enable passing additional
# static parameters to the objective function (Filip Dominec)
""" Simplex - a regression method for arbitrary nonlinear function minimization
Simplex minimizes an arbitrary nonlinear function of N variables by the
Nedler-Mead Simplex method as described in:
Nedler, J.A. and Mead, R. "A Simplex Method for Function Minimization."
Computer Journal 7 (1965): 308-313.
It makes no assumptions about the smoothness of the function being minimized.
It converges to a local minimum which may or may not be the global minimum
depending on the initial guess used as a starting point.
"""
import math
import copy
import csv
import time
#from numpy import ndarray as nd
class Simplex:
    def __init__(self, testfunc, guess, increments, kR = -1, kE = 2, kC = 0.5):
        """Initializes the simplex.

        INPUTS
        ------
        testfunc      the function to minimize
        guess[]       a list containing initial guesses
        increments[]  a list containing increments, perturbation size
        kR            reflection constant
        kE            expansion constant
        kC            contraction constant
        """
        self.testfunc = testfunc
        self.guess = guess
        self.increments = increments
        self.kR = kR
        self.kE = kE
        self.kC = kC
        # Dimensionality of the search space.
        self.numvars = len(self.guess)
        # One timestamped progress-log file per Simplex instance.
        load_time = time.strftime('%Y.%m.%d.%H.%M')
        self.filename = "simplex_output_{}.log".format(load_time)
def minimize(self, epsilon = 0.0001, maxiters = 250, monitor = 1, **kwargs):
"""Walks to the simplex down to a local minima.
INPUTS
------
epsilon convergence requirement
maxiters maximum number of iterations
monitor if non-zero, progress info is output to stdout
OUTPUTS
-------
an array containing the final values
lowest value of the error function
number of iterations taken to get here
"""
self.simplex = []
self.lowest = -1
self.highest = -1
self.secondhighest = -1
self.errors = []
self.currenterror = 0
# Initialize vertices
for vertex in range(0, self.numvars + 3): # Two extras to store centroid and reflected point
self.simplex.append(copy.copy(self.guess))
# Use initial increments
for vertex in range(0, self.numvars + 1):
for x in range(0, self.numvars):
if x == (vertex - 1):
self.simplex[vertex][x] = self.guess[x] + self.increments[x]
self.errors.append(0)
self.calculate_errors_at_vertices(**kwargs)
if monitor:
with open(self.filename, 'wt') as save_file:
writer = csv.writer(save_file, dialect="excel-tab")
row = ['Step','Error']
for i in range(self.numvars):
row.append('Var {} '.format(i))
writer.writerow(row)
iter = 0
for iter in range(0, maxiters):
# Identify highest, second highest, and lowest vertices
self.highest = 0
self.lowest = 0
for vertex in range(0, self.numvars + 1):
if self.errors[vertex] > self.errors[self.highest]:
self.highest = vertex
if self.errors[vertex] < self.errors[self.lowest]:
self.lowest = vertex
self.secondhighest = 0
for vertex in range(0, self.numvars + 1):
if vertex == self.highest:
continue
if self.errors[vertex] > self.errors[self.secondhighest]:
self.secondhighest = vertex
# Test for convergence
S = 0.0
S1 = 0.0
for vertex in range(0, self.numvars + 1):
S = S + self.errors[vertex]
F2 = S / (self.numvars + 1)
for vertex in range(0, self.numvars + 1):
S1 = S1 + (self.errors[vertex] - F2)**2
T = math.sqrt(S1 / self.numvars)
# Optionally, print progress information
if monitor:
with open(self.filename, 'a') as save_file:
writer = csv.writer(save_file, dialect="excel-tab")
row = ["{} ".format(iter), "{:.4f}".format(self.errors[self.highest])]
for var in self.simplex[self.highest]:
row.append("{:.4e}".format(var))
writer.writerow(row)
if T <= epsilon: # We converged! Break out of loop!
break;
else: # Didn't converge. Keep crunching.
# Calculate centroid of simplex, excluding highest vertex
for x in range(0, self.numvars):
S = 0.0
for vertex in range(0, self.numvars + 1):
if vertex == self.highest:
continue
S = S + self.simplex[vertex][x]
self.simplex[self.numvars + 1][x] = S / self.numvars
self.reflect_simplex()
self.currenterror = self.testfunc(self.guess, **kwargs)
if self.currenterror < self.errors[self.lowest]:
tmp = self.currenterror
self.expand_simplex()
self.currenterror = self.testfunc(self.guess, **kwargs)
if self.currenterror < tmp:
self.accept_expanded_point()
else:
self.currenterror = tmp
self.accept_reflected_point()
elif self.currenterror <= self.errors[self.secondhighest]:
self.accept_reflected_point()
elif self.currenterror <= self.errors[self.highest]:
self.accept_reflected_point()
self.contract_simplex()
self.currenterror = self.testfunc(self.guess, **kwargs)
if self.currenterror < self.errors[self.highest]:
self.accept_contracted_point()
else:
self.multiple_contract_simplex(**kwargs)
elif self.currenterror >= self.errors[self.highest]:
self.contract_simplex()
self.currenterror = self.testfunc(self.guess, **kwargs)
if self.currenterror < self.errors[self.highest]:
self.accept_contracted_point()
else:
self.multiple_contract_simplex(**kwargs)
# Either converged or reached the maximum number of iterations.
# Return the lowest vertex and the currenterror.
|
collective/cyn.in | src/ubify.coretypes/ubify/coretypes/content/spacesfolder.py | Python | gpl-3.0 | 2,590 | 0.016216 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Archetypes.atapi import *
from Products.ATContentTypes.content.folder \
import ATFolder as BaseClass
from Products.ATContentTypes.content.folder \
import ATFolderSchema as DefaultSchema
from Products.ATContentTypes.content.base import registerATCT
from ubify.coretypes.config import PROJECTNAME
schema = DefaultSchema.copy()
|
class SpacesFolder(BaseClass):
    # Customizable ATFolder subclass used as the spaces container.
    __doc__ = BaseClass.__doc__ + "(customizable version)"

    schema = schema
    _at_rename_after_creation = True

    # enable FTP/WebDAV and friends
    # (fixed: the attribute reference was corrupted to "BaseC | lass.PUT")
    PUT = BaseClass.PUT


registerATCT(SpacesFolder, PROJECTNAME)
|
zelongc/cloud-project | connect_db.py | Python | mit | 783 | 0.014049 | #!/usr/bin/python
from couchdb import Server
import datetime
# server = Server() # connects to the local_server
# >>> remote_server = Server('http://example.com:5984/')
# >>> secure_remote_server = Server('https://username:password@example.com:5984/')
class db_server(object):
    """Thin wrapper around a CouchDB server targeting the "new_tweet" DB.

    Failed inserts are appended to a local log file instead of raising.
    """

    def __init__(self, username, login):
        # NOTE(review): credentials are sent over plain HTTP -- confirm the
        # endpoint is only reachable on a trusted network.
        self.secure_server = Server('http://%s:%s@130.220.212.108:5984' % (username, login))
        self.db = self.secure_server["new_tweet"]

    def insert(self, data):
        """Save *data*; on failure append a timestamped record to the log.

        ``data`` must carry an ``_id`` key (logged on failure).
        """
        try:
            doc_id, doc_rev = self.db.save(data)
        except Exception as e:
            # 'dabatase_log' spelling kept for compatibility with existing
            # tooling that may read this file.
            with open('dabatase_log', 'a') as f:
                f.write("[" + datetime.datetime.now().__str__() + "]" + '\n')
                f.write(str(e) + '\n')
                f.write(data['_id'] + '\n')
|
UCNA/main | Scripts/plotters/LarmorClipping.py | Python | gpl-3.0 | 8,636 | 0.074456 | #!/sw/bin/python2.7
import sys
sys.path.append("..")
from ucnacore.PyxUtils import *
from math import *
from ucnacore.LinFitter import *
#from UCNAUtils import *
from bisect import bisect
from calib.FieldMapGen import *
def clip_function(y, rho, h, R):
    """Antiderivative used by survival_fraction (disc/strip overlap)."""
    chord = sqrt(rho**2 - y**2)
    if chord == 0:
        chord = 1e-10  # avoid dividing by zero at y = +/-rho
    term_a = h * rho**2 / R * atan(y / chord)
    term_b = 2 * chord / (3 * R) * (3 * h * y / 2 + rho**2 - y**2)
    return term_a + term_b

def survival_fraction(h, rho, R):
    """Fraction of a disc of radius *rho* surviving a clip of height *h*.

    Returns 1 when the disc is entirely inside the acceptance, 0 when it
    is entirely outside.
    """
    d = R - h
    if d < -rho:
        return 1
    if h <= -rho:
        return 0
    cap = 0
    if d < rho:
        chord = sqrt(rho**2 - d**2)
        cap = pi / 2 * rho**2 - d * chord - rho**2 * atan(d / chord)
    upper = clip_function(min(h, rho), rho, h, R)
    lower = clip_function(max(h - R, -rho), rho, h, R)
    return (cap + upper - lower) / (pi * rho**2)
def radial_clip_function(r, rho, h, R):
    """Antiderivative used by radial_survival_fraction."""
    return r * r * (3 * h - 2 * r) / (6 * R**2)

def radial_survival_fraction(h, rho, R):
    """Radial analogue of survival_fraction for a source spot of radius *rho*."""
    d = h - R
    if d > rho:
        return 1
    if h <= 0:
        return 0
    base = (h - R)**2 if d > 0 else 0
    upper = radial_clip_function(min(h, rho), rho, h, R)
    lower = radial_clip_function(max(d, 0), rho, h, R)
    return (base + upper - lower) / rho**2
class rot3:
    """3-D rotation given by three axis angles, with optional scale ``s``.

    Note: ``__call__`` uses Python 2 tuple-parameter unpacking; this file
    is Python 2 code.
    """
    def __init__(self,t1,t2,t3,s=1.0):
        # Pre-compute sines/cosines of the three rotation angles.
        self.c1,self.s1 = cos(t1),sin(t1)
        self.c2,self.s2 = cos(t2),sin(t2)
        self.c3,self.s3 = cos(t3),sin(t3)
        self.s = s
    def __call__(self,(x,y,z)):
        # Apply three planar rotations in sequence (xy, yz, zx), then scale.
        x,y = self.c1*x+self.s1*y,self.c1*y-self.s1*x
        y,z = self.c2*y+self.s2*z,self.c2*z-self.s2*y
        z,x = self.c3*z+self.s3*x,self.c3*x-self.s3*z
        return self.s*x,self.s*y,self.s*z
class path3d:
    """A 3-D polyline that renders as a 2-D pyx path.

    Vertices are (x, y, z); only x and y are drawn, z is retained for
    depth comparison where two projected paths cross (see interleave()).
    Note: several methods use Python 2 tuple-parameter unpacking.
    """
    def __init__(self):
        self.pts = []            # (x, y, z) vertices
        self.sty = []            # pyx styles for the whole stroke
        self.endsty = []         # extra styles for the final stroke (e.g. arrow)
        self.breakunder = False  # gap width where this path passes under another
        self.nopatch = False     # if True, never draw over-crossing patches
    def addpt(self,(x,y,z),s=1):
        # Append a vertex, optionally scaled uniformly by s.
        self.pts.append((x*s,y*s,z*s))
    def apply(self,transf):
        # Run every vertex through a transform (e.g. a rot3 instance).
        self.pts = [transf(xyz) for xyz in self.pts]
    def finish(self):
        # Build the 2-D pyx path from the projected (x, y) coordinates.
        self.p = path.path()
        self.p.append(path.moveto(self.pts[0][0],self.pts[0][1]))
        for g in self.pts[1:]:
            self.p.append(path.lineto(g[0],g[1]))
        self.patchpts = []  # (curve position, z) where this path crosses over
        self.underpts = []  # curve positions where this path passes under
    def nearestpt(self,(x,y)):
        # Index of the stored vertex closest to (x, y) in the projection.
        d0 = 1e20
        n = None
        for i in range(len(self.pts)):
            d1 = (self.pts[i][0]-x)**2+(self.pts[i][1]-y)**2
            if d1 < d0:
                d0 = d1
                n = i
        return n
    def znear(self,(x,y)):
        # z of the vertex nearest to the given projected point.
        return self.pts[self.nearestpt((x,y))][2]
    def znearc(self,c):
        # z of the vertex nearest to curve position c.
        # NOTE(review): the *100 factor presumably converts pyx length
        # units for the nearest-point search -- confirm the scale.
        x,y = self.p.at(c)
        x,y = 100*x.t,100*y.t
        return self.znear((x,y))
    def addPatch(self,c,z):
        # Record that this path crosses over another at curve position c.
        self.patchpts.append((c,z))
    def drawto(self,cnvs):
        # Stroke the finished 2-D path onto the canvas.
        cnvs.stroke(self.p,self.sty)
def interleave(p3d1,p3d2):
    """Decide which path is on top at every 2-D crossing of two paths.

    The path with the larger z at a crossing records a patch point (it is
    drawn over); the other records an under-crossing.  Both paths must be
    finished; both are modified in place.
    """
    print "Finding intersection points..."
    is1,is2 = p3d1.p.intersect(p3d2.p)
    print "determining patch z..."
    assert len(is1)==len(is2)
    for i in range(len(is1)):
        # Compare depths of the two paths at this intersection.
        z1 = p3d1.znearc(is1[i])
        z2 = p3d2.znearc(is2[i])
        if z1>z2:
            p3d1.addPatch(is1[i],z1)
            p3d2.underpts.append(is2[i])
        else:
            p3d2.addPatch(is2[i],z2)
            p3d1.underpts.append(is1[i])
    print "done."
def drawInterleaved(c,ps):
    """Draw paths onto canvas *c* with correct over/under crossings.

    Paths with ``breakunder`` set are stroked with gaps where they pass
    under other paths; afterwards short "patch" segments re-stroke every
    over-crossing in depth order so the upper path visually covers the
    lower one.
    """
    print "Drawing base curves..."
    for p in ps:
        p.p = p.p.normpath()
        if p.breakunder:
            # Cut a small gap around every under-crossing and stroke only
            # the visible (even-indexed) segments.
            splits = []
            for s in p.underpts:
                splits += [s-p.breakunder*0.5,s+p.breakunder*0.5]
            psplit = p.p.split(splits)
            for seg in psplit[0::2]:
                c.stroke(seg,p.sty)
        else:
            c.stroke(p.p,p.sty+p.endsty)
    print "Preparing patches..."
    patches = []
    for (pn,p) in enumerate(ps):
        if p.nopatch:
            continue
        # Extract a short segment around each over-crossing; tag it with
        # its z so patches can be drawn lowest-first.
        p.patchpts.sort()
        splits = []
        for s in p.patchpts:
            splits += [s[0]-0.05,s[0]+0.05]
        psplit = p.p.split(splits)
        patches += [ (patch[1],pn,psplit[2*n+1]) for n,patch in enumerate(p.patchpts) ]
    patches.sort()
    print "Patching intersections..."
    for p in patches:
        c.stroke(p[2],ps[p[1]].sty)
    print "Done."
def fieldPath(fmap, z0, z1, c, cmax, npts=50):
    """Trace one magnetic field line through the field map.

    The transverse offset scales as c / sqrt(B(z)) (flux conservation);
    points whose offset exceeds *cmax* are omitted.
    """
    line = path3d()
    for z in unifrange(z0, z1, npts):
        offset = c / sqrt(fmap(z) + 0.0001)
        if abs(offset) < cmax:
            line.addpt((0, offset, z))
    return line
def larmor_unif(fT,theta,KE,t):
    """Analytic larmor spiral position at time *t* in a uniform field *fT*.

    NOTE(review): ``electron_beta`` comes from UCNAUtils, whose import is
    commented out at the top of this file -- confirm this function is
    still used/runnable.
    """
    b = electron_beta(KE)
    z = t*b*cos(theta)*3e8 # m
    r = 3.3e-6*b*(KE+511)*sin(theta)/fT # m
    f = 2.8e10*fT # Hz
    return r*cos(2*pi*f*t),r*sin(2*pi*f*t),z
def larmor_step(p, pt2_per_B, fT):
    """Return (v_z, omega) for one spiral step in field *fT* [T].

    *p* is the total momentum [keV]; *pt2_per_B* is the adiabatic
    invariant (transverse momentum squared per unit field).  A particle
    with no longitudinal momentum is reflected (v_z = 0).
    """
    omega = 2.8e10 * fT * 2 * pi       # angular frequency, rad/s
    p_t = sqrt(fT * pt2_per_B)         # transverse momentum, keV
    if p <= p_t:
        return 0, omega
    p_l = sqrt(p**2 - p_t**2)          # longitudinal momentum, keV
    v_z = p_l / sqrt(p * p + 511 * 511) * 3e8  # z velocity, m/s
    return v_z, omega
def larmorPath(fmap,p,pt2_per_B,z0,z1,dt,theta=0):
    """Trace a larmor spiral through the field map from z0 toward z1.

    p           total momentum [keV]
    pt2_per_B   transverse momentum squared per unit field (invariant)
    dt          time step [s]
    Stops when the particle leaves [z0, z1] or is reflected (vz <= 0).
    """
    lpath = path3d()
    z = z0
    vz = 1
    while z0 <= z <= z1 and vz>0:
        fT = fmap(z) # magnetic field, T
        r = 3.3e-6*sqrt(pt2_per_B/fT) # larmor radius, m
        lpath.addpt((r*cos(theta),r*sin(theta),z))
        # step to the next point
        vz,nu = larmor_step(p,pt2_per_B,fmap(z))
        theta += nu*dt
        z += vz*dt
    return lpath
def plot_larmor_trajectory():
    """Render a larmor spiral over field lines; write Bfield.pdf and
    larmor_spiral.pdf.

    Fixed: a corrupted token in the ``rot = rot3(...)`` line; also removed
    unused locals (fT, theta, KE, tm) and dead commented-out styling.
    """
    # Field map: step from a weak region up to a 0.6 T plateau.
    fmap = fieldMap()
    fmap.addFlat(-1.0,0.01,1.0)
    fmap.addFlat(0.015,1.0,0.6)
    # Projection: rotate so z runs across the page, scaled by 500.
    rot = rot3(0,0.0,-pi/2+0.2,500)
    doFinal = True
    # Spiral for a 500 keV electron launched at 3*pi/4.
    plarmor = larmorPath(fmap,500,495**2/fmap(0),0,0.02,5e-13,3*pi/4)
    plarmor.apply(rot)
    plarmor.sty = [style.linewidth.thick]
    plarmor.endsty = [deco.earrow()]
    plarmor.finish()
    x0,y0 = plarmor.p.at(plarmor.p.begin())
    fieldlines = []
    w = 0.0025
    cmagf = canvas.canvas()
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-0.002,0.022,o,1.02*w)
        if len(pf.pts) < 10:
            continue
        pf.apply(rot)
        pf.finish()
        pf.breakunder = 0.07
        pf.nopatch = True
        pf.sty=[style.linewidth.thin] # field line color/style
        fieldlines.append(pf)
        pf.drawto(cmagf)
        if doFinal:
            interleave(plarmor,pf)
    # Mark the launch point on the field-only figure.
    cmagf.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
    c = canvas.canvas()
    if doFinal:
        drawInterleaved(c,[plarmor,]+fieldlines)
    else:
        plarmor.drawto(c)
        for pf in fieldlines:
            pf.drawto(c)
    c.stroke(path.circle(x0,y0,0.07),[deco.filled([rgb.white]),style.linewidth.Thick])
    c.writetofile("/Users/michael/Desktop/larmor_spiral.pdf")
def plot_spectrometer_field():
    """Draw field lines for the full spectrometer field profile to PDF."""
    fmap = fieldMap()
    # Profile: weak end regions, 0.6 T transitions, 1.0 T central region.
    fmap.addFlat(-3,-2.8,0.01)
    fmap.addFlat(-2.3,-2.1,0.6)
    fmap.addFlat(-1.6,1.6,1.0)
    fmap.addFlat(2.1,2.3,0.6)
    fmap.addFlat(2.8,3,0.01)
    rot = rot3(0.0,0.0,-pi/2.,10.)
    w = 0.25
    cmagf = canvas.canvas()
    for o in unifrange(-w,w,20):
        pf = fieldPath(fmap,-2.6,2.6,o,w,400)
        pf.apply(rot)
        #if len(pf.pts) < 10:
        #    continue
        pf.finish()
        #pf.sty=[style.linewidth.thin,rgb.blue]
        pf.sty=[style.linewidth.thin] # field line color/style
        pf.drawto(cmagf)
    cmagf.writetofile("/Users/michael/Desktop/Bfield.pdf")
def larmor_clipping_plot():
    """Plot line-survival fractions vs. source offset and save a PDF.

    NOTE(review): column 2 uses R=2*3.3 and column 3 uses R=3.3, plotted
    as the 1 MeV and 500 keV survival respectively -- confirm the radius
    to energy mapping.
    """
    gSurv=graph.graphxy(width=20,height=10,
            x=graph.axis.lin(title="Source offset [mm]"),
            y=graph.axis.lin(title="",min=0,max=1),
            key = graph.key.key(pos="bl"))
    gSurv.texrunner.set(lfs='foils17pt')
    rho = 1.5
    h0 = 9.5
    gdat = [ [h0-h,survival_fraction(h,rho,2*3.3),survival_fraction(h,rho,2*3.3/2)] for h in unifrange(h0-10,h0,100) ]
    # Append the survival ratio, guarding against division by ~zero.
    gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
    gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue])])
    gSurv.plot(graph.data.points(gdat,x=1,y=2,title="1MeV line survival"),[graph.style.line([style.linewidth.Thick,rgb.red])])
    gSurv.plot(graph.data.points(gdat,x=1,y=4,title="1MeV:500keV survival ratio"),[graph.style.line([style.linewidth.Thick])])
    gSurv.writetofile("/Users/michael/Desktop/survival_%g.pdf"%rho)
def radial_clipping_plot():
gSurv=graph.graphxy(width=20,height=10,
x=graph.axis.lin(title="Source spot radius [mm]",min=0,max=9.5),
y=graph.axis.lin(title="",min=0,max=1),
key = graph.key.key(pos="bl"))
gSurv.texrunner.set(lfs='foils17pt')
h = 9.5
gdat = [ [rho,radial_survival_fraction(h,rho,3.3),radial_survival_fraction(h,rho,3.3/2.0)] for rho in unifrange(0.,9.5,200) ]
gdat = [ g+[0.5*(g[2]<=1e-3)+(g[2]>1e-3)*(g[1]/(g[2]+1e-6)),] for g in gdat]
gSurv.plot(graph.data.points(gdat,x=1,y=3,title="500keV line survival"),[graph.style.line([style.linewidth.Thick,rgb.blue] |
dbaty/soho | tests/test_builder.py | Python | bsd-3-clause | 4,834 | 0.000414 | from contextlib import contextmanager
from filecmp import dircmp
from tempfile import mkdtemp
from shutil import rmtree
from unittest import TestCase
@contextmanager
def temp_folder():
    """Yield a freshly created temporary directory, removing it on exit."""
    folder = mkdtemp()
    try:
        yield folder
    finally:
        rmtree(folder)
class DummyLogger(object):
    """Logger stand-in that silently discards every message."""

    def info(self, *args, **kwargs):
        """Accept any logging call and ignore it."""

    # All levels share the same no-op implementation.
    debug = warning = info
class BuilderFunctionalTest(object):
    """Mixin that drives a soho Builder against a fixture site.

    Subclasses set ``test_site``; the mixin locates the fixture, runs the
    builder into a temporary directory and compares the output with the
    expected ``www`` tree.  Fixed: two corrupted tokens in
    ``_build`` and ``test_builder``.
    """

    def setUp(self):
        import os
        here = os.path.dirname(__file__)
        self.setUpTestDir(here)
        self.config_file = os.path.join(self.test_site_dir, 'sohoconf.py')
        self.expected_dir = os.path.join(self.test_site_dir, 'www')

    def setUpTestDir(self, here):
        import os
        self.test_site_dir = os.path.join(here, 'fixtures', self.test_site)

    def _make_builder(self, options, **custom_settings):
        """Create a Builder from *options*, silencing its logger."""
        from soho.builder import Builder
        from soho.cli import get_settings
        settings = get_settings(options)
        self.assertIn('logger', settings.keys())
        settings.update(custom_settings)
        settings['logger'] = DummyLogger()
        return Builder(**settings)

    def assertBuilderOutput(self, out_dir, expected_dir):
        """Assert the two directories contain identical files."""
        diff = dircmp(out_dir, expected_dir)
        self.assertEqual(diff.left_only, [])
        self.assertEqual(diff.right_only, [])
        self.assertEqual(diff.diff_files, [])

    def _fix_lastmod_in_sitemap(self, out_dir):
        # 'git clone' does not preserve file modification times, so the
        # generated sitemap's <lastmod> values differ from the committed
        # expectation.  Overwrite them with the expected values, leaving
        # the rest of the generated file untouched.
        import os
        import re
        regexp = re.compile('(<lastmod>.*?</lastmod>)')
        expected = os.path.join(self.expected_dir, 'sitemap.xml')
        with open(expected) as fp:
            elements = regexp.findall(fp.read())
        test = os.path.join(out_dir, 'sitemap.xml')
        with open(test) as fp:
            bogus = fp.read()
        def fix_element(matchobj):
            return elements.pop(0)
        fixed = regexp.sub(fix_element, bogus)
        with open(test, 'w') as fp:
            fp.write(fixed)

    def _build(self, builder, out_dir):
        """Run the builder, then normalize sitemap timestamps if present."""
        import os
        builder.build()
        sitemap = os.path.join(out_dir, 'sitemap.xml')
        if os.path.exists(sitemap):
            self._fix_lastmod_in_sitemap(out_dir)

    def test_builder(self):
        from .base import make_options
        options = make_options(config_file=self.config_file)
        with temp_folder() as out_dir:
            builder = self._make_builder(options=options, out_dir=out_dir)
            self._build(builder, out_dir)
            self.assertBuilderOutput(out_dir, self.expected_dir)
class TestSite1(BuilderFunctionalTest, TestCase):
    """Functional tests against the 'site1' fixture."""
    test_site = 'site1'
    # inherits 'test_builder()' from BuilderFunctionalTest
    def test_builder_dry_run(self):
        """With do_nothing set, the builder must write no output at all."""
        import os
        from .base import make_options
        options = make_options(config_file=self.config_file, do_nothing=True)
        with temp_folder() as out_dir:
            builder = self._make_builder(options=options, out_dir=out_dir)
            self._build(builder, out_dir)
            self.assertEqual(os.listdir(out_dir), [])
    def test_builder_build_twice(self):
        """Building twice into the same directory must stay correct."""
        from .base import make_options
        options = make_options(config_file=self.config_file)
        with temp_folder() as out_dir:
            builder = self._make_builder(options=options, out_dir=out_dir)
            self._build(builder, out_dir)
            # build again
            builder = self._make_builder(options=options, out_dir=out_dir)
            self._build(builder, out_dir)
            self.assertBuilderOutput(out_dir, self.expected_dir)
class TestSite2(BuilderFunctionalTest, TestCase):
    """Functional tests against the 'site2' fixture."""
    test_site = 'site2'
    # inherits 'test_builder()' from BuilderFunctionalTest
class TutorialFunctionalTest(BuilderFunctionalTest):
    """Base class to test all parts of the tutorials.

    Overrides the fixture location to point at docs/_tutorial.
    """
    def setUpTestDir(self, here):
        import os
        self.test_site_dir = os.path.join(
            here, '..', 'docs', '_tutorial', self.test_site)
class TestTutorialIntro(TutorialFunctionalTest, TestCase):
    # Tutorial step 1: minimal site.
    test_site = '1-intro'
class TestTutorialAssets(TutorialFunctionalTest, TestCase):
    # Tutorial step 2: static assets.
    test_site = '2-assets'
class TestTutorialMetadata(TutorialFunctionalTest, TestCase):
    # Tutorial step 3: page metadata.
    test_site = '3-metadata'
class TestTutorialI18n(TutorialFunctionalTest, TestCase):
    # Tutorial step 4: internationalization.
    test_site = '4-i18n'
|
madpilot78/ntopng | tools/http_authenticator.py | Python | gpl-3.0 | 2,414 | 0.002071 | #!/usr/bin/env python3
#
# https://gist.githubusercontent.com/Integralist/ce5ebb37390ab0ae56c9e6e80128fdc2/raw/2e62bcc38aed7873f07e06865f0f4c06ec9129ee/Python3%2520HTTP%2520Server.py
#
# Sample HTTP authenticator service which work with ntopng "http" authentication.
# The "HTTP server" URL should be set to "http://localhost:3001/login".
#
# Test with:
# curl --header "Content-Type: application/json" --request POST --data '{"user":"testadmin","password":"avoid-plaintext-admin"}' -v http://localhost:3001/login
#
import time
import json
from http.server import BaseHTTPRequestHandler, HTTPServer

HOST_NAME = 'localhost'
PORT_NUMBER = 3001

# Demo credential store; plaintext passwords are for illustration only.
USERS_DB = {
    "testuser": {"password": "avoid-plaintext", "admin": False},
    "testadmin": {"password": "avoid-plaintext-admin", "admin": True},
}
class MyHandler(BaseHTTPRequestHandler):
    """Minimal HTTP authenticator: POST /login with JSON credentials.

    Improvements: removed the unused ``admin`` local, stopped echoing the
    posted credentials to stdout, and switched the password comparison to
    ``hmac.compare_digest`` to avoid timing side channels.
    """

    def do_POST(self):
        # Only the /login endpoint is supported.
        if self.path == "/login":
            self.handle_login()
        else:
            self.respond({'status': 500})

    def handle_http(self, status_code, path, data={}):
        """Send headers for *status_code* and return the JSON body bytes."""
        self.send_response(status_code)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        content = json.dumps(data)
        return bytes(content, 'UTF-8')

    def respond(self, opts, data={}):
        """Write a full JSON response with status taken from *opts*."""
        response = self.handle_http(opts['status'], self.path, data)
        self.wfile.write(response)

    def handle_login(self):
        """Validate the posted user/password pair against USERS_DB.

        Responds 200 (with {"admin": true} for admins) on success,
        403 otherwise.
        """
        import hmac  # local import: constant-time string comparison

        data_string = self.rfile.read(int(self.headers['Content-Length']))
        data = json.loads(data_string)
        username = data.get("user")
        password = data.get("password")
        status = 403
        response_data = {}
        if username and password and (username in USERS_DB):
            user = USERS_DB[username]
            # compare_digest requires two strings of the same type; guard
            # against non-string JSON values before comparing.
            if isinstance(password, str) and hmac.compare_digest(
                    user["password"], password):
                status = 200
                if user["admin"]:
                    response_data = {"admin": True}
        return self.respond({'status': status}, response_data)
if __name__ == '__main__':
    # Serve until interrupted, logging start/stop timestamps.
    server = HTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
    print(time.asctime(), 'Server Starts - %s:%s' % (HOST_NAME, PORT_NUMBER))
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    server.server_close()
    print(time.asctime(), 'Server Stops - %s:%s' % (HOST_NAME, PORT_NUMBER))
|
lpirl/autosuspend | autosuspend.pre/200-ensure-low-loadavg.py | Python | gpl-3.0 | 507 | 0.005917 | #!/usr/bin/env python3
# Exits with exit code 0 (i.e., allows sleep) if all load averages
# (1, 5, 15 minutes) are below ``MAX_IDLE_LOAD``.

from os import getloadavg

# Highest load average still considered "idle".
MAX_IDLE_LOAD = .09


def check_load(time_span, load):
    """Exit with status 1 if *load* exceeds MAX_IDLE_LOAD.

    *time_span* (minutes) is used only for the diagnostic message.
    """
    if load > MAX_IDLE_LOAD:
        print(
            " Won't sleep because %i minute load average" % time_span,
            "of %.2f is above threshold of %.2f." % (load, MAX_IDLE_LOAD)
        )
        exit(1)


if __name__ == '__main__':
    # Guarded so the module can be imported (e.g. by tests) without
    # inspecting the live system load.
    loads = getloadavg()
    check_load(1, loads[0])
    check_load(5, loads[1])
    check_load(15, loads[2])
|
kstaniek/ciscoconfparse | ciscoconfparse/__init__.py | Python | gpl-3.0 | 29 | 0 | from ciscoconf | parse import *
| |
matthewoconnor/maps | map/admin.py | Python | mit | 975 | 0.007179 | from django.contrib import admin, messages
from .tasks import import_areas_from_kml_file
from .models import Area, AreaMap, DataMap, AreaBin


class AreaAdmin(admin.ModelAdmin):
    """Admin list view for individual areas."""
    list_display = ("id", "name", "area_type")


class AreaMapAdmin(admin.ModelAdmin):
    """Admin for area maps with a bulk KML-import action."""
    list_display = ("id", "name", "data_source", "dataset_identifier", "created_time")
    actions = ["generate_areas_from_kmlfile"]

    def generate_areas_from_kmlfile(modeladmin, request, queryset):
        # Queue one asynchronous import task per selected map.
        # NOTE(review): passing model instances to apply_async assumes the
        # Celery serializer can handle them -- confirm against tasks.py.
        for areamap in queryset:
            import_areas_from_kml_file.apply_async(args=[areamap])


class DataMapAdmin(admin.ModelAdmin):
    """Admin list view for data maps."""
    list_display = ("id", "name", "area_map", "data_source", "dataset_identifier", "created_time")


class AreaBinAdmin(admin.ModelAdmin):
    """Admin list view for per-area binned values."""
    list_display = ("id", "data_map", "area", "value", "count")


admin.site.register(Area, AreaAdmin)
admin.site.register(AreaMap, AreaMapAdmin)
admin.site.register(DataMap, DataMapAdmin)
admin.site.register(AreaBin, AreaBinAdmin)
|
Muyoo/gold_pred | feature_creator/news.py | Python | apache-2.0 | 3,497 | 0.004713 | #!/usr/bin/env python
#encoding=utf8
'''
Author: xmuyoo@163.com
Date: 2015/12/04
Content:
生成新闻特征数据
输入:
1、无特征样本数据
2、topic_view的文件
3、文章分词过滤后的文件
输出:
1、特征样本数据
'''
import pdb
from collections import Counter
DATA_DIR = 'tmp_data/topic_train'
MAX_SCORE = 10.0
MIN_WORDS_COUNT = 3
TOPIC_BELONG_LIMIT = 5
def load_topic_word():
    """Build a word -> topic-id map from the topic_view file.

    Only words with score >= MAX_SCORE are kept, and topics that retain
    MIN_WORDS_COUNT or fewer strong words are dropped entirely.

    NOTE(review): the counter is keyed by the line index (``topic_num``)
    while the dict stores ``int(topic)`` parsed from the line -- this is
    only consistent if the topic field equals the line index; verify
    against the topic_view file format.
    """
    topic_fname = '%s/topic_view.topic' % DATA_DIR
    word_topic_dic = {}
    topic_num = 0
    topic_counter = Counter()
    with open(topic_fname, 'r') as reader:
        for line in reader:
            topic, _, words = line.strip().partition('\t')
            for word in words.split():
                word, _sep, score = word.partition(':')
                score = float(score)
                # Keep only words scored at or above MAX_SCORE.
                if score < MAX_SCORE:
                    continue
                topic_counter[topic_num] += 1
                word_topic_dic[word] = int(topic)
            topic_num += 1
    # Topics with too few strong words are pooled away.
    pool_topic = set([t for t, cnt in topic_counter.iteritems() \
            if cnt <= MIN_WORDS_COUNT])
    tmp_words_dic = {}
    for word, topic in word_topic_dic.iteritems():
        if topic in pool_topic:
            continue
        tmp_words_dic[word] = topic
    return tmp_words_dic, topic_num
def load_date_word():
    """Map each date to the set of distinct words in that day's documents."""
    doc_fname = '%s/filtered.rst' % DATA_DIR
    date_word_dic = {}
    with open(doc_fname, 'r') as reader:
        for line in reader:
            tmp_dic = Counter()
            # Expected line format: doc_id <tab> date <tab> words.
            data = line.strip().split('\t', 2)
            if len(data) != 3:
                continue
            doc_id, date, words = data
            if date not in date_word_dic:
                date_word_dic[date] = set()
            for wd in words.split():
                tmp_dic[wd] += 1
            # Only word identity matters here, not the counts.
            date_word_dic[date] |= set([item[0] \
                    for item in tmp_dic.iteritems()])
    return date_word_dic
def init_features(topic_set):
    """Return a fresh feature dict mapping every topic in *topic_set* to '0'."""
    return {topic: '0' for topic in topic_set}
def scan_topics(date, date_word_dic, word_topic_dic):
    """Return topics with at least TOPIC_BELONG_LIMIT words on *date*.

    Returns None when the date has no word data at all.
    """
    if date not in date_word_dic:
        return
    words_set = date_word_dic[date]
    # Count how many of the day's words belong to each topic.
    tmp_counter = Counter()
    for word in words_set:
        if word not in word_topic_dic:
            continue
        tmp_counter[word_topic_dic[word]] += 1
    return set([topic for topic, cnt in \
            filter(lambda x:x[1] >= TOPIC_BELONG_LIMIT, tmp_counter.iteritems())])
de | f generate(reader, output_fname, mongo_source):
''''''
print 'Feature: News'
writer = open(output_fname, 'w')
word_topic_dic, topic_num = load_topic_word()
topic_lst = sorted(set(word_topic_dic.values()))
date_word_dic = load_date_word()
title_str = 'label %s\n' % ' '.join(['news_%s' % t for t in topic_lst])
writer.write(title_str)
for line in reader:
data = line.strip().split(' ', | 4)
label = int(data[1])
date = data[2]
features = init_features(topic_lst)
tmp_topic_set = scan_topics(date, date_word_dic, word_topic_dic)
if not tmp_topic_set:
raise Exception('Error date: \n%s' % date)
#if label == -1:
# pdb.set_trace()
for topic in tmp_topic_set:
features[topic] = '1'
outstr = '%s %s\n' % (label, ' '.join([features[t] for t in topic_lst]))
writer.write(outstr)
writer.close()
|
hedin/vyatta-conf-parser | vyattaconfparser/parser.py | Python | mit | 5,394 | 0 | # coding:utf-8
import re
import sys

# Py2/py3 compatibility: u() normalizes input to text, and ``unicode``
# exists as a name on both versions.
if sys.version < '3':
    def u(x):
        return x.decode('utf-8')
else:
    unicode = str

    def u(x):
        return x


# Matches section start `interfaces {`
rx_section = re.compile(r'^([\w\-]+) \{$', re.UNICODE)
# Matches named section `ethernet eth0 {`
# (fixed: the character class was corrupted by a stray " | " token)
rx_named_section = re.compile(
    r'^([\w\-]+) ([\w\-\"\./@:=\+]+) \{$', re.UNICODE
)
# Matches simple key-value pair `duplex auto`
rx_value = re.compile(r'^([\w\-]+) "?([^"]+)?"?$', re.UNICODE)
# Matches single value (flag) `disable`
rx_flag = re.compile(r'^([\w\-]+)$', re.UNICODE)
# Matches comments
rx_comment = re.compile(r'^(\/\*).*(\*\/)', re.UNICODE)
class ParserException(Exception):
    """Raised when the Vyatta configuration text cannot be parsed."""
def update_tree(config, path, val, val_type=None):
    """Insert *val* into the nested *config* dict at *path*.

    *path* is a list of one-key dicts ({name: val_type}) describing the
    section nesting; *val_type* selects how the leaf is merged ('flag',
    'value', 'named_section' or 'section').

    NOTE(review): the merge rules below are intricate and order
    dependent; they are documented from the code as-is, not restructured.
    """
    # Walk down to the node addressed by path, creating dicts on the way.
    t = config
    for item in path:
        if list(item.keys())[0] not in t:
            try:
                t[list(item.keys())[0]] = {}
            except TypeError:
                # Parent is a scalar (leaf value); stop descending.
                break
        t = t.get(list(item.keys())[0])
    if val_type == 'flag':
        # Flags are stored as {flag: flag} merged into the section.
        t.update(val)
    elif val_type == 'value':
        if t and isinstance(t, dict):
            if list(t.keys())[0] == list(val.keys())[0]:
                # Same key seen again: collect all values as nested keys.
                try:
                    t.update(
                        {
                            list(t.keys())[0]: dict(
                                [
                                    (k, {})
                                    for k in list(t.values())
                                    + list(val.values())
                                ]
                            )
                        }
                    )
                except TypeError:
                    # Existing value is a scalar; promote it to a dict first.
                    if isinstance(t[list(t.keys())[0]], unicode):
                        t[list(t.keys())[0]] = {t[list(t.keys())[0]]: {}}
                    t[list(t.keys())[0]].update({list(val.values())[0]: {}})
            elif list(val.keys())[0] == list(path[-1].keys())[0]:
                # Key repeats the enclosing section name: store value only.
                t.update({list(val.values())[0]: {}})
            elif list(val.keys())[0] in list(t.keys()):
                # Key already present with a different value: merge both.
                try:
                    t.update(
                        {
                            list(val.keys())[0]: {
                                t[list(val.keys())[0]]: {},
                                list(val.values())[0]: {},
                            }
                        }
                    )
                except TypeError:
                    t[list(val.keys())[0]].update({list(val.values())[0]: {}})
            else:
                t.update(val)
        else:
            if isinstance(t, str):
                # The target collapsed to a scalar: rebuild the parent
                # level so both the old scalar and new value survive.
                prev_keys = list(map(lambda x: list(x.keys())[0], path))[:-1]
                prev_section_key = prev_keys[-1]
                if len(prev_keys) == 1:
                    config[prev_section_key] = {config[prev_section_key]: {}}
                    t = config[prev_section_key]
                else:
                    t = config
                    for k in prev_keys[:-1]:
                        t = t[k]
                    t[prev_section_key] = {t[prev_section_key]: {}}
                    t = t[prev_section_key]
                t.update({list(item.keys())[0]: val})
            else:
                t.update(val)
    elif val_type == 'named_section':
        # Named sections were already created during the descent above.
        pass
    elif val_type == 'section':
        t = val
    return config
def parse_node(config, line, line_num, path=None):
    """Parse one config line, updating *config* and the section *path*.

    Returns the (config, path) pair; *line_num* is used only in the
    parse-error message.  Raises ParserException on unrecognized input.
    """
    if not path:
        path = []
    line = line.strip()
    if not line:
        return config, path
    if rx_section.match(line):
        # Anonymous section start, e.g. `interfaces {`.
        val_type = 'section'
        section = rx_section.match(line).groups()[0]
        path.append({section: val_type})
        if path:
            update_tree(config, path, {section: {}}, val_type=val_type)
    elif rx_named_section.match(line):
        # Named section start, e.g. `ethernet eth0 {`.
        val_type = 'named_section'
        section, name = rx_named_section.match(line).groups()
        # Push the section name only if it is not already the current one.
        if section not in [list(p.keys())[0] for p in path]:
            path.append({section: val_type})
        elif section != [list(p.keys())[0] for p in path][-1]:
            path.append({section: val_type})
        path.append({name: val_type})
        update_tree(config, path, {section: {name: {}}}, val_type=val_type)
    elif rx_value.match(line):
        # Simple `key value` pair.
        key, value = rx_value.match(line).groups()
        update_tree(config, path, {key: value}, val_type='value')
    elif rx_flag.match(line):
        # Bare flag such as `disable`.
        flag = rx_flag.match(line).group()
        update_tree(config, path, {flag: flag}, val_type='flag')
    elif rx_comment.match(line):
        pass
    elif line == '}' and path:
        # Section close: pop once, twice when leaving a named section
        # (both its name and its section keyword are on the path).
        path_types = [list(p.values())[0] for p in path]
        path.pop()
        if len(path_types) > 1 and path_types[-2:] == [
            'section',
            'named_section',
        ]:
            path.pop()
        elif len(path_types) > 1 and path_types[-2:] == [
            'named_section',
            'named_section',
        ]:
            path.pop()
    else:
        raise ParserException(
            'Parse error at {line_num}: {line}'.format(
                line_num=line_num, line=line
            )
        )
    return config, path
def parse_conf(s):
if s:
s = u(s).split('\n')
c = {}
headers = []
for n, line in enumerate(s, start=1):
c, headers = parse_node(c, line, n, headers)
return c
raise ParserException('Empty config passed')
|
npuichigo/ttsflow | third_party/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/sample_stats_test.py | Python | apache-2.0 | 9,019 | 0.005987 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Sample Stats Ops."""
from __future__ imp | ort absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import sample_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class PercentileTestWithLowerInterpolation(test.TestCase):
_interpolation = "lower"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_odd_input_axis_0(self):
x = np.array([[-1., 50., -3.5, 2., -1], [0., 0., 3., 2., 4.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
# Get dim 1 with negative and positive indices.
pct_neg_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
pct_pos_index = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct_neg_index.get_shape())
self.assertAllEqual((2,), pct_pos_index.get_shape())
self.assertAllClose(expected_percentile, pct_neg_index.eval())
self.assertAllClose(expected_percentile, pct_pos_index.eval())
def test_two_dim_even_axis_0(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x, q=q, interpolation=self._interpolation, axis=[0])
self.assertAllEqual((2,), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_two_dim_even_input_and_keep_dims_true(self):
x = np.array([[1., 2., 4., 50.], [1., 2., -4., 5.]]).T
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation, keepdims=True, axis=0)
with self.test_session():
pct = sample_stats.percentile(
x,
q=q,
interpolation=self._interpolation,
keep_dims=True,
axis=[0])
self.assertAllEqual((1, 2), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_and_keepdims(self):
x = rng.rand(2, 3, 4, 5)
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllEqual(expected_percentile.shape, pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_four_dimensional_input_x_static_ndims_but_dynamic_sizes(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x, q=0.77, interpolation=self._interpolation, axis=axis)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_four_dimensional_input_and_keepdims_x_static_ndims_dynamic_sz(self):
x = rng.rand(2, 3, 4, 5)
x_ph = array_ops.placeholder(dtypes.float64, shape=[None, None, None, None])
for axis in [None, 0, 1, -2, (0,), (-1,), (-1, 1), (3, 1), (-3, 0)]:
expected_percentile = np.percentile(
x,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keepdims=True)
with self.test_session():
pct = sample_stats.percentile(
x_ph,
q=0.77,
interpolation=self._interpolation,
axis=axis,
keep_dims=True)
self.assertAllClose(expected_percentile, pct.eval(feed_dict={x_ph: x}))
def test_with_integer_dtype(self):
x = [1, 5, 3, 2, 4]
for q in [0, 10, 25, 49.9, 50, 50.01, 90, 95, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertEqual(dtypes.int32, pct.dtype)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
class PercentileTestWithHigherInterpolation(
PercentileTestWithLowerInterpolation):
_interpolation = "higher"
class PercentileTestWithNearestInterpolation(test.TestCase):
"""Test separately because np.round and tf.round make different choices."""
_interpolation = "nearest"
def test_one_dim_odd_input(self):
x = [1., 5., 3., 2., 4.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolation)
self.assertAllEqual((), pct.get_shape())
self.assertAllClose(expected_percentile, pct.eval())
def test_one_dim_even_input(self):
x = [1., 5., 3., 2., 4., 5.]
for q in [0, 10.1, 25.1, 49.9, 50.1, 50.01, 89, 100]:
expected_percentile = np.percentile(
x, q=q, interpolation=self._interpolation)
with self.test_session():
pct = sample_stats.percentile(x, q=q, interpolation=self._interpolati |
dhess/lobbyists | lobbyists/tests/test_import_issues.py | Python | gpl-3.0 | 17,052 | 0.001935 | # -*- coding: utf-8 -*-
#
# test_import_issues.py - Test issue importing.
# Copyright (C) 2008 by Drew Hess <dhess@bothan.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test issue importing."""
import unittest
import lobbyists
import sqlite3
import util
class TestImportIssues(unittest.TestCase):
def test_import_issues(self):
"""Import issues"""
filings = list(lobbyists.parse_filings(util.testpath('issues.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * FROM issue")
rows = list(cur)
row = rows.pop()
self.failUnlessEqual(row['id'], 23)
self.failUnlessEqual(row['code'],
'ENERGY/NUCLEAR')
self.failUnlessEqual(row['specific_issue'],
'\r\nComprehensive Energy Bill')
row = rows.pop()
self.failUnlessEqual(row['id'], 22)
self.failUnlessEqual(row['code'],
'TRANSPORTATION')
self.failUnlessEqual(row['specific_issue'],
'\r\nH.R. 1495 Water Resources Development Act (WRDA) - the WRDA provisions to modernize the locks on the Upper Mississippi and Illinois Rivers are essential if U.S. agriculture is going to remain competitive in the global marketplace.\r\nH.R. 1495 the Water Resources Development Act of 2007 (WRDA) - conference report - Title VIII of the legislation includes authorization for the Corps of Engineers to construct new 1,200 foot locks on the Upper Mississippi and Illinois Rivers\n')
row = rows.pop()
self.failUnlessEqual(row['id'], 21)
self.failUnlessEqual(row['code'],
'IMMIGRATION')
self.failUnlessEqual(row['specific_issue'],
'\r\nImmigration - Thanking Senator Lincoln and her staff for the hard work and long hours and dedication they presented in an effort to develop a comprehensive immigration reform.\n')
row = rows.pop()
self.failUnlessEqual(row['id'], 20)
self.failUnlessEqual(row['code'],
'AGRICULTURE')
self.failUnlessEqual(row['specific_issue'],
'\r\nFY08 Agriculture Appropriations Bill - (Sec. 738) amendment to prohibit USDA from spending money for health inspection of horses.\n\nH.R. 3161, the FY08 Ag spending bill - amendments: King/Kingston amendment to strike Sec. 738. It would limit USDA authority for equine health inspection, effectively restricting the movement of all horses; Ackerman amendment prohibits funding for Food Safety and Inspection Service (FSIS) inspections in facilities that process nonambulatory or downer livestock; Whitfield-Spratt-Rahall-Chandler amendment to restrict USDA inspection of horses intended for processing for human consumption.\n\nPayment Limits.\r\nFarm Bill: tax title, reductions in direct payments, counter-cyclical revenue option, senate ag committee markup on farm bill, amendments seeking further reform to payment limits and adjusted gross income restrictions.\n')
row = rows.pop()
self.failUnlessEqual(row['id'], 19)
self.failUnlessEqual(row['code'],
'TRADE (DOMESTIC/FOREIGN)')
self.failUnlessEqual(row['specific_issue'],
'\r\nU.S. -Peru Trade Promotion Agreement (TPA) - the goal is to increase U.S. agriculture exports and increase market share.')
row = rows.pop()
self.failUnlessEqual(row['id'], 18)
self.failUnlessEqual(row['code'],
'EDUCATION')
self.failUnlessEqual(row['specific_issue'],
'\r\nFY08 Labor, HHS and Education spending. Perkins Amendment (federal funding for FFA and career and technical education).')
row = rows.pop()
self.failUnlessEqual(row['id'], 17)
self.failUnlessEqual(row['code'],
'ROADS/HIGHWAY')
self.failUnlessEqual(row['specific_issue'],
'\r\nH.R. 3098 to restore farm truck exemptions from federal motor carrier vehicle regulations.')
row = rows.pop()
self.failUnlessEqual(row['id'], 16)
self.failUnlessEqual(row['code'],
'DEFENSE')
self.failUnlessEqual(row['specific_issue'],
'H.R.3222 & Senate FY08 Defense Appropriations-Navy, Army & SOCOM R&D\nH.R.1585 & S.1547 FY08 Defense Authorizations-Navy, Army & SOCOM R&D\n')
row = rows.pop()
self.failUnlessEqual(row['id'], 15)
self.failUnlessEqual(row['code'],
'HOMELAND SECURITY')
self.failUnlessEqual(row['specific_issue'],
'H.R.3222 & Senate FY08 Defense Appropriations-Navy, Army & SOCOM R&D\nH.R.1585 & S.1547 FY08 Defense Authorizations-Navy, Army & SOCOM R&D\nH.R.2638 & S.1644 FY08 DHS AppropriationsBill-CRP')
row = rows.pop()
self.failUnlessEqual(row['id'], 14)
self.failUnlessEqual(row['code'],
'BUDGET/APPROPRIATIONS')
self.failUnlessEqual(row['specific_issue'],
'H.R.3222 & Senate FY08 De | fense Appropriations-Navy, Army & SOCOM R&D\nH.R.1585 & S.1547 FY08 Defense Authorizations-Navy, Army & SOCOM R& | D\nH.R.2638 & S.1644 FY08 DHS AppropriationsBill-CRP')
row = rows.pop()
self.failUnlessEqual(row['id'], 13)
self.failUnlessEqual(row['code'],
'DEFENSE')
self.failUnlessEqual(row['specific_issue'],
'DEFENSE AUTHORIZATION, DEFENSE APPROPRIATIONS, VETERANS, DEFENSE HEALTH CARE, ARMED FORCES RETIREMENT, ARMED FORCES PERSONNEL BENEFITS, EMERGING DEFENSE RELATED ISSUES')
row = rows.pop()
self.failUnlessEqual(row['id'], 12)
self.failUnlessEqual(row['code'],
'BANKING')
self.failUnlessEqual(row['specific_issue'],
'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 11)
self.failUnlessEqual(row['code'],
'REAL ESTATE/LAND USE/CONSERVATION')
self.failUnlessEqual(row['specific_issue'],
'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 10)
self.failUnlessEqual(row['code'],
'FINANCIAL INSTITUTIONS/INVESTMENTS/SECURITIES')
self.failUnlessEqual(row['specific_issue'],
'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 9)
self.failUnlessEqual(row['code'],
'FOREIGN RELATIONS')
self.failUnlessEqual(row['specific_issue'],
'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 8)
self.failUnlessEqual(row['code'],
'LAW ENFORCEMENT/CRIME/CRIMINAL JUSTICE')
self.failUnlessEqual(row['specific_issue'],
'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 7)
self.failUnlessEqual(row['code'],
'FAMILY ISSUES/ABORTION/ADOPTION')
self.failUnlessEqual(row['specific_issue'], |
rishig/zulip | zerver/tornado/application.py | Python | apache-2.0 | 1,268 | 0.003155 |
import atexit
import tornado.web
from django.conf import settings
from zerver.tornado import autoreload
from zerver.lib.queue import get_queue_client
from zerver.tornado.handlers import AsyncDjangoHandler
from zerver.tornado.socket import get_sockjs_router
def setup_tornado_rabbitmq() -> None: # nocoverage
# When tornado is shut down, disconnect cleanly from rabbitmq
if settings.USING_RABBITMQ:
queue_client = ge | t_queue_client()
atexit.register(lambda: queue_client.close())
autoreload.add_reload_hook(lambda: queue_client.close())
def create_tornado_application(port: int) -> tornado.web.Application:
urls = (
r"/notify_tornado",
r"/json/events",
r"/api/v1/events",
r"/api/v1/events/in | ternal",
)
# Application is an instance of Django's standard wsgi handler.
return tornado.web.Application(([(url, AsyncDjangoHandler) for url in urls] +
get_sockjs_router(port).urls),
debug=settings.DEBUG,
autoreload=False,
# Disable Tornado's own request logging, since we have our own
log_function=lambda x: None)
|
satoken/centroid-rna-package | python/CentroidFold.py | Python | gpl-2.0 | 5,989 | 0.003506 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_CentroidFold', [dirname(__file__)])
except ImportError:
import _CentroidFold
return _CentroidFold
if fp is not None:
try:
_mod = imp.load_module('_CentroidFold', fp, pathname, description)
finally:
fp.close()
return _mod
_CentroidFold = swig_import_helper()
del swig_import_helper
else:
import _CentroidFold
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
| return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setatt | r(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _CentroidFold.delete_SwigPyIterator
__del__ = lambda self: None
def value(self):
return _CentroidFold.SwigPyIterator_value(self)
def incr(self, n=1):
return _CentroidFold.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _CentroidFold.SwigPyIterator_decr(self, n)
def distance(self, x):
return _CentroidFold.SwigPyIterator_distance(self, x)
def equal(self, x):
return _CentroidFold.SwigPyIterator_equal(self, x)
def copy(self):
return _CentroidFold.SwigPyIterator_copy(self)
def next(self):
return _CentroidFold.SwigPyIterator_next(self)
def __next__(self):
return _CentroidFold.SwigPyIterator___next__(self)
def previous(self):
return _CentroidFold.SwigPyIterator_previous(self)
def advance(self, n):
return _CentroidFold.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _CentroidFold.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _CentroidFold.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _CentroidFold.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _CentroidFold.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _CentroidFold.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _CentroidFold.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
SwigPyIterator_swigregister = _CentroidFold.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
class CentroidFold(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CentroidFold, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CentroidFold, name)
__repr__ = _swig_repr
AUX = _CentroidFold.CentroidFold_AUX
PFFOLD = _CentroidFold.CentroidFold_PFFOLD
CONTRAFOLD = _CentroidFold.CentroidFold_CONTRAFOLD
ALIPFFOLD = _CentroidFold.CentroidFold_ALIPFFOLD
BOLTZMANN = _CentroidFold.CentroidFold_BOLTZMANN
PFFOLD_ALIPFFOLD = _CentroidFold.CentroidFold_PFFOLD_ALIPFFOLD
BOLTZMANN_ALIPFFOLD = _CentroidFold.CentroidFold_BOLTZMANN_ALIPFFOLD
def __init__(self, *args):
this = _CentroidFold.new_CentroidFold(*args)
try:
self.this.append(this)
except:
self.this = this
__swig_destroy__ = _CentroidFold.delete_CentroidFold
__del__ = lambda self: None
def calculate_posterior(self, *args):
return _CentroidFold.CentroidFold_calculate_posterior(self, *args)
def decode_structure(self, gamma):
return _CentroidFold.CentroidFold_decode_structure(self, gamma)
def ps_plot(self, name, seq, g, color=True):
return _CentroidFold.CentroidFold_ps_plot(self, name, seq, g, color)
CentroidFold_swigregister = _CentroidFold.CentroidFold_swigregister
CentroidFold_swigregister(CentroidFold)
# This file is compatible with both classic and new-style classes.
|
ckelly/pyomniar | pyomniar/parsers.py | Python | mit | 1,440 | 0.003472 |
from pyomniar.utils import import_simplejson
from pyomniar.error import OmniarError
class Parser(object):
def parse(self, method, payload):
"""
Parse the response payload and return the result.
Returns a tuple that contains the result data and the cursors
(or None if not present).
"""
raise NotImplementedError
def parse_error(self | , payload):
"""
Parse the error message from payload.
If unable to parse the message, throw an exception
and default error message will be used.
"""
raise NotImplementedError
class JSONParser(Parser):
payload_format = 'json'
def __init__(self):
self.json_lib = import_simplejson()
def parse(self, method, payload):
try:
j | son = self.json_lib.loads(payload)
except Exception, e:
raise OmniarError('Failed to parse JSON payload: %s' % e)
return json
def parse_error(self, payload):
error = self.json_lib.loads(payload)
if error.has_key('error-reason'):
return error['error-reason']
else:
return error['error-code']
class EmptyParser(Parser):
"""Parser for items that don't return any data"""
# def __init__(self):
# pass
def parse(self, method, payload):
return True
def parse_error(self, payload):
return False |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/lazy_object_proxy/compat.py | Python | apache-2.0 | 196 | 0 | import sys
PY2 = sys.vers | ion_info[0] == 2
PY3 = sys.version_info[0] == 3
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {} | )
|
maximmaxim345/Sheep-it-blender-plugin | splinter/request_handler/status_code.py | Python | gpl-3.0 | 732 | 0.001366 | # -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
class StatusCode(object):
def __init__(self, status_code, reason):
| #: A message for the response (example: Success)
self.reason = reason
#: Code of the response (example: 200)
self.code = status_code
def __eq__(self, other):
return self.code == other
def __str__(self):
return "{} - {}".format(self.code, self.reason)
def is_success(self):
"""
Returns ``True`` if | the response was succeed, otherwise, returns ``False``.
"""
return self.code < 400
|
mnunberg/yobot | py/gui/agent_connect_dlg.py | Python | gpl-3.0 | 2,632 | 0.004179 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'agent_connect_dlg.ui'
#
# Created: Tue Oct 12 14:22:17 2010
# by: PyQt4 UI code generator 4.7.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(253, 111)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.agent_addrinfo = QtGui.QComboBox(Dialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.agent_addrinfo.sizePolicy().hasHeightForWidth())
self.agent_addrinfo.setSizePolicy(sizePolicy)
self.agent_addrinfo.setEditable(True)
self.agent_addrinfo.setObjectName("agent_addrinfo")
self.gridLayout.addWidget(self.agent_addrinfo, 0, 1, 1, 1)
self.disconnect_from_server = QtGui.QCheckBox(Dialog)
self.disconnect_from_server.setObjectName("disconnect_from_server")
self.gridLayout.addWidget(self.disconnect_from_server, 1, 0, 1, 2)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 2)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate(" | Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Agent", None, QtGui.QApplication.UnicodeUTF8))
self.disconnect_from_server.setText(QtGui.QApplication.translate("Dialog", "Disconnect Clients from server", None, QtGui.QApplication.UnicodeUTF8))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv | )
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
peterayeni/django-kong-admin | setup.py | Python | bsd-3-clause | 1,630 | 0.001227 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import kong_admin
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = kong_admin.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.syst | em("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-kong-admin',
version=version,
description="""A reusable Django App to manage a Kong service (http://getkong.org)""",
long_description=readme + '\n\n' + history,
author='Dirk Moors',
author_email='dirk.moors@vikingco.com',
url='https://githu | b.com/vikingco/django-kong-admin',
packages=[
'kong_admin',
],
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='django-kong-admin',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
) |
zbigniewwojna/text-rcnn | core/preprocessor.py | Python | apache-2.0 | 77,352 | 0.003633 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Licens | e.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a se | t of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The preprocess function receives a tensor_dict which is a dictionary that maps
different field names to their tensors. For example,
tensor_dict[fields.InputDataFields.image] holds the image tensor.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import sys
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
def _apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([func(
control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)
for case in range(num_cases)])[0]
def _apply_with_random_selector_tuples(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: A tuple of input tensors.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
num_inputs = len(x)
rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
tuples = [list() for t in x]
for case in range(num_cases):
new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]
output = func(tuple(new_x), case)
for j in range(num_inputs):
tuples[j].append(output[j])
for i in range(num_inputs):
tuples[i] = control_flow_ops.merge(tuples[i])[0]
return tuple(tuples)
def _random_integer(minval, maxval, seed):
"""Returns a random 0-D tensor between minval and maxval.
Args:
minval: minimum value of the random tensor.
maxval: maximum value of the random tensor.
seed: random seed.
Returns:
A random 0-D tensor between minval and maxval.
"""
return tf.random_uniform(
[], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)
def normalize_image(image, original_minval, original_maxval, target_minval,
target_maxval):
"""Normalizes pixel values in the image.
Moves the pixel values from the current [original_minval, original_maxval]
range to a the [target_minval, target_maxval] range.
Args:
image: rank 3 float32 tensor containing 1
image -> [height, width, channels].
original_minval: current image minimum value.
original_maxval: current image maximum value.
target_minval: target image minimum value.
target_maxval: target image maximum value.
Returns:
image: image which is the same shape as input image.
"""
with tf.name_scope('NormalizeImage', values=[image]):
original_minval = float(original_minval)
original_maxval = float(original_maxval)
target_minval = float(target_minval)
target_maxval = float(target_maxval)
image = tf.to_float(image)
image = tf.subtract(image, original_minval)
image = tf.multiply(image, (target_maxval - target_minval) /
(original_maxval - original_minval))
image = tf.add(image, target_minval)
return image
def flip_boxes(boxes):
    """Left-right flip normalized bounding boxes.

    Args:
        boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
            Boxes are in normalized form, i.e. coordinates in [0, 1], with
            each row as [ymin, xmin, ymax, xmax].

    Returns:
        Tensor of the same shape with horizontally flipped boxes.
    """
    y_min, x_min, y_max, x_max = tf.split(value=boxes, num_or_size_splits=4, axis=1)
    # Mirroring around x = 0.5 swaps and complements the x extremes.
    new_x_min = tf.subtract(1.0, x_max)
    new_x_max = tf.subtract(1.0, x_min)
    return tf.concat([y_min, new_x_min, y_max, new_x_max], 1)
def retain_boxes_above_threshold(
boxes, labels, label_scores, masks=None, keypoints=None, threshold=0.0):
"""Retains boxes whose label score is above a given threshold.
If the label score for a box is missing (represented by NaN), the box is
retained. The boxes that don't pass the threshold will not appear in the
returned tensor.
Args:
boxes: float32 tensor of shape [num_instance, 4] representing boxes
location in normalized coordinates.
labels: rank 1 int32 tensor of shape [num_instance] containing the object
classes.
label_scores: float32 tensor of shape [num_instance] representing the
score for each box.
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks are of
the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
threshold: scalar python float.
Returns:
retained_boxes: [num_retained_instance, 4]
retianed_labels: [num_retained_instance]
retained_label_scores: [num_retained_instance]
If masks, or keypoints are not None, the function also returns:
retained_masks: [num_retained_instance, height, width]
retained_keypoints: [num_retained_instance, num_keypoints, 2]
"""
with tf.name_scope('RetainBoxesAboveThreshold',
values=[boxes, labels, label_scores]):
indices = tf.where(
tf.logical_or(label_scores > threshold, tf.is_nan(label_scores)))
indices = tf.squeeze(indices, axis=1)
retained_boxes = tf.gather(boxes, indices)
retained_labels = tf.gather(labels, indices)
retained_label_scores = tf.gather(label_scores, indices)
result = [retained_boxes, retained_labels, retained_label_scores]
if masks is not None:
retained_masks = tf.gather(masks, indices)
result.append(retained_masks)
if |
googlei18n/glyphsLib | tests/builder/designspace_roundtrip_test.py | Python | apache-2.0 | 2,834 | 0.000353 | # coding=UTF-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import, unicode_literals
import defcon
from fontTools import designspaceLib
from glyphsLib import to_glyphs, to_designspace
def test_default_master_roundtrips():
    """This test comes from a common scenario while using glyphsLib to go
    back and forth several times with "minimize diffs" in both directions.

    In the end we get UFOs that have information as below, and there was
    a bug that turned "Regular" into "Normal" and changed the default axis
    value.
    """
    axes_key = "com.schriftgestaltung.customParameter.GSFont.Axes"

    def make_ufo(style_name):
        # Both masters share the family name and the custom Axes parameter.
        ufo = defcon.Font()
        ufo.info.familyName = "CustomFont"
        ufo.info.styleName = style_name
        ufo.lib[axes_key] = [{"Name": "Weight", "Tag": "wght"}]
        return ufo

    ds = designspaceLib.DesignSpaceDocument()

    weight = ds.newAxisDescriptor()
    weight.tag = "wght"
    weight.name = "Weight"
    weight.minimum = 300
    weight.maximum = 700
    weight.default = 400
    weight.map = [(300, 58), (400, 85), (700, 145)]
    ds.addAxis(weight)

    thin_source = ds.newSourceDescriptor()
    thin_source.font = make_ufo("Thin")
    thin_source.location = {"Weight": 58}
    thin_source.familyName = "CustomFont"
    thin_source.styleName = "Thin"
    ds.addSource(thin_source)

    regular_source = ds.newSourceDescriptor()
    regular_source.font = make_ufo("Regular")
    regular_source.location = {"Weight": 85}
    regular_source.familyName = "CustomFont"
    regular_source.styleName = "Regular"
    for copy_flag in ("copyFeatures", "copyGroups", "copyInfo", "copyLib"):
        setattr(regular_source, copy_flag, True)
    ds.addSource(regular_source)

    # Round-trip designspace -> glyphs -> designspace with diff minimization.
    font = to_glyphs(ds, minimize_ufo_diffs=True)
    doc = to_designspace(font, minimize_glyphs_diffs=True)

    reg = doc.sources[1]
    assert reg.styleName == "Regular"
    assert reg.font.info.styleName == "Regular"
    assert reg.copyFeatures is True
    assert reg.copyGroups is True
    assert reg.copyInfo is True
    assert reg.copyLib is True
|
Yuvv/LearnTestDemoTempMini | py-django/DataBackup/ops/credentials.py | Python | mit | 3,202 | 0.000625 | #!/usr/bin/env python
# import keystoneclient.v2_0.client as ksclient
# import glanceclient.v2.client as glclient
# import novaclient.client as nvclient
# import neutronclient.v2_0.client as ntclient
# import cinderclient.v2.client as cdclient
# import swiftclient.client as sftclient
__author__ = 'Yuvv'
OS_PROJECT_DOMAIN_ID = 'default'
OS_USER_DOMAIN_ID = 'default'
OS_PROJECT_NAME = 'admin'
OS_TENANT_NAME = 'admin'
OS_USERNAME = 'admin'
OS_PASSWORD = 'yuvv'
# OS_AUTH_URL = 'http://controller:35357/v3'
OS_AUTH_URL = 'http://controller:5000/v2.0/'
OS_IDENTITY_API_VERSION = 3
OS_IMAGE_API_VERSION = 2
OS_AUTH_VERSION = 3
'''
keystone = ksclient.Client(auth_url=OS_AUTH_URL,
username=OS_USERNAME,
password=OS_PASSWORD,
tenant_name=OS_TENANT_NAME)
glance = glclient.Client(endpoint=keystone.service_catalog.url_for(service_type='image'),
token=keystone.auth_token)
neutron = ntclient.Client(endpoint_url=keystone.service_catalog.url_for(service_type='network'),
token=keystone.auth_token)
cinder = cdclient.Client(auth_url=OS_AUTH_URL,
username=OS_USERNAME,
api_key=OS_PASSWORD,
project_id=OS_TENANT_NAME)
nova = nvclient.Client('2.1', auth_url=OS_AUTH_URL,
username=OS_USERNAME,
api_key=OS_PASSWORD,
project_id=OS_TENANT_NAME)
'''
def get_keystone_credits():
    """Return keystone client credentials built from the module constants."""
    return {
        'username': OS_USERNAME,
        'password': OS_PASSWORD,
        'auth_url': OS_AUTH_URL,
        'tenant_name': OS_TENANT_NAME,
    }
def get_neutron_credits():
    """Return neutron client credentials built from the module constants."""
    return {
        'username': OS_USERNAME,
        'password': OS_PASSWORD,
        'auth_url': OS_AUTH_URL,
        'tenant_name': OS_TENANT_NAME,
    }
def get_cinder_credits():
    """Return cinder client credentials built from the module constants."""
    return {
        'username': OS_USERNAME,
        'api_key': OS_PASSWORD,
        'auth_url': OS_AUTH_URL,
        'project_id': OS_TENANT_NAME,
    }
def get_nova_credits():
    """Return nova client credentials built from the module constants."""
    return {
        'username': OS_USERNAME,
        'api_key': OS_PASSWORD,
        'auth_url': OS_AUTH_URL,
        'project_id': OS_TENANT_NAME,
    }
def get_swift_credits():
    """Return swift connection credentials built from the module constants.

    NOTE(review): the original definition line was corrupted
    ("def get_swi | ft_credits"); reconstructed as get_swift_credits to
    match the swiftclient usage sketched in the commented example above.
    """
    return {
        'user': OS_USERNAME,
        'key': OS_PASSWORD,
        'authurl': OS_AUTH_URL,
    }
'''
+----------------------------------+----------+--------------+
| ID | Name | Type |
+----------------------------------+----------+--------------+
| 02e5b5c270784e76bf5c144f0fa54030 | cinder | volume |
| 3a7ecbf5069d42d784fdf3ebe9deb745 | swift | object-store |
| 8e185002e3fe4028bda5c6cd910d31f6 | nova | compute |
| aaf1a49b4a1e463990880ddf9c8fb658 | glance | image |
| b3600985814247558a289c332ad62f09 | keystone | identity |
| bc4d28242d3a466ebce7663b28465a99 | neutron | network |
| cb799b0f7447401fb15821cffb103e74 | cinderv2 | volumev2 |
+----------------------------------+----------+--------------+
'''
|
andybab/Impala | tests/shell/test_shell_interactive.py | Python | apache-2.0 | 9,064 | 0.008164 | #!/usr/bin/env python
# encoding=utf-8
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pexpect
import pytest
import shlex
import shutil
import socket
import signal
from impala_shell_results import get_shell_cmd_result, cancellation_helper
from subprocess import Popen, PIPE
from tests.common.impala_service import ImpaladService
from tests.verifiers.metric_verifier import MetricVerifier
from time import sleep
SHELL_CMD = "%s/bin/impala-shell.sh" % os.environ['IMPALA_HOME']
SHELL_HISTORY_FILE = os.path.expanduser("~/.impalahistory")
TMP_HISTORY_FILE = os.path.expanduser("~/.impalahistorytmp")
class TestImpalaShellInteractive(object):
"""Test the impala shell interactively"""
def _send_cmd_to_shell(self, p, cmd):
    """Given an open shell process, write a cmd to stdin

    This method takes care of adding the delimiter and EOL, callers
    should send the raw command.
    """
    terminated_cmd = "%s;\n" % cmd
    p.stdin.write(terminated_cmd)
    p.stdin.flush()
def _start_new_shell_process(self, args=None):
    """Starts a shell process and returns the process handle.

    Bug fix: the computed ``cmd`` (SHELL_CMD plus ``args``) was previously
    discarded — Popen always launched the bare SHELL_CMD, so any ``args``
    were silently ignored. Additionally, ``shell=True`` combined with a
    list argument makes the shell receive only the first list element;
    with the command line tokenized by shlex, ``shell`` is unnecessary.
    """
    cmd = "%s %s" % (SHELL_CMD, args) if args else SHELL_CMD
    return Popen(shlex.split(cmd), stdout=PIPE,
                 stdin=PIPE, stderr=PIPE)
@classmethod
def setup_class(cls):
    """Stash any pre-existing shell history so tests start cleanly."""
    history_present = os.path.exists(SHELL_HISTORY_FILE)
    if history_present:
        shutil.move(SHELL_HISTORY_FILE, TMP_HISTORY_FILE)
@classmethod
def teardown_class(cls):
    """Restore the shell history stashed away before the tests ran."""
    if os.path.exists(TMP_HISTORY_FILE):
        shutil.move(TMP_HISTORY_FILE, SHELL_HISTORY_FILE)
@pytest.mark.execute_serially
def test_escaped_quotes(self):
    """Test escaping quotes"""
    # Escaped quotes outside of a quoted string are a parse error.
    for bad_query in ("select \\'bc';", "select \\\"bc\";"):
        result = run_impala_shell_interactive(bad_query)
        assert "could not match input" in result.stderr
    # Escaped quotes within a quoted string are legal.
    for good_query in ("select 'ab\\'c';", "select \"ab\\\"c\";"):
        result = run_impala_shell_interactive(good_query)
        assert "Fetched 1 row(s)" in result.stderr
@pytest.mark.execute_serially
def test_cancellation(self):
    """Interrupt a long-running query with SIGINT and verify it is cancelled."""
    impalad = ImpaladService(socket.getfqdn())
    # Start from a quiescent server so the in-flight check below is meaningful.
    impalad.wait_for_num_in_flight_queries(0)
    command = "select sleep(10000);"
    p = self._start_new_shell_process()
    self._send_cmd_to_shell(p, command)
    sleep(1)
    # iterate through all processes with psutil
    shell_pid = cancellation_helper()
    sleep(2)
    # Simulate the user pressing Ctrl-C in the shell.
    os.kill(shell_pid, signal.SIGINT)
    result = get_shell_cmd_result(p)
    # NOTE(review): `result` is unused; get_shell_cmd_result presumably also
    # reaps the shell process — confirm before removing.
    # The interrupted query must no longer be in flight on the server.
    assert impalad.wait_for_num_in_flight_queries(0)
@pytest.mark.execute_serially
def test_unicode_input(self):
    "Test queries containing non-ascii input"
    # A unicode literal spanning multiple lines should execute cleanly.
    unicode_text = u'\ufffd'
    query = "select '%s'\n;" % unicode_text.encode('utf-8')
    shell_result = run_impala_shell_interactive(query)
    assert "Fetched 1 row(s)" in shell_result.stderr
@pytest.mark.execute_serially
def test_welcome_string(self):
    """Test that the shell's welcome message is only printed once
    when the shell is started. Ensure it is not reprinted on errors.

    Regression test for IMPALA-1153
    """
    # Both an unparseable statement and a valid-but-failing query must not
    # re-trigger the banner.
    for query in ('asdf;', 'select * from non_existent_table;'):
        result = run_impala_shell_interactive(query)
        assert result.stdout.count("Welcome to the Impala shell") == 1
@pytest.mark.execute_serially
def test_bash_cmd_timing(self):
    """Test existence of time output in bash commands run from shell"""
    # "!" runs the rest of the line as a shell command.
    shell_result = run_impala_shell_interactive("! ls;")
    assert "Executed in" in shell_result.stderr
@pytest.mark.execute_serially
def test_reconnect(self):
    """Regression Test for IMPALA-1235

    Verifies that a connect command by the user is honoured.
    """
    # NOTE(review): assumes a second impalad is listening on ports
    # 25001/21001/22001 (minicluster layout) — confirm in the test env.
    def get_num_open_sessions(impala_service):
        """Helper method to retrieve the number of open sessions"""
        return impala_service.get_metric_value('impala-server.num-open-beeswax-sessions')
    hostname = socket.getfqdn()
    initial_impala_service = ImpaladService(hostname)
    target_impala_service = ImpaladService(hostname, webserver_port=25001,
                                           beeswax_port=21001, be_port=22001)
    # Get the initial state for the number of sessions.
    num_sessions_initial = get_num_open_sessions(initial_impala_service)
    num_sessions_target = get_num_open_sessions(target_impala_service)
    # Connect to localhost:21000 (default)
    p = self._start_new_shell_process()
    sleep(2)
    # Make sure we're connected <hostname>:21000
    assert get_num_open_sessions(initial_impala_service) == num_sessions_initial + 1, \
        "Not connected to %s:21000" % hostname
    self._send_cmd_to_shell(p, "connect %s:21001" % hostname)
    # Wait for a little while
    sleep(2)
    # The number of sessions on the target impalad should have been incremented.
    assert get_num_open_sessions(target_impala_service) == num_sessions_target + 1, \
        "Not connected to %s:21001" % hostname
    # The number of sessions on the initial impalad should have been decremented.
    assert get_num_open_sessions(initial_impala_service) == num_sessions_initial, \
        "Connection to %s:21000 should have been closed" % hostname
@pytest.mark.execute_serially
def test_ddl_queries_are_closed(self):
    """Regression test for IMPALA-1317

    The shell does not call close() for alter, use and drop queries, leaving them in
    flight. This test issues those queries in interactive mode, and checks the debug
    webpage to confirm that they've been closed.
    TODO: Add every statement type.
    """
    TMP_DB = 'inflight_test_db'
    TMP_TBL = 'tmp_tbl'
    MSG = '%s query should be closed'
    NUM_QUERIES = 'impala-server.num-queries'
    impalad = ImpaladService(socket.getfqdn())
    p = self._start_new_shell_process()
    try:
        start_num_queries = impalad.get_metric_value(NUM_QUERIES)
        self._send_cmd_to_shell(p, 'create database if not exists %s' % TMP_DB)
        self._send_cmd_to_shell(p, 'use %s' % TMP_DB)
        # Wait until both statements (create + use) are admitted, then verify
        # nothing is left in flight.
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 2)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'use'
        self._send_cmd_to_shell(p, 'create table %s(i int)' % TMP_TBL)
        self._send_cmd_to_shell(p, 'alter table %s add columns (j int)' % TMP_TBL)
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 4)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'alter'
        self._send_cmd_to_shell(p, 'drop table %s' % TMP_TBL)
        impalad.wait_for_metric_value(NUM_QUERIES, start_num_queries + 5)
        assert impalad.wait_for_num_in_flight_queries(0), MSG % 'drop'
    finally:
        # Best-effort cleanup via non-interactive invocations.
        run_impala_shell_interactive("drop table if exists %s.%s;" % (TMP_DB, TMP_TBL))
        # NOTE(review): this drops database "foo", not TMP_DB — looks like a
        # leftover; confirm the intended cleanup target.
        run_impala_shell_interactive("drop database if exists foo;")
@pytest.mark.execute_serially
def test_multiline_queries_in_history(self):
"""Test to ensure that multiline queries with comments are preserved in history
Ensure that multiline queries are preserved when they're read back from history.
Additionally, also test that comments are preserved.
"""
# regex for pexpect, a shell prompt is expected after each command..
prompt_regex = '.*%s:2100.*' % socket.getfqdn()
# readline gets its input from tty, so using stdin does not work.
child_proc = pexpect.spawn(SHELL_CMD)
queries = ["select\n1--comment;",
"select /*comment*/\n1;",
"select\n/*comm\nent*/\n1;"]
for qu |
lahwran/distributed-crawler | crawler/central.py | Python | mit | 7,864 | 0.000127 | import json
import random
import urlparse
import re
import itertools
from collections import deque
from twisted.internet.protocol import Factory
from twisted.web.server import Site
from klein import Klein
from crawler import util
class Job(object):
    """A single crawl job: the pending URL queue plus progress counters."""

    def __init__(self, job_id):
        self.id = job_id
        self.queue = deque()
        self.seen_urls = set()
        self.result_urls = set()
        self.working_count = 0
        self.finished_count = 0

    def finished_one(self):
        """Move one unit of work from 'in progress' to 'finished'."""
        self.working_count -= 1
        self.finished_count += 1

    def add_url(self, url, level=0):
        """Record a discovered URL; queue it for crawling if shallow enough."""
        if url in self.seen_urls:
            return
        self.seen_urls.add(url)
        # Only crawl to a limited depth; deeper finds are recorded but not queued.
        if level < 2:
            self.queue.append(util.QueueEntry(self.id, level, url))
        path = urlparse.urlparse(url).path
        # A URL whose path contains an image extension counts as a result.
        # (re.search scans anywhere in the path, not only at the end.)
        if re.search(r'\.(gif|je?pg|png|bmp|webp)', path):
            self.result_urls.add(url)

    def pop_url(self):
        """Take the next queued URL, or None when the queue is empty."""
        if not self.queue:
            return None
        self.working_count += 1
        return self.queue.popleft()
class CoordinatorSession(util.CommandProtocol):
    """Connection to one drone: tracks its spare capacity and hands out URLs."""

    def __init__(self, coordinator):
        self.coordinator = coordinator
        # Number of scan slots this drone currently has free.
        self.waiting = 0

    def connectionMade(self):
        # Register with the coordinator and try to fill every available slot.
        self.coordinator.clients.append(self)
        self.waiting = self.coordinator.parallel_per_drone
        for _ in range(self.waiting):
            self.send_one()

    def connectionLost(self, reason):
        self.coordinator.clients.remove(self)

    def message_found(self, url_info):
        # Called for every URL the drone discovers, regardless of its kind.
        self.coordinator.found_url(util.queue_entry_parse(url_info))

    def message_url_completed(self, job_id):
        # A slot opened up: refill it first, then update job bookkeeping.
        self.waiting += 1
        self.send_one()
        self.coordinator.jobs[job_id].finished_one()

    def send_one(self, queue_entry=None):
        """Send one queue entry to the drone, pulling from the queue if needed."""
        if queue_entry is None:
            queue_entry = self.coordinator.pop_url()
        if queue_entry is None:
            return
        self.waiting -= 1
        self.command("scan_url", util.queue_entry_format(queue_entry))
class CoordinatorServer(Factory):
    """
    The queue server/leader.

    I considered various other things I hadn't used before (zeromq, rabbitmq,
    celery, etc), especially in the hope that I could find an existing
    brokerless task queue (a not-fully-connected mesh network sort of layout
    would be really cool), but ended up deciding that that was too many new
    things to learn. http://mcfunley.com/choose-boring-technology

    So it's got some centralization that I'd rather not have. This will be a
    bottleneck that prevents unbounded scaling, which would have to be
    refactored into something fancier when that level of scale was reached.
    Google wouldn't be able to run off this crawler, unfortunately.

    I also considered having a database of some kind for the queue, but as I
    haven't used any databases I'd be happy with for this, I decided to just
    hack my own.

    Incidentally, I like to use the word "Server" for what twisted calls
    "factories", and "Session" for what twisted calls "protocols".
    """
    def __init__(self, parallel_per_drone):
        # Active jobs keyed by job id; connected drone sessions.
        self.jobs = {}
        self.clients = []
        # How many URLs each drone may crawl concurrently.
        self.parallel_per_drone = parallel_per_drone
    def allocate_job(self):
        """Create a Job with a fresh unique id, register it, and return it."""
        # this method adapted from tree-of-life/treeoflife/nodes/node.py
        for x in xrange(10):
            job_id = util.gen_id()
            if job_id in self.jobs: # pragma: no cover
                continue
            job = self.jobs[job_id] = Job(job_id)
            return job
        else: # pragma: no cover
            raise Exception("10 tries to generate node id failed. wat? this "
                            "really should be impossible. %s" % job_id)
    def found_url(self, queue_entry):
        """Record a URL reported by a drone and hand out any new work."""
        # TODO: robots.txt parsing - has a bonus: sitemap.xml
        job = self.jobs[queue_entry.job_id]
        job.add_url(queue_entry.url, queue_entry.level)
        self._broadcast(job)
    def _broadcast(self, job):
        """Push queued URLs from `job` to every client with spare slots."""
        # Each client appears once per free slot it currently has.
        clients = [[client] * client.waiting for client in self.clients]
        for client in itertools.chain.from_iterable(clients):
            if not job.queue:
                break
            # Re-check: a slot may have been consumed since the list was built.
            if client.waiting <= 0:
                continue
            client.send_one(job.pop_url())
    def pop_url(self):
        """Take one queued URL from a randomly chosen job; None if all empty."""
        # this isn't efficient, but I'm wasting too much time overengineering
        # this silly project
        jobs = self.jobs.values()
        random.shuffle(jobs)
        for x in jobs:
            result = x.pop_url()
            if result is not None:
                return result
        return None
    def buildProtocol(self, addr):
        # One CoordinatorSession per connected drone.
        return CoordinatorSession(self)
class NotFound(Exception):
    """Raised when a requested job id does not exist."""
class JobApiServer(object):
    """
    Http service for the actual public api.

    This was originally going to be in CoordinatorServer, but that felt like
    too much in one class.
    """
    # Klein binds per-instance through its descriptor protocol (see
    # Klein.__get__ in klein/app.py), so routes declared on this class
    # attribute dispatch to instance methods.
    http = Klein()

    def __init__(self, coordinator):
        self.coordinator = coordinator

    @http.route("/", methods=["POST"])
    def http_submit_urls(self, request):
        """Create a new job from a newline-separated list of seed URLs.

        Returns the new job id as a JSON string. URLs containing a literal
        newline (legal in theory, rare in practice) are not supported.
        """
        # request.content is fully buffered by twisted before this is called.
        urls = (url for url in request.content.read().split("\n") if url)
        job = self.coordinator.allocate_job()
        for url in urls:
            # Normalize to byte strings so queue entries are uniform.
            if isinstance(url, unicode):
                url = url.encode("utf-8")
            job.add_url(url)
        self.coordinator._broadcast(job)
        return json.dumps(job.id) + '\n'

    def _job_status_info(self, job):
        """Build the JSON-serializable status summary for one job."""
        return {
            "crawled_urls": {
                "finished": job.finished_count,
                "in_progress": job.working_count,
                "waiting_in_queue": len(job.queue)
            },
            "result_count": len(job.result_urls),
        }

    def get_job(self, job_id):
        """Look up a job by id, raising NotFound for unknown ids."""
        try:
            return self.coordinator.jobs[job_id]
        except KeyError:
            raise NotFound

    @http.handle_errors(NotFound)
    def notfound(self, request, failure):
        """Map NotFound to an HTTP 404 response."""
        request.setResponseCode(404)
        return 'No such job id'

    @http.route("/status/<job_id>")
    def http_job_status(self, request, job_id):
        """Status summary for a single job."""
        job = self.get_job(job_id)
        return json.dumps(self._job_status_info(job))

    @http.route("/status/all")
    def http_job_all(self, request):
        """Status summaries for every known job, keyed by job id."""
        result = {}
        for job_id, job in self.coordinator.jobs.items():
            result[job_id] = self._job_status_info(job)
        return json.dumps(result)

    @http.route("/result/<job_id>")
    def http_job_results(self, request, job_id):
        """The image URLs found so far for the given job."""
        job = self.get_job(job_id)
        return json.dumps(list(job.result_urls))

    def to_factory(self): # pragma: no cover
        return Site(self.http.resource())
croxis/SpaceDrive | spacedrive/renderpipeline/samples/download_samples.py | Python | mit | 1,629 | 0.002455 | """
RenderPipeline
Copyright (c) 2014-2016 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
"""
Script to download the Render Pipeline samples
"""
import os
import sys
sys.path.insert(0, "../")
sys.path.insert(0, "../rpcore/util")
from submodule_downloader import download_submodule
if __name__ == "__main__":
    # Run relative to this script's own directory so the samples land there.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    os.chdir(script_dir)
    # Fetch the samples submodule, skipping the repo meta files.
    download_submodule("tobspr", "RenderPipeline-Samples", ".",
                       ["README.md", "LICENSE"])
|
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/webapps/galaxy/controllers/requests_admin.py | Python | gpl-3.0 | 47,292 | 0.021885 | from __future__ import absolute_import
from galaxy.web.base.controller import *
from galaxy.web.framework.helpers import time_ago, iff, grids
from galaxy.model.orm import *
from galaxy import model, util
from galaxy.web.form_builder import *
from .requests_common import RequestsGrid, invalid_id_redirect
from galaxy import eggs
eggs.require("amqp")
import amqp
import logging, os, pexpect, ConfigParser
log = logging.getLogger( __name__ )
class AdminRequestsGrid( RequestsGrid ):
    """Admin view of the requests grid: adds the owning user's email column
    plus the admin-only operations (edit/reject/delete/undelete)."""

    class UserColumn( grids.TextColumn ):
        # Shows the email address of the user who owns the request.
        def get_value( self, trans, grid, request ):
            return request.user.email

    # Grid definition: inherit the base columns/operations, then append the
    # admin-only entries.
    columns = list( RequestsGrid.columns ) + [
        UserColumn( "User",
                    model_class=model.User,
                    key='username' )
    ]
    operations = list( RequestsGrid.operations ) + [
        grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Reject", allow_multiple=False, condition=( lambda item: not item.deleted and item.is_submitted ) ),
        grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
    ]
    global_actions = [
        grids.GridAction( "Create new request", dict( controller='requests_common',
                                                      action='create_request',
                                                      cntrller='requests_admin' ) )
    ]
class DataTransferGrid( grids.Grid ):
    """Grid listing the datasets attached to a sample and their transfer state."""
    # Custom column types
    class NameColumn( grids.TextColumn ):
        def get_value( self, trans, grid, sample_dataset ):
            return sample_dataset.name
    class SizeColumn( grids.TextColumn ):
        def get_value( self, trans, grid, sample_dataset ):
            return sample_dataset.size
    class StatusColumn( grids.TextColumn ):
        def get_value( self, trans, grid, sample_dataset ):
            return sample_dataset.status
    class ExternalServiceColumn( grids.TextColumn ):
        def get_value( self, trans, grid, sample_dataset ):
            try:
                return sample_dataset.external_service.name
            except:
                # NOTE(review): bare except hides real errors; datasets with no
                # external service fall back to the literal string 'None'.
                return 'None'
    # Grid definition
    title = "Sample Datasets"
    template = "admin/requests/sample_datasets_grid.mako"
    model_class = model.SampleDataset
    default_sort_key = "-create_time"
    num_rows_per_page = 50
    preserve_state = True
    use_paging = False
    columns = [
        NameColumn( "Name",
                    link=( lambda item: dict( operation="view", id=item.id ) ),
                    attach_popup=True,
                    filterable="advanced" ),
        SizeColumn( "Size",
                    filterable="advanced" ),
        grids.GridColumn( "Last Updated",
                          key="update_time",
                          format=time_ago ),
        ExternalServiceColumn( 'External service',
                               link=( lambda item: dict( operation="view_external_service", id=item.external_service.id ) ), ),
        StatusColumn( "Transfer Status",
                      filterable="advanced",
                      label_id_prefix='datasetTransferStatus-' ),
    ]
    # Free-text search filters on the Name column only.
    columns.append( grids.MulticolFilterColumn( "Search",
                                                cols_to_filter=[ columns[0] ],
                                                key="free-text-search",
                                                visible=False,
                                                filterable="standard" ) )
    # Only datasets whose transfer has not started may be transferred,
    # renamed, or deleted.
    operations = [
        grids.GridOperation( "Transfer",
                             allow_multiple=True,
                             condition=( lambda item: item.status in [ model.SampleDataset.transfer_status.NOT_STARTED ] ) ),
        grids.GridOperation( "Rename",
                             allow_multiple=True,
                             allow_popup=False,
                             condition=( lambda item: item.status in [ model.SampleDataset.transfer_status.NOT_STARTED ] ) ),
        grids.GridOperation( "Delete",
                             allow_multiple=True,
                             condition=( lambda item: item.status in [ model.SampleDataset.transfer_status.NOT_STARTED ] ) )
    ]
    def apply_query_filter( self, trans, query, **kwd ):
        # Restrict the grid to the sample identified in the request, if any.
        sample_id = kwd.get( 'sample_id', None )
        if not sample_id:
            return query
        return query.filter_by( sample_id=trans.security.decode_id( sample_id ) )
class RequestsAdmin( BaseUIController, UsesFormDefinitionsMixin ):
request_grid = AdminRequestsGrid()
datatx_grid = DataTransferGrid()
@web.expose
@web.require_admin
def index( self, trans ):
    """Render the admin requests landing page (admin-only)."""
    return trans.fill_template( "/admin/requests/index.mako" )
@web.expose
@web.require_admin
def browse_requests( self, trans, **kwd ):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if operation == "edit":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='edit_basic_request_info',
cntrller='requests_admin',
**kwd ) )
if operation == "add_samples":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='add_samples',
cntrller='requests_admin',
**kwd ) )
if operation == "edit_samples":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='edit_samples',
cntrller='requests_admin',
**kwd ) )
if operation == "view_request":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='view_request',
cntrller='requests_admin',
**kwd ) )
if operation == "view_request_history":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='view_request_history',
cntrller='requests_admin',
**kwd ) )
if operation == "reject":
return self.reject_request( trans, **kwd )
if operation == "view_type":
return trans.response.send_redirect( web.url_for( controller='request_type',
| action='view_request_type',
**kwd ) )
i | f operation == "delete":
return trans.response.send_redirect( web.url_for( controller='requests_common',
action='delete_request',
cntrller='requests_admin',
**kwd ) )
if operation == "undelete":
return trans.response.send_redirect( web.url_for( controller='requests_common',
ac |
ecell/ecell3 | ecell/frontend/model-editor/ecell/ui/model_editor/ClassEditorWindow.py | Python | lgpl-3.0 | 4,556 | 0.017779 | #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
#
#'Design: Gabor Bereczki <gabor@e-cell.org>',
#'Design and application Framework: Koichi Takahashi <shafi@e-cell.org>',
#'Programming: Gabor Bereczki' at
# E-CELL Project, Lab. for Bioinformatics, Keio University.
#
import os
import os.path
import gtk
from ecell.ui.model_editor.ModelEditor import *
from ecell.ui.model_editor.ListWindow import *
from ecell.ui.model_editor.ClassEditor import *
from ecell.ui.model_editor.ClassList import *
from ecell.ui.model_editor.Constants import *
from ecell.ui.model_editor.Utils import *
class ClassEditorWindow(ListWindow):
    """Window for editing the stepper/class definitions of the current model.

    Hosts a ClassList (selection pane) and a ClassEditor (property pane)
    and keeps both in sync with the ModelEditor.
    """

    def __init__( self, aModelEditor, aRoot=None ):
        """
        in: ModelEditor theModelEditor
        returns nothing
        """
        self.theModelEditor = aModelEditor
        #self.aRoot=aRoot
        # init superclass
        ListWindow.__init__( self, self.theModelEditor )

    def openWindow( self ):
        """
        in: nothing
        returns nothing
        """
        # superclass openwindow
        ListWindow.openWindow( self )
        # add stepper list
        self.theClassList = ClassList( self, self['ClassListFrame'] )
        # add stepper property list
        self.theClassPropertyList = ClassEditor( self, self['ClassPropertyFrame'] )
        # add signal handlers
        # self.addHandlers({ })
        self.theClassList.update()
        # Select the first stepper by default, if the model defines any.
        # (This statement was corrupted by a stray "|" in the source and has
        # been reconstructed.)
        classList = self.theModelEditor.getModel().getStepperList()
        aClassList = [ classList[0] ] if classList else []
        self.selectStepper( aClassList )

    def updateEntityList( self ):
        """Refresh the stepper list and the property pane, if the window exists."""
        if not self.exists():
            return
        self.theClassList.update()
        self.updatePropertyList()

    def updatePropertyList( self, anID = None ):
        """
        in: anID ( system or stepper ) where changes happened
        """
        if not self.exists():
            return
        oldDisplayedID = self.theClassPropertyList.getDisplayedStepper()
        selectedIDs = self.theClassList.getSelectedIDs()
        # Only a single selection can be shown in the property pane.
        if len( selectedIDs ) != 1:
            newDisplayedID = None
        else:
            newDisplayedID = selectedIDs[0]
        if oldDisplayedID != newDisplayedID or newDisplayedID == anID or anID is None:
            self.theClassPropertyList.setDisplayedStepper( newDisplayedID )

    def setLastActiveComponent( self, aComponent ):
        # No component tracking is needed for this window.
        pass

    def update( self, aType = None, anID = None ):
        """Dispatch a model-change notification to the affected panes.

        anID == None means "all" for stepper-type updates.
        """
        if aType == ME_STEPPER_TYPE:
            if anID is None:
                # update all
                self.updateEntityList()
            else:
                self.updatePropertyList( anID )
        elif aType in [ ME_SYSTEM_TYPE, ME_PROCESS_TYPE, None ]:
            self.updatePropertyList()
        elif aType == ME_PROPERTY_TYPE:
            self.updatePropertyList( anID )

    def selectStepper( self, aStepperList ):
        self.theClassList.changeSelection( aStepperList )
        self.theClassList.selectByUser()

    #############################
    #      SIGNAL HANDLERS      #
    #############################

    def deleted( self, *args ):
        """Tear down the child panes and detach from the ModelEditor on close."""
        ListWindow.deleted( self, *args )
        self.theClassList.close()
        self.theClassPropertyList.close()
        self.theModelEditor.theClassEditor = None
        self.theModelEditor.theMainWindow.update()
        return True
|
malaterre/ITK | Modules/Core/GPUCommon/wrapping/test/itkGPUImageTest.py | Python | apache-2.0 | 5,250 | 0.005524 | #==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk

# Image geometry shared by every buffer in this test.
width = 256
height = 256

GPUImageType = itk.GPUImage[itk.F, 2]

start = itk.Index[2]()
start[0] = 0
start[1] = 0
size = itk.Size[2]()
size[0] = width
size[1] = height
region = itk.ImageRegion[2](start, size)


def _new_image(image_region, fill=None):
    """Allocate a GPU image over *image_region*, optionally constant-filled."""
    image = GPUImageType.New()
    image.SetRegions(image_region)
    image.Allocate()
    if fill is not None:
        image.FillBuffer(fill)
    return image


def _print_pixels(src_a, src_b, dst, index):
    """Print the pixel at *index* of each of the three images."""
    print("SrcA : {0}".format(src_a.GetPixel(index)))
    print("SrcB : {0}".format(src_b.GetPixel(index)))
    print("Dest : {0}".format(dst.GetPixel(index)))


def _run_kernel(manager, kernel, name, src_a, src_b, dst, index, n_elements):
    """Bind the image arguments, launch *kernel* and report before/after pixels.

    All three kernels in this test share the same argument layout:
    (srcA, srcB, dest, number_of_elements).
    """
    print("======================")
    print("Kernel : {0}".format(name))
    print("------------------")
    print("Before GPU kernel execution")
    _print_pixels(src_a, src_b, dst, index)
    manager.SetKernelArgWithImage(kernel, 0, src_a.GetGPUDataManager())
    manager.SetKernelArgWithImage(kernel, 1, src_b.GetGPUDataManager())
    manager.SetKernelArgWithImage(kernel, 2, dst.GetGPUDataManager())
    manager.SetKernelArgWithUInt(kernel, 3, n_elements)
    # 16x16 is the local work-group size.
    manager.LaunchKernel2D(kernel, width, height, 16, 16)
    print("------------------")
    print("After GPU kernel execution")
    _print_pixels(src_a, src_b, dst, index)
    print("======================")


srcA = _new_image(region, 1.0)
srcB = _new_image(region, 3.0)
dest = _new_image(region)

idx = itk.Index[2]()
idx[0] = 0
idx[1] = 0
number_of_elements = width * height

kernel_manager = itk.GPUKernelManager.New()
gpu_source = itk.GPUImageOps.GetOpenCLSource()
print(gpu_source)
kernel_manager.LoadProgramFromString(gpu_source, '#define PIXELTYPE float\n')

#
# addition kernel on command queue 0
#
kernel_add = kernel_manager.CreateKernel('ImageAdd')
srcA.SetCurrentCommandQueue(0)
srcB.SetCurrentCommandQueue(0)
dest.SetCurrentCommandQueue(0)
kernel_manager.SetCurrentCommandQueue(0)
print("Current Command Queue ID : 0 ")
_run_kernel(kernel_manager, kernel_add, "Addition",
            srcA, srcB, dest, idx, number_of_elements)

#
# create multiplication kernel
#
kernel_mult = kernel_manager.CreateKernel("ImageMult")
_run_kernel(kernel_manager, kernel_mult, "Multiplication",
            srcA, srcB, dest, idx, number_of_elements)

#
# Change Command Queue if more than one GPU device exists
# otherwise, use same command queue
#
queueID = 0
context_manager = itk.GPUContextManager.GetInstance()
if context_manager.GetNumberOfCommandQueues() >= 2:
    queueID = 1
    print("More than one GPU device available, switching command queues.")
else:
    print("Only one GPU device available, using same command queue.")
print("Current Command Queue ID : {0}".format(queueID))

#
# create subtraction kernel
#
kernel_sub = kernel_manager.CreateKernel("ImageSub")
srcA.FillBuffer(2.0)
srcB.FillBuffer(4.0)
dest.FillBuffer(1.0)
# default queue id was 0
srcA.SetCurrentCommandQueue(queueID)
srcB.SetCurrentCommandQueue(queueID)
dest.SetCurrentCommandQueue(queueID)
kernel_manager.SetCurrentCommandQueue(queueID)
_run_kernel(kernel_manager, kernel_sub, "Subtraction",
            srcA, srcB, dest, idx, number_of_elements)
|
sketchfab/osgexport | blender-2.5/exporter/osg/osgobject.py | Python | gpl-2.0 | 48,200 | 0.002095 | # -*- python-indent: 4; coding: iso-8859-1; mode: python -*-
# Copyright (C) 2008 Cedric Pinson, Jeremy Moles
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Cedric Pinson <cedric.pinson@plopbyte.com>
# Jeremy Moles <jeremy@emperorlinux.com>
import bpy
import json
import mathutils
Matrix = mathutils.Matrix
Vector = mathutils.Vector
# Number of decimal places written for float values.
FLOATPRE = 5


def CONCAT(s, j=""):
    """Join the string forms of the items of *s* using separator *j*."""
    return j.join(str(v) for v in s)


def STRFLT(f):
    """Render *f* as a fixed-point string with FLOATPRE decimal places."""
    # "%%.%df" % FLOATPRE builds the format, e.g. "%.5f", then formats f.
    return "%%.%df" % FLOATPRE % float(f)

# Spaces per indentation level in the emitted .osgt text.
INDENT = 2
# Exporter version written into the file header (patched by the build).
VERSION = (0, 0, 0)
def findNode(name, root):
    """Depth-first search for the node called *name* under *root*.

    Returns the matching node, or None when no descendant matches.
    Only Group nodes are descended into.
    """
    if root.name == name:
        return root
    if not isinstance(root, Group):
        return None
    for child in root.children:
        match = findNode(name, child)
        if match is not None:
            return match
    return None
def findMaterial(name, root):
    """Depth-first search for a Material called *name* in *root*'s subtree.

    Looks in the node's own stateset first, then recurses into Geode
    drawables and Group children.  Returns None when nothing matches.
    """
    if root.stateset is not None:
        for attribute in root.stateset.attributes:
            if isinstance(attribute, Material) and attribute.name == name:
                return attribute
    if isinstance(root, Geode):
        for drawable in root.drawables:
            match = findMaterial(name, drawable)
            if match is not None:
                return match
    if isinstance(root, Group):
        for child in root.children:
            match = findMaterial(name, child)
            if match is not None:
                return match
    return None
class Writer(object):
    """Base serializer for the ascii .osgt output.

    Keeps class-wide registries of live writers and already-written
    elements so repeated writes become UniqueID references.
    """
    # Registry of all live writer instances (cleared by resetWriter()).
    instances = {}
    # Elements already emitted once; later writes emit only a reference.
    wrote_elements = {}
    file_object = None
    def __init__(self, comment=None):
        object.__init__(self)
        self.comment = comment
        self.indent_level = 0
        # Ordinal of this writer among all created so far.
        self.counter = len(Writer.instances)
        Writer.instances[self] = True
    def writeFile(self, output):
        """Write the file header followed by this element's serialization."""
        self.writeHeader(output)
        self.write(output)
    def writeHeader(self, output):
        """Emit the ascii scene header (format version and generator tag)."""
        output.write("#Ascii Scene\n".encode('utf-8'))
        output.write("#Version 92\n".encode('utf-8'))
        output.write(("#Generator osgexport %d.%d.%d\n\n" % VERSION).encode('utf-8'))
    def write(self, output):
        """Serialize this element, or emit a UniqueID reference if already written."""
        Writer.serializeInstanceOrUseIt(self, output)
    def encode(self, string):
        """Expand template markers and return UTF-8 bytes.

        '#' expands to one indent unit, '$' to the current indentation;
        tabs are stripped.
        """
        text = string.replace("\t", "") \
            .replace("#", (" " * INDENT)) \
            .replace("$", (" " * (INDENT * self.indent_level)))
        return text.encode('utf-8')
    def writeMatrix(self, output, matrix):
        """Write the rows of a 4x4 matrix followed by a closing brace.

        NOTE(review): the opening brace is presumably written by the
        caller -- only "$#}" is emitted here; confirm against callers.
        NOTE(review): the version test would misclassify a hypothetical
        major version 3 with minor < 62 -- verify intended range.
        """
        if bpy.app.version[0] >= 2 and bpy.app.version[1] >= 62:
            # Blender >= 2.62: indexing is transposed relative to older
            # releases (see the else branch).
            for i in range(0, 4):
                output.write(self.encode("$##%s %s %s %s\n" % (STRFLT(matrix[0][i]),
                                                               STRFLT(matrix[1][i]),
                                                               STRFLT(matrix[2][i]),
                                                               STRFLT(matrix[3][i]))))
        else:
            for i in range(0, 4):
                output.write(self.encode("$##%s %s %s %s\n" % (STRFLT(matrix[i][0]),
                                                               STRFLT(matrix[i][1]),
                                                               STRFLT(matrix[i][2]),
                                                               STRFLT(matrix[i][3]))))
        output.write(self.encode("$#}\n"))
    @staticmethod
    def resetWriter():
        """Clear all global serialization state (registries and id counters)."""
        Writer.instances = {}
        ArrayData.instance = 0
        Object.instance = 0
    @staticmethod
    def serializeInstanceOrUseIt(obj, output):
        """Serialize *obj* fully the first time; afterwards emit a reference."""
        if obj in Writer.wrote_elements and \
           hasattr(obj, "uniqueID") and \
           obj.uniqueID is not None and \
           hasattr(obj, 'serializeReference'):
            return obj.serializeReference(output)
        Writer.wrote_elements[obj] = True
        return obj.serialize(output)
class Object(Writer):
    """Base of the osg object hierarchy: name, data variance, unique id, user data."""

    # Class-wide counter used to hand out unique ids.
    instance = 0

    def __init__(self, *args, **kwargs):
        Writer.__init__(self, *args)
        self.dataVariance = "UNKNOWN"
        self.name = kwargs.get('name', "None")
        self.uniqueID = None
        self.userdata = None

    def generateID(self):
        """Assign the next available unique id to this object."""
        self.uniqueID = Object.instance
        Object.instance += 1

    def copyFrom(self, obj):
        """Copy name and data variance from *obj* onto this object."""
        self.name = obj.name
        self.dataVariance = obj.dataVariance

    def serializeReference(self, output):
        """Write only a UniqueID reference block for an already-serialized object."""
        output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
        output.write(self.encode("$#UniqueID %d\n" % self.uniqueID))
        output.write(self.encode("$}\n"))

    def getOrCreateUserData(self):
        """Return the user-data container, creating it on first access."""
        if self.userdata is None:
            self.userdata = DefaultUserDataContainer()
        return self.userdata

    def getNameSpaceClass(self):
        """Return the fully qualified serialized type name, e.g. 'osg::Object'."""
        return "{}::{}".format(self.nameSpace(), self.className())

    def setName(self, name):
        self.name = name

    def className(self):
        return "Object"

    def nameSpace(self):
        return "osg"

    def serializeContent(self, output):
        """Write the generic fields (id, name, variance, user data) shared by all objects."""
        if self.uniqueID is not None:
            output.write(self.encode("$#UniqueID {}\n".format(self.uniqueID)))
        # BUGFIX: the original compared strings with "is not", which relies
        # on interning (and is a SyntaxWarning on modern Python); compare by
        # value instead.
        if self.name != "None":
            output.write(self.encode("$#Name \"{}\"\n".format(self.name)))
        if self.dataVariance != "UNKNOWN":
            output.write(self.encode("$#DataVariance {}\n".format(self.dataVariance)))
        if self.userdata is not None:
            output.write(self.encode("$#UserDataContainer TRUE {\n"))
            # User data is nested two levels deeper than this object.
            self.userdata.indent_level = self.indent_level + 2
            self.userdata.write(output)
            output.write(self.encode("$#}\n"))
class StringValueObject(Object):
    """A named string value, serialized as osg::StringValueObject."""

    def __init__(self, *args, **kwargs):
        """Expects two positional arguments: the key and the value string."""
        Object.__init__(self)
        self.generateID()
        self.key = args[0]
        self.value = args[1]

    def className(self):
        return "StringValueObject"

    def serialize(self, output):
        # Restored from a corrupted line: getNameSpaceClass() call.
        output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
        Object.serializeContent(self, output)
        # json.dumps quotes and escapes the strings for the .osgt format.
        output.write(self.encode("$#Name %s\n" % json.dumps(self.key)))
        output.write(self.encode("$#Value %s\n" % json.dumps(self.value)))
        output.write(self.encode("$}\n"))
class DefaultUserDataContainer(Object):
    """Container of user objects (e.g. StringValueObject) attached to a node."""

    def __init__(self, *args, **kwargs):
        Object.__init__(self, *args, **kwargs)
        self.generateID()
        self.value = []

    def append(self, value):
        """Add a user object to the container.  (Restored corrupted 'def'.)"""
        self.value.append(value)

    def className(self):
        return "DefaultUserDataContainer"

    def serialize(self, output):
        output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
        Object.serializeContent(self, output)
        self.serializeContent(output)
        output.write(self.encode("$}\n"))

    def serializeContent(self, output):
        """Write the UDC_UserObjects block containing every child object."""
        output.write(self.encode("$#UDC_UserObjects %d {\n" % len(self.value)))
        for child in self.value:
            # Children are nested two levels deeper than this container.
            child.indent_level = self.indent_level + 2
            child.write(output)
        output.write(self.encode("$#}\n"))
class UpdateMatrixTransform(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.stacked_transforms = []
def className(self):
return "UpdateMatrixTransform"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#StackedTransforms %d {\n" % |
ghchinoy/tensorflow | tensorflow/python/keras/wrappers/scikit_learn_test.py | Python | apache-2.0 | 5,719 | 0.010316 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Scikit-learn API wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
# Tiny network/dataset sizes keep these smoke tests fast.
INPUT_DIM = 5
HIDDEN_DIM = 5
TRAIN_SAMPLES = 10
TEST_SAMPLES = 5
NUM_CLASSES = 2
BATCH_SIZE = 5
EPOCHS = 1
def build_fn_clf(hidden_dim):
  """Build and compile a small relu/softmax classifier.

  Args:
    hidden_dim: width of the hidden layer.

  Returns:
    A compiled `keras.models.Sequential` model.
  """
  # Restored two lines corrupted in the original ("ke|ras", "Activ|ation").
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,)))
  model.add(keras.layers.Activation('relu'))
  model.add(keras.layers.Dense(hidden_dim))
  model.add(keras.layers.Activation('relu'))
  model.add(keras.layers.Dense(NUM_CLASSES))
  model.add(keras.layers.Activation('softmax'))
  model.compile(
      optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
  return model
def assert_classification_works(clf):
  """Fit *clf* on synthetic data and sanity-check score/predict/predict_proba."""
  np.random.seed(42)
  (train_x, train_y), (test_x, _) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  clf.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS)
  accuracy = clf.score(train_x, train_y, batch_size=BATCH_SIZE)
  assert np.isscalar(accuracy) and np.isfinite(accuracy)
  labels = clf.predict(test_x, batch_size=BATCH_SIZE)
  assert labels.shape == (TEST_SAMPLES,)
  valid_labels = range(NUM_CLASSES)
  for label in np.unique(labels):
    assert label in valid_labels
  probabilities = clf.predict_proba(test_x, batch_size=BATCH_SIZE)
  assert probabilities.shape == (TEST_SAMPLES, NUM_CLASSES)
  # Each row is a probability distribution and must sum to one.
  assert np.allclose(np.sum(probabilities, axis=1), np.ones(TEST_SAMPLES))
def build_fn_reg(hidden_dim):
  """Build and compile a small single-output regression network."""
  model = keras.models.Sequential()
  for layer in (
      keras.layers.Dense(INPUT_DIM, input_shape=(INPUT_DIM,)),
      keras.layers.Activation('relu'),
      keras.layers.Dense(hidden_dim),
      keras.layers.Activation('relu'),
      keras.layers.Dense(1),
      keras.layers.Activation('linear')):
    model.add(layer)
  model.compile(
      optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy'])
  return model
def assert_regression_works(reg):
  """Fit *reg* on synthetic data and sanity-check score/predict shapes."""
  np.random.seed(42)
  (train_x, train_y), (test_x, _) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  reg.fit(train_x, train_y, batch_size=BATCH_SIZE, epochs=EPOCHS)
  score = reg.score(train_x, train_y, batch_size=BATCH_SIZE)
  assert np.isscalar(score) and np.isfinite(score)
  predictions = reg.predict(test_x, batch_size=BATCH_SIZE)
  assert predictions.shape == (TEST_SAMPLES,)
class ScikitLearnAPIWrapperTest(test.TestCase):
  """Smoke tests for the Keras scikit-learn wrappers.

  Each test builds the model through a different kind of ``build_fn``
  (plain function, callable instance, subclass) for both the classifier
  and the regressor wrapper.
  """
  def test_classify_build_fn(self):
    """KerasClassifier accepts a plain function as build_fn."""
    with self.cached_session():
      clf = keras.wrappers.scikit_learn.KerasClassifier(
          build_fn=build_fn_clf,
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_classification_works(clf)
  def test_classify_class_build_fn(self):
    """KerasClassifier accepts a callable instance as build_fn."""
    class ClassBuildFnClf(object):
      def __call__(self, hidden_dim):
        return build_fn_clf(hidden_dim)
    with self.cached_session():
      clf = keras.wrappers.scikit_learn.KerasClassifier(
          build_fn=ClassBuildFnClf(),
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_classification_works(clf)
  def test_classify_inherit_class_build_fn(self):
    """KerasClassifier subclasses may implement __call__ themselves."""
    class InheritClassBuildFnClf(keras.wrappers.scikit_learn.KerasClassifier):
      def __call__(self, hidden_dim):
        return build_fn_clf(hidden_dim)
    with self.cached_session():
      clf = InheritClassBuildFnClf(
          build_fn=None,
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_classification_works(clf)
  def test_regression_build_fn(self):
    """KerasRegressor accepts a plain function as build_fn."""
    with self.cached_session():
      reg = keras.wrappers.scikit_learn.KerasRegressor(
          build_fn=build_fn_reg,
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_regression_works(reg)
  def test_regression_class_build_fn(self):
    """KerasRegressor accepts a callable instance as build_fn."""
    class ClassBuildFnReg(object):
      def __call__(self, hidden_dim):
        return build_fn_reg(hidden_dim)
    with self.cached_session():
      reg = keras.wrappers.scikit_learn.KerasRegressor(
          build_fn=ClassBuildFnReg(),
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_regression_works(reg)
  def test_regression_inherit_class_build_fn(self):
    """KerasRegressor subclasses may implement __call__ themselves."""
    class InheritClassBuildFnReg(keras.wrappers.scikit_learn.KerasRegressor):
      def __call__(self, hidden_dim):
        return build_fn_reg(hidden_dim)
    with self.cached_session():
      reg = InheritClassBuildFnReg(
          build_fn=None,
          hidden_dim=HIDDEN_DIM,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS)
      assert_regression_works(reg)
# Allow running this file directly; delegates to the TensorFlow test runner.
if __name__ == '__main__':
  test.main()
|
AadityaJ/CodeMonk | Searching/cm3.py | Python | mit | 388 | 0.085052 | t = int(raw_input())
while t>0:
t = t - 1
curma = 0
n = int(raw_input())
arr1 = map(int, raw_input().split())
arr2 = map(int, raw_input().split())
for i in xrange(n):
low, high, pos = 0 | , n - 1, -1
while low<=high:
mid = (low + high) / 2
if arr2[mid] >= arr1[i]:
pos = mid
low = mid + 1
else:
high = mid - 1
curma = max (curma | , pos - i)
prilnt curma |
b12io/orchestra | orchestra/migrations/0024_auto_20160325_1916.py | Python | apache-2.0 | 1,224 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Add a ``created_at`` timestamp (defaulting to now) to five models.

    Two lines of the ``workflow`` operation were corrupted in the original
    and are restored here to match the other four operations.
    """

    dependencies = [
        ('orchestra', '0023_assignment_failed'),
    ]

    operations = [
        migrations.AddField(
            model_name='certification',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='step',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='workercertification',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='workflow',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='workflowversion',
            name='created_at',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
VWApplications/VWCourses | forum/migrations/0001_initial.py | Python | mpl-2.0 | 2,693 | 0.004836 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-06 21:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
    """Initial forum schema: Reply and Topic models.

    Restores two corrupted lines from the original (the Reply
    ``created_at`` field and the ``'ordering'`` option key).
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0002_auto_20150616_2121'),
    ]

    operations = [
        migrations.CreateModel(
            name='Reply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField(verbose_name='Mensagem')),
                ('correct', models.BooleanField(default=False, verbose_name='Correto?')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='replies', to=settings.AUTH_USER_MODEL, verbose_name='Autor')),
            ],
            options={
                'verbose_name': 'Resposta',
                'verbose_name_plural': 'Respostas',
                'ordering': ['-correct', 'created_at'],
            },
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='Título')),
                ('body', models.TextField(verbose_name='Mensagem')),
                ('views', models.IntegerField(blank=True, default=0, verbose_name='Visualizações')),
                ('answers', models.IntegerField(blank=True, default=0, verbose_name='Respostas')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to=settings.AUTH_USER_MODEL, verbose_name='Autor')),
                ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
            ],
            options={
                'verbose_name': 'Tópico',
                'verbose_name_plural': 'Tópicos',
                'ordering': ['-updated_at'],
            },
        ),
    ]
|
spektom/incubator-airflow | airflow/contrib/hooks/slack_webhook_hook.py | Python | apache-2.0 | 1,156 | 0.00173 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License a | t
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please | use `airflow.providers.slack.hooks.slack_webhook`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.slack.hooks.slack_webhook import SlackWebhookHook # noqa
# Emit the deprecation warning at import time; stacklevel=2 points the
# warning at the importing module rather than at this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.slack.hooks.slack_webhook`.",
    DeprecationWarning, stacklevel=2
)
|
amitsela/incubator-beam | sdks/python/apache_beam/utils/path_test.py | Python | apache-2.0 | 2,494 | 0.004411 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the path module."""
import unittest
import mock
from apache_beam.utils import path
def _gen_fake_join(separator):
"""Returns a callable that joins paths with the given separator."""
def _join(first_path, *paths):
return separator.join((first_path,) + paths)
return _join
class Path(unittest.TestCase):
  """Tests for apache_beam.utils.path.join across path styles.

  Two corrupted lines from the original (the unix-path side_effect and a
  Windows-path assertEqual) are restored here.
  """

  def setUp(self):
    pass

  @mock.patch('apache_beam.utils.path.os')
  def test_gcs_path(self, *unused_mocks):
    # Test joining of GCS paths when os.path.join uses Windows-style separator.
    path.os.path.join.side_effect = _gen_fake_join('\\')
    self.assertEqual('gs://bucket/path/to/file',
                     path.join('gs://bucket/path', 'to', 'file'))
    self.assertEqual('gs://bucket/path/to/file',
                     path.join('gs://bucket/path', 'to/file'))
    self.assertEqual('gs://bucket/path//to/file',
                     path.join('gs://bucket/path', '/to/file'))

  @mock.patch('apache_beam.utils.path.os')
  def test_unix_path(self, *unused_mocks):
    # Test joining of Unix paths.
    path.os.path.join.side_effect = _gen_fake_join('/')
    self.assertEqual('/tmp/path/to/file', path.join('/tmp/path', 'to', 'file'))
    self.assertEqual('/tmp/path/to/file', path.join('/tmp/path', 'to/file'))

  @mock.patch('apache_beam.utils.path.os')
  def test_windows_path(self, *unused_mocks):
    # Test joining of Windows paths.
    path.os.path.join.side_effect = _gen_fake_join('\\')
    self.assertEqual(r'C:\tmp\path\to\file',
                     path.join(r'C:\tmp\path', 'to', 'file'))
    self.assertEqual(r'C:\tmp\path\to\file',
                     path.join(r'C:\tmp\path', r'to\file'))
# Allow running this test module directly with the stdlib runner.
if __name__ == '__main__':
  unittest.main()
|
mc10/project-euler | problem_34.py | Python | mit | 561 | 0.003565 | '''
Problem 34
@author: Kevin Ji
'''
# Precomputed n! for n = 0..10; the digit factorials 0!..9! are all this
# problem needs (index 10 is kept for completeness).
FACTORIALS = [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
def factorial(number):
    """Return number! for 0 <= number <= 10 via table lookup.

    The table mirrors the module-level FACTORIALS list (inlined so the
    function is self-contained).  Raises ValueError for negative input:
    the original silently returned 10! for number == -1 because a
    negative list index wraps around.  Raises IndexError for number > 10.
    """
    if number < 0:
        raise ValueError("number must be non-negative")
    return (1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800)[number]
def is_curious_num(number):
    """Return True if *number* equals the sum of the factorials of its digits.

    e.g. 145 = 1! + 4! + 5!.  Uses math.factorial instead of the module's
    bounded lookup table; also restores a line that was corrupted in the
    original ("temp_num % 10").
    """
    import math
    remaining = number
    digit_factorial_sum = 0
    while remaining > 0:
        digit_factorial_sum += math.factorial(remaining % 10)
        remaining //= 10
    return number == digit_factorial_sum
# Tests
#print(is_curious_num(145)) # True
#print(is_curious_num(100)) # False
# Sum every "curious" number in [3, 1000000).  The search starts at 3
# because 1 and 2 are excluded by the problem statement (they are not sums).
# NOTE(review): Problem 34's theoretical upper bound is 7 * 9! = 2540160,
# larger than this limit -- verify 1000000 is sufficient for the answer.
cur_sum = 0
for num in range(3, 1000000):
    if is_curious_num(num):
        cur_sum += num
print(cur_sum)
|
mick-d/nipype_source | nipype/interfaces/freesurfer/base.py | Python | bsd-3-clause | 4,569 | 0.000219 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
Currently these tools are supported:
* Dicom2Nifti: using mri_convert
* Resample: using mri_convert
Examples
--------
See the docstrings for the individual classes for 'working' examples.
"""
__docformat__ = 'restructuredtext'
import os
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.base import (CommandLine, Directory,
| CommandLineInputSpec, isdefined)
class Info(object):
    """ Freesurfer subject directory and version information.

    Examples
    --------

    >>> from nipype.interfaces.freesurfer import Info
    >>> Info.version()  # doctest: +SKIP
    >>> Info.subjectsdir()  # doctest: +SKIP

    """

    @staticmethod
    def version():
        """Check for freesurfer version on system

        Find which freesurfer is being used....and get version from
        /path/to/freesurfer/build-stamp.txt

        Returns
        -------
        version : string
           version number as string
           or None if freesurfer version not found
        """
        fs_home = os.getenv('FREESURFER_HOME')
        if fs_home is None:
            return None
        versionfile = os.path.join(fs_home, 'build-stamp.txt')
        if not os.path.exists(versionfile):
            return None
        # The first line of the build stamp carries the version string;
        # 'with' guarantees the handle is closed even on read errors.
        with open(versionfile, 'rt') as fid:
            return fid.readline()

    @classmethod
    def subjectsdir(cls):
        """Check the global SUBJECTS_DIR

        Returns
        -------
        subject_dir : string
            Current environment setting of SUBJECTS_DIR, or None when no
            FreeSurfer installation is detected.
        """
        return os.environ['SUBJECTS_DIR'] if cls.version() else None
class FSTraitedSpec(CommandLineInputSpec):
    """Base input specification shared by all FreeSurfer interfaces."""
    # Every FS command accepts an optional subjects directory.
    subjects_dir = Directory(exists=True, desc='subjects directory')
class FSCommand(CommandLine):
    """General support for FreeSurfer commands.

    Every FS command accepts 'subjects_dir' input.
    """
    input_spec = FSTraitedSpec

    # Class-level default subjects directory shared by all instances.
    _subjects_dir = None

    def __init__(self, **inputs):
        super(FSCommand, self).__init__(**inputs)
        self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir')
        if not self._subjects_dir:
            self._subjects_dir = Info.subjectsdir()
        if not isdefined(self.inputs.subjects_dir) and self._subjects_dir:
            self.inputs.subjects_dir = self._subjects_dir
        self._subjects_dir_update()

    def _subjects_dir_update(self):
        # Propagate the configured subjects directory into the command's
        # environment so the FreeSurfer binaries pick it up.
        if self.inputs.subjects_dir:
            self.inputs.environ.update({'SUBJECTS_DIR':
                                        self.inputs.subjects_dir})

    @classmethod
    def set_default_subjects_dir(cls, subjects_dir):
        """Set the class-wide default SUBJECTS_DIR used by new instances."""
        cls._subjects_dir = subjects_dir

    def run(self, **inputs):
        """Run the command, honoring a per-call 'subjects_dir' override."""
        if 'subjects_dir' in inputs:
            self.inputs.subjects_dir = inputs['subjects_dir']
            self._subjects_dir_update()
        return super(FSCommand, self).run(**inputs)

    def _gen_fname(self, basename, fname=None, cwd=None, suffix='_fs',
                   use_ext=True):
        '''Define a generic mapping for a single outfile

        The filename is potentially autogenerated by suffixing inputs.infile

        Parameters
        ----------
        basename : string (required)
            filename to base the new filename on
        fname : string
            if not None, just use this fname
        cwd : string
            prefix paths with cwd, otherwise os.getcwd()
        suffix : string
            default suffix
        '''
        if basename == '':
            msg = 'Unable to generate filename for command %s. ' % self.cmd
            msg += 'basename is not set!'
            raise ValueError(msg)
        if cwd is None:
            cwd = os.getcwd()
        fname = fname_presuffix(basename, suffix=suffix,
                                use_ext=use_ext, newpath=cwd)
        return fname

    @property
    def version(self):
        """FreeSurfer version string, normalized for release/dev builds.

        BUGFIX: the class previously defined ``version`` twice; the first
        definition (raw ``Info.version()``) was dead code silently shadowed
        by this one, so only the effective definition is kept.
        """
        ver = Info.version()
        if ver:
            if 'dev' in ver:
                return ver.rstrip().split('-')[-1] + '.dev'
            else:
                return ver.rstrip().split('-v')[-1]
|
jh23453/privacyidea | doc/installation/system/pimanage/conf.py | Python | agpl-3.0 | 9,178 | 0.006102 | # -*- coding: utf-8 -*-
#
# pi-manage documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 11 19:10:09 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can spec | ify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the proje | ct.
project = u'pi-manage'
copyright = u'2015, privacyIDEA'
author = u'privacyIDEA'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.8'
# The full version, including alpha/beta/rc tags.
release = '2.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pi-managedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pi-manage.tex', u'pi-manage Documentation',
u'privacyIDEA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual pa |
khushboo9293/mailman | src/mailman/rest/tests/test_api.py | Python | gpl-3.0 | 2,194 | 0.000456 | # Copyright (C) 2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""API version tests."""
__all__ = [
'TestAPIVersion',
]
import unittest
from mailman.core.system import system
from mailman.testing.helpers import call_api
from mailman.testing.layers import RESTLayer
from urllib.error import HTTPError
class TestAPIVersion(unittest.TestCase):
    """Verify the REST system/root resource for each supported API version.

    Fixes dataset-separator corruption that had split ``call_api`` and a
    ``json[...]`` subscript across a stray ``|`` marker.
    """

    layer = RESTLayer

    def test_api_31(self):
        # API version 3.1 was introduced in Mailman 3.1.
        url = 'http://localhost:9001/3.1/system'
        new = '{}/versions'.format(url)
        json, response = call_api(url)
        self.assertEqual(json['mailman_version'], system.mailman_version)
        self.assertEqual(json['python_version'], system.python_version)
        self.assertEqual(json['api_version'], '3.1')
        self.assertEqual(json['self_link'], new)

    def test_api_30(self):
        # API version 3.0 is still supported.
        url = 'http://localhost:9001/3.0/system'
        new = '{}/versions'.format(url)
        json, response = call_api(url)
        self.assertEqual(json['mailman_version'], system.mailman_version)
        self.assertEqual(json['python_version'], system.python_version)
        self.assertEqual(json['api_version'], '3.0')
        self.assertEqual(json['self_link'], new)

    def test_bad_api(self):
        # There is no API version earlier than 3.0, so the server must 404.
        with self.assertRaises(HTTPError) as cm:
            call_api('http://localhost:9001/2.9/system')
        self.assertEqual(cm.exception.code, 404)
|
szigyi/DAT210x | Module4/assignment5.py | Python | mit | 2,886 | 0.010742 | import math
import pandas as pd
import numpy as np
from scipy import misc
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
# Look pretty...
# matplotlib.style.use('ggplot')
plt.style.use('ggplot')
def Plot2D(T, title, x, y):
    # Scatter-plot one pair of components (columns x and y) of the
    # transformed data T on a fresh figure.
    figure = plt.figure()
    axes = figure.add_subplot(111)
    axes.set_title(title)
    axes.set_xlabel('Component: {0}'.format(x))
    axes.set_ylabel('Component: {0}'.format(y))
    axes.scatter(T[:, x], T[:, y], marker='.', alpha=0.7)
from os import listdir
file_path = "/Users/szabolcs/dev/git/DAT210x/Module4/Datasets/ALOI/32/"
#
# TODO: Start by creating a regular old, plain, "vanilla"
# python list. You can call it 'samples'.
#
file_names = listdir(file_path)
samples = []
#
# TODO: Write a for-loop that iterates over the images in the
# Module4/Datasets/ALOI/32/ folder, appending each of them to
# your list. Each .PNG image should first be loaded into a
# temporary NDArray, just as shown in the Feature
# Representation reading.
# NOTE(review): scipy.misc.imread was removed in scipy >= 1.2 — confirm the
# pinned scipy version, or switch to imageio.imread.
for file_name in file_names:
    pic = misc.imread(file_path + file_name)
    # Flatten the image's rows into one long pixel sequence.
    ser = [item for sublist in pic for item in sublist]
    pic = pd.Series(ser)
    #pic = pic[::2, ::2]
    # Re-shape into an (n_pixels, 3) RGB array.
    pic = pic.values.reshape(-1, 3)
    samples.append(pic)
#
# Optional: Resample the image down by a factor of two if you
# have a slower computer. You can also convert the image from
# 0-255 to 0.0-1.0 if you'd like, but that will have no
# effect on the algorithm's results.
#
df = pd.DataFrame.from_records(samples)
print(df.shape)
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))
# Transpose each image in place (rows become columns).
for i in range(num_images):
    df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)
print(df.shape)
#df.iloc[0] = pd.to_numeric(df.iloc[0], errors="coerce")
#print(df.dtypes)
#
# TODO: Once you're done answering the first three questions,
# right before you converted your list to a dataframe, add in
# additional code which also appends to your list the images
# in the Module4/Datasets/ALOI/32_i directory. Re-run your
# assignment and answer the final question below.
#
# .. your code here ..
#
# TODO: Convert the list to a dataframe
#
# .. your code here ..
#
# TODO: Implement Isomap here. Reduce the dataframe df down
# to three components, using K=6 for your neighborhood size
#
from sklearn.manifold import Isomap
imap = Isomap(n_components=2, n_neighbors=6)
imap.fit(df)
df_imap = imap.transform(df)
Plot2D(df_imap, "Isomap", 0, 1)
#
# TODO: Create a 2D Scatter plot to graph your manifold. You
# can use either 'o' or '.' as your marker. Graph the first two
# isomap components
#
# .. your code here ..
#
# TODO: Create a 3D Scatter plot to graph your manifold. You
# can use either 'o' or '.' as your marker:
#
# .. your code here ..
plt.show()
|
globocom/database-as-a-service | dbaas/maintenance/async_jobs/remove_instance_database.py | Python | bsd-3-clause | 874 | 0 | from mainte | nance.async_jobs import BaseJob
from maintenance.models import RemoveInstanceDatabase
__all__ = ('RemoveInstanceDatabase',)
class RemoveInstanceDatabaseJob(BaseJob):
    """Async job that removes a read-only instance from a database.

    Fixes dataset-separator corruption in the ``get_steps_method`` string
    literal ('remove_readonly_i | nstance_steps').
    """

    # NOTE(review): 'step_manger_class' spelling (missing 'a') is kept as-is;
    # the base class presumably reads this exact attribute name — confirm
    # before renaming.
    step_manger_class = RemoveInstanceDatabase
    get_steps_method = 'remove_readonly_instance_steps'
    success_msg = 'Instance removed with success'
    error_msg = 'Could not remove instance'

    def __init__(self, request, database, task, instance, since_step=None,
                 step_manager=None, scheduled_task=None,
                 auto_rollback=False, auto_cleanup=False):
        # The extra `instance` argument is stored locally; all remaining
        # arguments are forwarded unchanged to BaseJob.
        super(RemoveInstanceDatabaseJob, self).__init__(
            request, database, task, since_step,
            step_manager, scheduled_task,
            auto_rollback, auto_cleanup
        )
        self._instance = instance

    @property
    def instances(self):
        # The job framework expects a list; this job targets one instance.
        return [self._instance]
|
romanz/python-trezor | trezorlib/tests/device_tests/test_cancel.py | Python | lgpl-3.0 | 2,032 | 0 | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2018 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
import trezorlib.messages as m
from .conftest import setup_client
@setup_client()
@pytest.mark.parametrize(
    "message",
    [
        m.Ping(message="hello", button_protection=True),
        m.GetAddress(
            address_n=[0],
            coin_name="Bitcoin",
            script_type=m.InputScriptType.SPENDADDRESS,
            show_display=True,
        ),
    ],
)
def test_cancel_message_via_cancel(client, message):
    """A Cancel sent after the ButtonRequest must abort the operation.

    Fixes dataset-separator corruption that had split the function name and
    a ``client`` reference across stray ``|`` markers.
    """
    resp = client.call_raw(message)
    # Device asks for on-device confirmation first.
    assert isinstance(resp, m.ButtonRequest)
    client.transport.write(m.ButtonAck())
    # Interrupt the pending operation with an explicit Cancel.
    client.transport.write(m.Cancel())
    resp = client.transport.read()
    assert isinstance(resp, m.Failure)
    assert resp.code == m.FailureType.ActionCancelled
@setup_client()
@pytest.mark.parametrize(
    "message",
    [
        m.Ping(message="hello", button_protection=True),
        m.GetAddress(
            address_n=[0],
            coin_name="Bitcoin",
            script_type=m.InputScriptType.SPENDADDRESS,
            show_display=True,
        ),
    ],
)
def test_cancel_message_via_initialize(client, message):
    """Sending Initialize instead of ButtonAck must reset the session.

    The device abandons the pending operation and replies with Features,
    exactly as for a fresh handshake.
    """
    resp = client.call_raw(message)
    # Device asks for on-device confirmation first.
    assert isinstance(resp, m.ButtonRequest)
    client.transport.write(m.ButtonAck())
    # Interrupt the pending operation with Initialize.
    client.transport.write(m.Initialize())
    resp = client.transport.read()
    assert isinstance(resp, m.Features)
|
fhdk/eordre-app.pyqt | util/worker.py | Python | agpl-3.0 | 8,857 | 0.002939 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright: Frede Hundewadt <echo "ZmhAdWV4LmRrCg==" | base64 -d>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
#
# code from https://stackoverflow.com/a/41605909
#
"""Worker module"""
import csv
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from util import httpFn
__module__ = "worker"
class Worker(QObject):
"""
Must derive from QObject in order to emit signals, connect slots to other signals, and operate in a QThread.
"""
sig_status = pyqtSignal(int, str) # worker id, progress: emitted every step through the file
sig_done = pyqtSignal(int) # worker id: emitted at end of the file
    def __init__(self, thread_id: int, app):
        """Store the owning application handle and this worker's id.

        :param thread_id: id echoed back through sig_status / sig_done so
            listeners can tell which worker emitted a signal
        :param app: application object whose processEvents() is pumped
            during the long import loops to keep the UI responsive
        """
        super().__init__()
        self.__app = app
        self.__thread_id = thread_id
        # Cancellation flag; no reader is visible in this chunk of the
        # module — confirm how/where it is consulted before relying on it.
        self.__abort = False
@pyqtSlot(name="import_contacts_csv")
def import_contacts_csv(self, contacts, filename, header):
"""
Import contacts using csv file
:param contacts: object
:param filename: str
:param header: bool
:return:
"""
filename.encode("utf8")
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder indlæsning ..."))
contacts.recreate_table()
ftext = ">>> Import er færdig!"
with open(filename) as csvdata:
reader = csv.reader(csvdata, delimiter="|")
line = 0
for row in reader:
self.__app.processEvents()
if not len(row) == contacts.csv_record_length:
ftext = "FEJL: Formatet i den valgte fil er ikke korrekt!"
break
if header and line == 0:
line += 1
continue
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[2].strip(), row[3].strip()))
contacts.translate_row_insert(row) # send row to database
self.sig_status.emit(self.__thread_id, "{}".format(ftext))
self.sig_done.emit(self.__thread_id)
@pyqtSlot(name="import_customers_csv")
def import_customers_csv(self, customers, filename, header):
"""
Import customers using csv file
:param customers:
:param filename:
:param header:
:return:
"""
filename.encode("utf8")
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder indlæsning ..."))
customers.recreate_table()
ftext = ">>> Import er færdig!"
with open(filename) as csvdata:
reader = csv.reader(csvdata, delimiter="|")
rows = list(reader)
for line, row in enumerate(rows):
self.__app.processEvents()
if not len(row) == customers.csv_record_length:
ftext = "FEJL: Formatet i den valgte fil er ikke korrekt!"
break
if header and line == 0:
continue
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[1].strip(), row[2].strip()))
customers.translate_row_insert(row) # send row to database
self.sig_status.emit(self.__thread_id, "{}".format(ftext))
self.sig_done.emit(self.__thread_id)
@pyqtSlot(name="import_customers_http")
def import_customers_http(self, customers, employees, settings):
"""
Import customers using http
:param customers:
:param employees:
:param settings:
:return:
"""
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder hentning ..."))
self.sig_status.emit(self.__thread_id, "{}".format("Henter fra server ..."))
data = httpFn.get_customers(settings, employees) # fetch datafile from http server
for row in data: # data processing
self.__app.processEvents()
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[0], row[1]))
customers.import_http(row) # init_detail row to database
self.sig_done. | emit(self.__thread_id)
@pyqtSlot(name="import_order_line | s_csv")
def import_orderlines_csv(self, orderlines, filename, header):
"""
Import lines using csv file
:param orderlines: OrderLine() class
:param filename: filename to read
:param header: bool if first line is header
:return:
"""
filename.encode("utf8")
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder indlæsning ..."))
orderlines.recreate_table()
ftext = ">>> Import er færdig!"
with open(filename) as csvdata:
reader = csv.reader(csvdata, delimiter="|")
rows = list(reader)
for line, row in enumerate(rows):
self.__app.processEvents()
if not len(row) == orderlines.csv_record_length:
ftext = "FEJL: Formatet i den valgte fil er ikke korrekt!"
break
if header and line == 0:
continue
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[2].strip(), row[3].strip()))
orderlines.translate_row_insert(row) # send row to database
self.sig_status.emit(self.__thread_id, "{}".format(ftext))
self.sig_done.emit(self.__thread_id)
@pyqtSlot(name="import_products_http")
def import_products_http(self, products, settings):
"""
Import products using http
:param products:
:param settings:
"""
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder hentning ..."))
products.drop_table() # drop product table
self.sig_status.emit(self.__thread_id, "{}".format("Henter fra server ..."))
data = httpFn.get_products(settings) # fetching datafile using http with settings
for row in data: # process the data
self.__app.processEvents()
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[0], row[1]))
products.insert(row) # send row to database
self.sig_done.emit(self.__thread_id)
@pyqtSlot(name="import_reports_csv")
def import_reports_csv(self, employeeid, reports, filename, header):
"""
Import reports using csv file
:param employeeid
:param reports:
:param filename:
:param header:
:return:
"""
filename.encode("utf8")
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder indlæsning ..."))
reports.recreate_table()
ftext = ">>> Import er færdig!"
with open(filename) as csvdata:
reader = csv.reader(csvdata, delimiter="|")
rows = list(reader)
for line, row in enumerate(rows):
self.__app.processEvents()
if not len(row) == reports.csv_record_length:
ftext = "FEJL: Formatet i den valgte fil er ikke korrekt!"
break
if header and line == 0:
continue
self.sig_status.emit(self.__thread_id, "{} - {}".format(row[2].strip(), row[3].strip()))
reports.translate_row_insert(row, employeeid) # send row to database
self.sig_status.emit(self.__thread_id, "{}".format(ftext))
self.sig_done.emit(self.__thread_id)
@pyqtSlot(name="import_visits_csv")
def import_visits_csv(self, visits, filename, header):
"""
Import visits using csv file
:param visits:
:param filename:
:param header:
:return:
"""
filename.encode("utf8")
self.sig_status.emit(self.__thread_id, "{}".format("Forbereder indlæsning ..."))
visits.recreate_table()
ftext = ">>> Import er færdig!"
with open(filename) as csvdata:
reader = csv.reader(csvdata, delimiter="|")
|
RentennaDev/partial | partial/request.py | Python | mit | 325 | 0.009231 | from werkzeug.contrib.securecookie import SecureCookie
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from part | ial import scanner
class Request(BaseRequest):
    """Werkzeug request that adds a lazily-decoded secure-cookie session.

    Fixes dataset-separator corruption in the ``@cached_property`` decorator.
    """

    @cached_property
    def session(self):
        # Decoded from the signed cookie once, then memoized by
        # cached_property for the remainder of the request.
        return SecureCookie.load_cookie(self, secret_key=scanner.CONFIG['SECRET'])
farooqsheikhpk/Aspose.BarCode-for-Cloud | Examples/Python/generating-saving/cloud-storage/generate-barcode-and-save-asposecloudstorage.py | Python | mit | 1,910 | 0.013613 | import asposebarcodecloud
from asposebarcodecloud.BarcodeApi import BarcodeApi
from asposebarcodecloud.BarcodeApi import ApiException
import asposestoragecloud
from asposestoragecloud.StorageApi import StorageApi
from asposestoragecloud.StorageApi import ResponseMessage
import ConfigParser
config = ConfigPa | rser.ConfigParser()
config.readfp(open(r'../../data/config.properties'))
apiKey = config.get('AppConfig', 'api_key')
appSid = config.get('AppConfig', 'app_sid')
out_folder = config.get('AppC | onfig', 'out_folder')
data_folder = "../../data/" #resouece data folder
#ExStart:1
#Instantiate Aspose.Storage API SDK
storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True)
storageApi = StorageApi(storage_apiClient)
#Instantiate Aspose.Barcode API SDK
api_client = asposebarcodecloud.ApiClient.ApiClient(apiKey, appSid, True)
barcodeApi = BarcodeApi(api_client);
#Set the barcode file name created on server
name = "sample-barcode"
#Set Text to encode inside barcode.
text = "Aspose.BarCode"
#Set Barcode Symbology
type = "Code128"
#Set Generated Barcode Image Format
format = "jpeg"
try:
#invoke Aspose.BarCode Cloud SDK API to create barcode and put in cloud storage
response = barcodeApi.PutBarcodeGenerateFile(name, file=None, text=text, type=type, format=format)
if response.Status == "OK":
#download generated barcode from cloud storage
response = storageApi.GetDownload(Path=name)
outfilename = out_folder + name + "." + format
with open(outfilename, 'wb') as f:
for chunk in response.InputStream:
f.write(chunk)
except ApiException as ex:
print "ApiException:"
print "Code:" + str(ex.code)
print "Message:" + ex.message
#ExEnd:1 |
Norin-Radd/plugin.video.boilerroom | resources/lib/youtubewrapper.py | Python | gpl-2.0 | 9,753 | 0.042346 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Author: Norin (copied it)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import json
import re
import os
import sys
import math
import xbmcaddon
import xbmcplugin
from common_variables import *
from directory import *
#get list of playlists
def get_playlists():
    """Fetch the channel's playlists and add one directory entry per playlist.

    Fix: `thumb` is now reset for every playlist. Previously, a playlist
    whose thumbnails dict contained none of the expected sizes would either
    reuse the previous playlist's thumbnail or raise NameError at addDir().
    """
    url = 'https://www.googleapis.com/youtube/v3/playlists?part=snippet,contentDetails&channelId='+channel_id+'&maxResults=50&key='+youtube_api_key
    raw = urllib.urlopen(url)
    resp = json.load(raw)
    raw.close()
    totalplaylists = len(resp["items"])
    for playlist in resp["items"]:
        playlist_id = playlist["id"]
        thumb = ''  # default when no usable thumbnail size is found
        try:
            # Prefer the largest available thumbnail size.
            for level in ["high","medium","low"]:
                if level in playlist["snippet"]["thumbnails"]:
                    thumb = playlist["snippet"]["thumbnails"][level]["url"]
                    break
        except:
            thumb = ''
        label = playlist["snippet"]["title"]
        addDir('[B]'+label.encode('utf-8')+'[/B]',playlist_id,1,thumb,1,totalplaylists,token='')
    return
#get list of live videos
def get_live_videos():
    """List the channel's currently-live videos as playable episode items.

    Two API round-trips: a search for live events, then a videos.list call
    for the snippet/contentDetails of those ids. If nothing is live, shows
    a dialog and exits the plugin.
    """
    list_of_tupple_items = []
    url = 'https://www.googleapis.com/youtube/v3/search?eventType=live&part=snippet&channelId='+channel_id+'&type=video&maxResults=50&key='+youtube_api_key
    raw = urllib.urlopen(url)
    resp = json.load(raw)
    raw.close()
    if resp["items"]:
        # Collect the live video ids, then fetch their full metadata in one
        # batched videos.list request.
        video_ids = []
        for item in resp["items"]:
            videoid = item["id"]["videoId"]
            video_ids.append(videoid)
        video_ids = ','.join(video_ids)
        url_api = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails&id='+video_ids+'&key='+youtube_api_key
        raw = urllib.urlopen(url_api)
        resp = json.load(raw)
        raw.close()
        for item in resp["items"]:
            title = item["snippet"]["title"]
            plot = item["snippet"]["description"]
            thumb = item["snippet"]["thumbnails"]["high"]["url"]
            aired = item["snippet"]["publishedAt"]
            videoid = item["id"]
            # NOTE(review): re.findall returns a list of digit runs, so the
            # 'episode' infolabel is a list here — confirm downstream usage.
            episode = re.findall('(\d+)',title)
            try:
                # Split the ISO 8601 publishedAt into (year, month, day).
                aired = re.compile('(.+?)-(.+?)-(.+?)T').findall(aired)[0]
                date = aired[2] + '.' + aired[1] + '.' + aired[0]
                aired = aired[0]+'-'+aired[1]+'-'+aired[2]
            except:
                aired = ''
                date = ''
            infolabels = {'plot':plot.encode('utf-8'),'tvshowtitle':tvshowtitle,'title':title.encode('utf-8'),'originaltitle':title.encode('utf-8'),'aired':aired,'date':date,'status':status,'cast':cast,'episode':episode,'playcount':0}
            # Video and audio info
            video_info = { 'codec': 'avc1', 'aspect' : 1.78 }
            audio_info = { 'codec': 'aac', 'language' : 'en' }
            try:
                if item["contentDetails"]["definition"].lower() == 'hd':
                    video_info['width'] = 1280
                    video_info['height'] = 720
                    audio_info['channels'] = 2
                else:
                    video_info['width'] = 854
                    video_info['height'] = 480
                    audio_info['channels'] = 1
                # Downgrade the advertised resolution when the YouTube addon
                # is pinned to a fixed, non-HD quality.
                try:
                    if xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality.ask') == 'false' and xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality') != '3' and xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality') != '4':
                        video_info['width'] = 854
                        video_info['height'] = 480
                        audio_info['channels'] = 1
                except: pass
            except:
                # Missing contentDetails: assume SD defaults.
                video_info['width'] = 854
                video_info['height'] = 480
                audio_info['channels'] = 1
            # Build and append the directory item.
            tupple = build_episode_item(title.encode('utf-8'),videoid,5,thumb,1,infolabels,video_info,audio_info)
            list_of_tupple_items.append(tupple)
    if list_of_tupple_items:
        number_of_items = len(list_of_tupple_items)
        xbmcplugin.addDirectoryItems(int(sys.argv[1]), list_of_tupple_items,totalItems=number_of_items)
        add_sort_methods()
        xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
    else:
        # Nothing live: notify the user and leave the plugin.
        msgok(translate(30000),translate(30002))
        sys.exit(0)
#Get uploads playlist id and return the list of all videos videos uploaded by the channel user
def get_all_youtube_uploads():
    """Resolve the channel's 'uploads' playlist id and list all its videos."""
    api_url = 'https://www.googleapis.com/youtube/v3/channels?part=contentDetails&id='+channel_id+'&key='+youtube_api_key
    handle = urllib.urlopen(api_url)
    payload = json.load(handle)
    handle.close()
    if "items" not in payload:
        # No channel data at all: bail out.
        sys.exit(0)
    try:
        uploads_playlist = payload["items"][0]["contentDetails"]["relatedPlaylists"]["uploads"]
        return_youtubevideos('all', uploads_playlist, '', 1)
    except:
        # Malformed response (or listing failure): bail out quietly.
        sys.exit(0)
    return
#Get list of vod videos
def return_youtubevideos(name,url,token,page):
items_per_page = int(selfAddon.getSetting('items_per_page'))
list_of_tupple_items = []
if page != 1:
url_api = 'https://www.googleapis.com/youtube/v3/playlistItems?part=id,snippet,contentDetails&maxResults='+str(items_per_page)+'&playlistId='+url+'&key='+youtube_api_key +'&pageToken='+token
else:
url_api = 'https://www.googleapis.com/youtube/v3/playlistItems?part=id,snippet,contentDetails&maxResults='+str(items_per_page)+'&playlistId='+url+'&key='+youtube_api_key
raw = urllib.urlopen(url_api)
resp = json.load(raw)
raw.close()
try: nextpagetoken = resp["nextPageToken"]
except: nextpagetoken = ''
try: availablevideos = resp["pageInfo"]["totalResults"]
except: availablevideos = 1
returnedVideos = resp["items"]
totalvideos = len(returnedVideos)
totalpages = int(math.ceil((float(availablevideos)/items_per | _page)))
video_ids = []
if returnedVideos:
for video in returnedVideos:
videoid = video["contentDetails"]["videoId"]
video_ids.append(videoid)
video_ids = ','.join(video_ids)
url_api = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails&id='+video_ids+'&key='+youtube_api_key
raw = urllib.urlopen(url_api)
resp = json.load(raw)
raw.close()
returnedVideos = re | sp["items"]
for video in returnedVideos:
title = video["snippet"]["title"]
plot = video["snippet"]["description"]
aired = video["snippet"]["publishedAt"]
thumb = video["snippet"]["thumbnails"]["high"]["url"]
videoid = video["id"]
#process duration
duration_string = video["contentDetails"]["duration"]
try: duration = return_duration_as_seconds(duration_string)
except: duration = '0'
try:
aired = re.compile('(.+?)-(.+?)-(.+?)T').findall(aired)[0]
date = aired[2] + '.' + aired[1] + '.' + aired[0]
aired = aired[0]+'-'+aired[1]+'-'+aired[2]
except:
aired = ''
date = ''
try:
if episode_playlists:
if url in episode_playlists:
episode = re.compile('(\d+)').findall(title)[0]
else: episode = ''
else: episode = ''
except: episode = ''
#playcount
if os.path.exists(os.path.join(watchedfolder,str(videoid)+'.txt')) : playcount = 1
else: playcount = 0
infolabels = {'plot':plot.encode('utf-8'),'aired':aired,'date':date,'tvshowtitle':tvshowtitle,'title':title.encode('utf-8'),'originaltitle':title.encode('utf-8'),'status':status,'cast':cast,'duration':duration,'episode':episode,'playcount':playcount}
#Video and audio info
video_info = { 'codec': 'avc1', 'aspect' : 1.78 }
audio_info = { 'codec': 'aac', 'language' : 'en' }
try:
if video["contentDetails"]["definition"].lower() == 'hd':
video_info['width'] = 1280
video_info['height'] = 720
audio_info['channels'] = 2
else:
video_info['width'] = 854
video_info['height'] = 480
audio_info['channels'] = 1
try:
if xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality.ask') == 'false' and xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality') != '3' and xbmcaddon.Addon(id='plugin.video.youtube').getSetting('kodion.video.quality') != '4':
video_info['width'] = 854
video_info['height'] = 480
audio_info['channels'] |
pablorecio/djangae | djangae/db/backends/appengine/base.py | Python | bsd-3-clause | 17,365 | 0.002361 | #STANDARD LIB
import datetime
import decimal
import warnings
#LIBRARIES
from django.conf import settings
from django.db.backends import (
BaseDatabaseOperations,
BaseDatabaseClient,
BaseDatabaseIntrospection,
BaseDatabaseWrapper,
BaseDatabaseFeatures,
BaseDatabaseValidation
)
# Django < 1.7 does not ship BaseDatabaseSchemaEditor; fall back to an empty
# stand-in with the same name so references below still resolve.
try:
    from django.db.backends.schema import BaseDatabaseSchemaEditor
except ImportError:
    # Django < 1.7 doesn't have BaseDatabaseSchemaEditor
    class BaseDatabaseSchemaEditor(object):
        pass
from django.db.backends.creation import BaseDatabaseCreation
from django.utils import timezone
from google.appengine.api.datastore_types import Blob, Text
from google.appengine.ext.db import metadata
from google.appengine.datastore import datastore_stub_util
from google.appengine.api.datastore import Key
from google.appengine.api import datastore
#DJANGAE
from djangae.db.utils import (
decimal_to_string,
make_timezone_naive,
get_datastore_key,
)
from djangae.db import caching
from djangae.indexing import load_special_indexes
from .commands import (
SelectCommand,
InsertCommand,
FlushCommand,
UpdateCommand,
DeleteCommand,
coerce_unicode,
get_field_from_column
)
from djangae.db.backends.appengine import dbapi as Database
class Connection(object):
    """No-op connection object satisfying Django's DB-API expectations.

    There is nothing to commit, roll back, or close at this level, so the
    transaction methods are intentionally empty.
    """

    def __init__(self, wrapper, params):
        # Expose the wrapper's creation/ops objects directly on the
        # connection, mirroring what regular Django backends provide.
        self.queries = []
        self.params = params
        self.ops = wrapper.ops
        self.creation = wrapper.creation

    def commit(self):
        pass

    def rollback(self):
        pass

    def close(self):
        pass
class Cursor(object):
""" Dummy cursor class """
    def __init__(self, connection):
        """Initialize DB-API-style cursor state.

        rowcount stays -1 until a statement has executed; returned_ids
        collects datastore keys handed back by inserts/selects.
        """
        self.connection = connection
        self.start_cursor = None
        self.returned_ids = []
        self.rowcount = -1
        self.last_select_command = None
        self.last_delete_command = None
def execute(self, sql, *params):
if isinstance(sql, SelectCommand):
# Also catches subclasses of SelectCommand (e.g Update)
self.last_select_command = sql
self.rowcount = self.last_select_command.execute() or -1
elif isinstance(sql, FlushCommand):
sql.execute()
elif isinstance(sql, UpdateCommand):
self.rowcount = sql.execute()
elif isinstance(sql, DeleteCommand):
self.rowcount = sql.execute()
elif isinstance(sql, InsertCommand):
self.connection.queries.append(sql)
self.returned_ids = sql.execute()
else:
raise Database.CouldBeSupportedError("Can't execute traditional SQL: '%s' (although perhaps we could make GQL work)", sql)
def next(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
def fetchone(self, delete_flag=False):
try:
if isinstance(self.last_select_command.results, (int, long)):
# Handle aggregate (e.g. count)
return (self.last_select_command.results, )
else:
entity = self.last_select_command.next_result()
except StopIteration: #FIXME: does this ever get raised? Where from?
entity = None
if entity is None:
return None
## FIXME: Move this to SelectCommand.next_result()
result = []
# If there is extra_select prepend values to the results list
for col, query in self.last_select_command.extra_select.items():
result.append(entity.get(col))
for col in self.last_select_command.queried_fields:
if col == "__key__":
key = entity if isinstance(entity, Key) else entity.key()
self.returned_ids.append(key)
result.append(key.id_or_name())
else:
field = get_field_from_column(self.last_select_command.model, col)
value = self.connection.ops.convert_values(entity.get(col), field)
result.append(value)
return result
def fetchmany(self, size, delete_flag=False):
if not self.last_select_command.results:
return []
result = []
i = 0
while i < size:
entity = self.fetchone(delete_flag)
if entity is None:
break
result.append(entity)
i += 1
return result
@property
def lastrowid(self):
return self.returned_ids[-1].id_or_name()
def __iter__(self):
return self
def close(self):
pass
MAXINT = 9223372036854775808
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "djangae.db.backends.appengine.compiler"
# Datastore will store all integers as 64bit long values
integer_field_ranges = {
'SmallIntegerField': (-MAXINT, MAXINT-1),
'IntegerField': (-MAXINT, MAXINT-1),
'BigIntegerField': (-MAXINT, MAXINT-1),
'PositiveSmallIntegerField': (0, MAXINT-1),
'PositiveIntegerField': (0, MAXINT-1),
}
    def quote_name(self, name):
        # Datastore kind/property names need no quoting or escaping.
        return name
    def convert_values(self, value, field):
        """ Called when returning values from the datastore"""
        # Coerce a raw datastore value to the Python value expected for
        # ``field``: run Django's base conversion first, then apply
        # datastore-specific handling keyed on the field's db_type.
        value = super(DatabaseOperations, self).convert_values(value, field)
        db_type = field.db_type(self.connection)
        if db_type == 'string' and isinstance(value, str):
            # Python 2: the datastore may hand back utf-8 bytes; decode
            # them so callers always see unicode.
            value = value.decode("utf-8")
        elif db_type == "datetime":
            value = self.connection.ops.value_from_db_datetime(value)
        elif db_type == "date":
            value = self.connection.ops.value_from_db_date(value)
        elif db_type == "time":
            value = self.connection.ops.value_from_db_time(value)
        elif db_type == "decimal":
            value = self.connection.ops.value_from_db_decimal(value)
        elif db_type == 'list':
            if not value:
                value = [] # Convert None back to an empty list
        elif db_type == 'set':
            if not value:
                # A missing/empty stored value round-trips as an empty set.
                value = set()
            else:
                value = set(value)
        return value
def sql_flush(self, style, tables, seqs, allow_cascade=False):
return [ FlushCommand(table) for table in tables ]
def prep_lookup_key(self, model, value, field):
if isinstance(value, basestring):
value = value[:500]
left = value[500:]
if left:
warnings.warn("Truncating primary key that is over 500 characters. "
"THIS IS AN ERROR IN YOUR PROGRAM.",
RuntimeWarning)
# This is a bit of a hack. Basically when you query an integer PK with a
# string containing an int. SQL seems to return the row regardless of type, and as far as
# I can tell, Django at no point tries to cast the value to an integer. So, in the
# case where the internal type is an AutoField, we try to cast the string value
# I would love a more generic solution... patches welcome!
# It would be nice to see the SQL output of the lookup_int_as_str test is on SQL, if
# the string is converted to an int, I'd love to know where!
if field.get_internal_type() == 'AutoField':
try:
value = int(value)
| except (TypeError, ValueError):
pass
value = get_datastore_key(model, value)
else:
value = get_datastore_key(model, value)
return value
de | f prep_lookup_decimal(self, model, value, field):
return self.value_to_db_decimal(value, field.max_digits, field.decimal_places)
def prep_lookup_date(self, model, value, field):
if isinstance(value, datetime.datetime):
return value
return self.value_to_db_date(value)
def prep_lookup_time(self, model, value, field):
if isinstance(value, datetime.datetime):
return value
return self.value_to_db_time(value)
def prep_lookup_value(self, model, value, field, colu |
lukasmerten/GitPlayground | UsefulPythonScripts/Ferrie2007_Innen.py | Python | mit | 3,648 | 0.052906 | import numpy as np
import matplotlib.pyplot as plt
import pylab
import scipy.integrate as integrate
x= -500
y= -500
z = 10
# Konstanten fuer CMZ
xc =-50 # Position Mitte in allg Koordinaten
yc = 50
TettaC = 70
#Konstanten fuer DISK
alpha = 13.5
beta = 20.
TettaD = 48.5
# Abmessungen in CMZ Koordinaten
XMAX=250
XC = XMAX/2
LC = XMAX/(2*np.log(2)**0.25)
HC = 18.
HC2 = 54.
# Abmessungen in DISK Koordinaten
XD = 1200
LD = 438.
HD = 42.
HD2 = 120.
#Konstanten fuer HII -WIM-
y3 = -10
z3= -20
L3 = 145.
H3 = 26.
L2 = 3700.
H2 = 140.
L1 = 17000
H1=950.
#Konstanen fuer HII VHIM
alphaVH = 21
LVH=162
HVH = 90
def Bogenmass(x):
    """Convert an angle from degrees to radians ("Bogenmass")."""
    angle_deg = x
    return angle_deg * np.pi / 180
def cos(x):
    """Cosine of an angle given in degrees."""
    return np.cos(Bogenmass(x))
def sin(x):
    """Sine of an angle given in degrees."""
    return np.sin(Bogenmass(x))
def sech2(x):
    """Squared hyperbolic secant: sech^2(x) = 1 / cosh(x)**2.

    Bug fix: the original returned ``np.cosh(x)**2``, the *reciprocal*
    of what the name says and of what the density profiles below need.
    cosh^2 grows without bound with |x|, so the gas density would
    diverge away from the plane; the Ferriere (2007) model uses
    sech^2(z/H), which decays to 0.
    """
    return 1.0 / np.cosh(x) ** 2
def u(x):
    """Heaviside unit step: 0 where x < 0, else 1 (elementwise).

    Bug fix: the original tested ``x.all<0`` which compares the *bound
    method* ``x.all`` against 0 (never the intended call ``x.all()``),
    so the function effectively always returned 1.  Using ``np.where``
    also makes the step work elementwise on arrays as well as scalars.
    """
    return np.where(np.asarray(x) < 0, 0, 1)
def CMZ_X_Trafo(x, y):
    """X coordinate of (x, y) in the rotated CMZ frame (center xc, yc)."""
    dx = x - xc
    dy = y - yc
    return dx * cos(TettaC) + dy * sin(TettaC)
def CMZ_Y_Trafo(x, y):
    """Y coordinate of (x, y) in the rotated CMZ frame (center xc, yc)."""
    dx = x - xc
    dy = y - yc
    return -dx * sin(TettaC) + dy * cos(TettaC)
def DISK_X_Trafo(x, y, z):
    """X coordinate in the tilted/rotated disk frame (angles alpha, beta, TettaD)."""
    xT = x * cos(beta) * cos(TettaD)
    yT = y * (sin(alpha) * sin(beta) * cos(TettaD) - cos(alpha) * sin(TettaD))
    zT = z * (cos(alpha) * sin(beta) * cos(TettaD) + sin(alpha) * sin(TettaD))
    return xT - yT - zT
def DISK_Y_Trafo(x,y,z):
xT= x*cos(beta)*sin(TettaD)
yT = y*(sin(alpha)*sin(beta)*sin(TettaD) +cos(alpha)*cos(Tet | taD))
zT = z*(cos(alpha)*sin(beta)*sin(TettaD) -sin(alpha)*sin(TettaD))
return -xT+yT+zT
def DISK_Z_Trafo(x,y,z):
xT = x*sin(beta)
yT = y*sin(alpha)*cos(beta)
zT = z*cos(al | pha)*cos(beta)
return xT+yT+zT
#Mollekularer Wasserstoff im CMZ,
def n_H2_CMZ(x0, y0, z0):
    """Molecular hydrogen density in the CMZ; input in the original (galactic) coordinates."""
    xp = CMZ_X_Trafo(x0, y0)
    yp = CMZ_Y_Trafo(x0, y0)
    # Quartic radial falloff in the rotated plane, Gaussian in z.
    radial = ((np.sqrt(xp**2 + (2.5 * yp)**2) - XC) / LC)**4
    return 150 * np.exp(-radial) * np.exp(-(z0 / HC)**2)
#Atomarer Wasserstoff im CMZ
def n_HI_CMZ(x0, y0, z0):
    """Atomic hydrogen density in the CMZ; input in the original (galactic) coordinates."""
    xp = CMZ_X_Trafo(x0, y0)
    yp = CMZ_Y_Trafo(x0, y0)
    # Quartic radial falloff in the rotated plane, Gaussian in z (scale HC2).
    radial = ((np.sqrt(xp**2 + (2.5 * yp)**2) - XC) / LC)**4
    return 8.8 * np.exp(-radial) * np.exp(-(z0 / HC2)**2)
#Mollekularer Wasserstoff in der DISK
def n_H2_DISK(x0, y0, z0):
    """Molecular hydrogen density of the tilted inner disk."""
    xd = DISK_X_Trafo(x0, y0, z0)
    yd = DISK_Y_Trafo(x0, y0, z0)
    zd = DISK_Z_Trafo(x0, y0, z0)
    radial = ((np.sqrt(xd**2 + (3.1 * yd)**2) - XD) / LD)**4
    return 4.8 * np.exp(-radial) * np.exp(-(zd / HD)**2)
#Atomarer Wasserstoff in der DISK
def n_HI_DISK(x0, y0, z0):
    """Atomic hydrogen density of the tilted inner disk."""
    xd = DISK_X_Trafo(x0, y0, z0)
    yd = DISK_Y_Trafo(x0, y0, z0)
    zd = DISK_Z_Trafo(x0, y0, z0)
    radial = ((np.sqrt(xd**2 + (3.1 * yd)**2) - XD) / LD)**4
    return 0.34 * np.exp(-radial) * np.exp(-(zd / HD2)**2)
#Ioniesierter Wasserstoff
def n_HII_WIM(x0, y0, z0):
    """Warm ionised medium (WIM) density, three-component Ferriere-style model.

    Bug fix: the original used the module-level globals ``x`` (in P1) and
    ``z`` (in P2/P3) where the function arguments ``x0``/``z0`` were
    clearly intended -- every other term in the same expressions uses the
    arguments (y0-y3, z0-z3), and the globals are unrelated scratch
    values that are even rebound later in the script.
    """
    r = np.sqrt(x0**2 + y0**2 + z0**2)
    # P1: localized Gaussian component around (0, y3, z3).
    P1 = np.exp(-(x0**2 + (y0 - y3)**2) / L3**2) * np.exp(-(z0 - z3)**2 / H3**2)
    # P2: ring-like component at radius ~L2 with sech^2 vertical profile.
    P2 = np.exp(-((r - L2) / (0.5 * L2))**2) * sech2(z0 / H2)
    # P3: extended component with cosine radial and sech^2 vertical profile.
    P3 = np.cos(np.pi * r * 0.5 / L1) * sech2(z0 / H1)
    return 8.0 * (P1 + 0.009 * P2 + 0.005 * P3)
def n_HII_VHIM(x0, y0, z0):
    """Very hot ionised medium (VHIM) density in a frame tilted by alphaVH.

    Bug fix: the second rotated coordinate used the module-level global
    ``z`` instead of the argument ``z0``; the companion coordinate ``e``
    on the previous line correctly uses ``z0``, so this was a typo.
    """
    e = y0 * cos(alphaVH) + z0 * sin(alphaVH)
    s = -y0 * sin(alphaVH) + z0 * cos(alphaVH)
    return 0.29 * np.exp(-((x0**2 + e**2) / LVH**2 + s**2 / HVH**2))
def n_HII(x0,y0,z0):
    # Total ionised hydrogen: very hot (VHIM) + warm (WIM) components.
    return n_HII_VHIM(x0,y0,z0) +n_HII_WIM(x0,y0,z0)
def n_HI(x,y,z):
    # Total atomic hydrogen: disk + CMZ components.
    return n_HI_DISK(x,y,z) + n_HI_CMZ(x,y,z)
def n_H2(x,y,z):
    # Total molecular hydrogen: CMZ + disk components.
    return n_H2_CMZ(x,y,z) + n_H2_DISK(x,y,z)
# NOTE(review): this rebinds the module-level scalars x and y to arrays;
# functions above that accidentally read these globals are affected.
x = pylab.linspace(-100,100,200)
y = pylab.linspace(-100,100,200)
# Build the 2-D coordinate grids.
xx,yy = pylab.meshgrid(x,y)
# Fill the density grid point by point in the mid-plane (z = 0).
zz = pylab.zeros(xx.shape)
for i in range(xx.shape[0]):
    for j in range(xx.shape[1]):
        zz[i,j] = n_H2(xx[i,j], yy[i,j],0)
# Plot the H2 mass distribution as a colour map with contours.
plt.figure()
plt.title('Massdistribution for H2')
plt.pcolormesh(xx,yy,zz)
plt.colorbar()
plt.contour(xx,yy,zz)
plt.gca().set_aspect("equal")
plt.xlabel('x/pc')
plt.ylabel('y/pc')
plt.show()
|
goller/pynsq | tests/test_writer.py | Python | mit | 892 | 0 | from __future__ import absolute_import
import nsq
import unittest
class WriterUnitTest(unittest.TestCase):
def setUp(self):
super(WriterUnitTest, self).setUp()
def test_constructor(self):
name = 'test'
reconnect_interval = 10.0
writer = nsq.Writer(nsqd_tcp_addresses=['127.0.0.1:4150'],
reconnect_interval=reconnect_interval,
| name=name)
self.assertEqual(writer.name, name)
self.assertEqual(0, len(writer.conn_kwargs))
self.assertEqual(writer.reconnect_interval, re | connect_interval)
    def test_bad_writer_arguments(self):
        # An unknown keyword option ('foo') must make the Writer
        # constructor fail its argument validation (AssertionError).
        bad_options = dict(foo=10)
        self.assertRaises(
            AssertionError,
            nsq.Writer,
            nsqd_tcp_addresses=['127.0.0.1:4150'],
            reconnect_interval=15.0,
            name='test', **bad_options)
|
mrmuxl/keops | keops/modules/comments/apps.py | Python | agpl-3.0 | 403 | 0.002481 |
from django.utils.translation import ugettext_lazy as _
app_info = {
'name': 'comments',
'author': 'Katrid',
'website': 'http://katrid.com',
'short_description': 'Enterprise Social Network',
'description': _('Comments, Discussions, Mailing List, News, Document Followers'),
' | dependencies': ['keops.module | s.contact'],
'category': _('Communication'),
'version': '0.2',
}
|
neillc/zookeepr | zkpylons/controllers/product_category.py | Python | gpl-2.0 | 5,696 | 0.002633 | import logging
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.ssl_requirement import enforce_ssl
from zkpylons.lib.validators import BaseSchema, ProductValidator
import zkpylons.lib.helpers as h
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta
from zkpylons.model.product import Product, ProductInclude
from zkpylons.model.product_category import ProductCategory
from zkpylons.config.lca_info import lca_info
log = logging.getLogger(__name__)
class NotExistingProductCategoryValidator(validators.FancyValidator):
    """Reject a submitted category name already used by a different record."""
    def validate_python(self, values, state):
        # Look up any existing category with the submitted name.
        product_category = ProductCategory.find_by_name(values['product_category']['name'])
        # Only an error when the match is a *different* record from the one
        # currently being edited (c.product_category).
        if product_category != None and product_category != c.product_category:
            message = "Duplicate product category name"
            error_dict = {'product_category.name': "Category name already in use"}
            raise Invalid(message, values, state, error_dict=error_dict)
class ProductCategorySchema(BaseSchema):
    """FormEncode schema describing one product category's form fields."""
    name = validators.String(not_empty=True)
    description = validators.String(not_empty=True)
    note = validators.String()
    display = validators.String(not_empty=True)
    display_mode = validators.String()
    display_order = validators.Int(min=0, max=2000000, not_empty=True)
    invoice_free_products = validators.Bool(if_missing=False)
    min_qty = validators.Int(min=0, max=2000000)
    max_qty = validators.Int(min=0, max=2000000)
    # TODO: check that min_qty <= max_qty
class NewProductCategorySchema(BaseSchema):
    """Wrapper schema for the 'new category' form; also enforces name uniqueness."""
    product_category = ProductCategorySchema()
    pre_validators = [NestedVariables]
    chained_validators = [NotExistingProductCategoryValidator()]
class EditProductCategorySchema(BaseSchema):
    """Wrapper schema for the 'edit category' form (no uniqueness check here)."""
    product_category = ProductCategorySchema()
    pre_validators = [NestedVariables]
class ProductCategoryController(BaseController):
    @enforce_ssl(required_all=True)
    @authorize(h.auth.has_organiser_role)
    def __before__(self, **kwargs):
        # Runs before every action: requires SSL and the organiser role.
        pass
    @dispatch_on(POST="_new")
    def new(self):
        # GET: render the 'create category' form (POST is routed to _new).
        return render('/product_category/new.mako')
@validate(schema=NewProductCategorySchema(), form='new', post_only=True, on_get=True, variable_decode=True)
def _new(self):
results = self.form_result['product_category']
c.product_category = ProductCategory(**results)
meta.Session.add(c.product_category)
| meta.Session.commit()
h.flash("Category created")
redirect_to(action='view', id=c.product_category.id)
    def view(self, id):
        # Show a single product category.
        c.product_category = ProductCategory.find_by_id(id)
        return render('/product_category/view.mako')
    def stats(self, id):
        # Show statistics for one category, alongside the full category list.
        c.can_edit = True
        c.product_category = ProductCategory.find_by_id(id)
        c.product_categories = ProductCategory.find_all()
        return render('/product_category/stats.mako')
def i | ndex(self):
c.can_edit = True
c.product_category_collection = ProductCategory.find_all()
return render('/product_category/list.mako')
    @dispatch_on(POST="_edit")
    def edit(self, id):
        # GET: render the edit form pre-filled with the category's current
        # values (POST is routed to _edit).
        c.product_category = ProductCategory.find_by_id(id)
        defaults = h.object_to_defaults(c.product_category, 'product_category')
        form = render('/product_category/edit.mako')
        return htmlfill.render(form, defaults)
    @validate(schema=EditProductCategorySchema(), form='edit', post_only=True, on_get=True, variable_decode=True)
    def _edit(self, id):
        # Apply the validated form values to the category and persist.
        product_category = ProductCategory.find_by_id(id)
        for key in self.form_result['product_category']:
            setattr(product_category, key, self.form_result['product_category'][key])
        # update the objects with the validated form data
        meta.Session.commit()
        h.flash("The product_category has been updated successfully.")
        redirect_to(action='view', id=id)
    @dispatch_on(POST="_delete")
    def delete(self, id):
        """Delete the product_category
        GET will return a form asking for approval.
        POST requests will delete the item.
        """
        c.product_category = ProductCategory.find_by_id(id)
        return render('/product_category/confirm_delete.mako')
    @validate(schema=None, form='delete', post_only=True, on_get=True, variable_decode=True)
    def _delete(self, id):
        # Manually cascade the delete: products, their includes, includes of
        # the category, then the category itself.
        # NOTE(review): committing after every single delete is O(n) commits;
        # presumably done to work around the cascading issue mentioned below.
        c.product_category = ProductCategory.find_by_id(id)
        # For some reason cascading isn't working for me. Likely I just don't understand SA so I'll do it this way:
        # first delete all of the products
        for product in c.product_category.products:
            # We also delete all of the productincludes for the products
            for include in ProductInclude.find_by_product(product.id):
                meta.Session.delete(include)
                meta.Session.commit()
            meta.Session.delete(product)
            meta.Session.commit()
        # Also delete any includes of the category
        for include in ProductInclude.find_by_category(id):
            meta.Session.delete(include)
            meta.Session.commit()
        meta.Session.delete(c.product_category)
        meta.Session.commit()
        h.flash("Category has been deleted.")
        redirect_to('index')
|
NikNitro/Python-iBeacon-Scan | sympy/printing/printer.py | Python | gpl-3.0 | 9,328 | 0.000536 | """Printing subsystem driver
SymPy's printing system works the following way: Any expression can be
passed to a designated Printer who then is responsible to return an
adequate representation of that expression.
The basic concept is the following:
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-back use the emptyPrinter method for the printer.
Some more information how the single concepts work and who should use which:
1. The object prints itself
This was the original way of doing printing in sympy. Every class had
its own latex, mathml, str and repr methods, but it turned out that it
is hard to produce a high quality printer, if all the methods are spread
out that far. Therefore all printing code was combined into the different
printers, which works great for built-in sympy objects, but not that
good for user defined classes where it is inconvenient to patch the
printers.
Nevertheless, to get a fitting representation, the printers look for a
specific method in every object, that will be called if it's available
and is then responsible for the representation. The name of that method
depends on the specific printer and is defined under
Printer.printmethod.
2. Take the best fitting method defined in the printer.
The printer loops through expr classes (class + its bases), and tries
to dispatch the work to _print_<EXPR_CLASS>
e.g., suppose we have the following class hierarchy::
Basic
|
Atom
|
Number
|
Rational
then, for expr=Rational(...), in order to dispatch, we will try
calling printer methods as shown in the figure below::
p._print(expr)
|
|-- p._print_Rational(expr)
|
|-- p._print_Number(expr)
|
|-- p._print_Atom(expr)
|
`-- p._print_Basic(expr)
if ._print_Rational method exists in the printer, then it is called,
and the result is returned back.
otherwise, we proceed with trying Rational bases in the inheritance
order.
3. As fall-back use the emptyPrinter method for the printer.
As fall-back self.emptyPrinter will be called with the expression. If
not defined in the Printer subclass this will be the same as str(expr).
"""
from __future__ import print_function, division
from sympy import Basic, Add
from sympy.core.core import BasicMeta
from functools import cmp_to_key
class Printer(object):
"""Generic printer
Its job is to provide infrastructure for implementing new printers easily.
Basically, if you want to implement a printer, all you have to do is:
1. Subclass Printer.
2. Define Printer.printmethod in your subclass.
If a object has a method with that name, this method will be used
for printing.
3. In your subclass, define ``_print_<CLASS>`` methods
For each class you want to provide printing to, define an appropriate
method how to do it. For example if you want a class FOO to be printed in
its own way, define _print_FOO::
def _print_FOO(self, e):
...
this should return how FOO instance e is printed
Also, if ``BAR`` is a subclass of ``FOO``, ``_print_FOO(bar)`` will
be called for instance of ``BAR``, if no ``_print_BAR`` is provided.
Thus, usually, we don't need to provide printing routines for every
class we want to support -- only generic routine has to be provided
for a set of classes.
A good example for this are functions - for example ``PrettyPrinter``
only defines ``_print_Function``, and there is no ``_print_sin``,
``_print_tan``, etc...
On the other hand, a good printer will probably have to define
separate routines for ``Symbol``, ``Atom``, ``Number``, ``Integral``,
``Limit``, etc...
4. If convenient, override ``self.emptyPrinter``
This callable will be called to obtain printing result as a last resort,
that is when no appropriate print method was found for an expression.
Examples of overloading StrPrinter::
from sympy import Basic, Function, Symbol
from sympy.printing.str import StrPrinter
class CustomStrPrinter(StrPrinter):
\"\"\"
Examples of how to customize the StrPrinter for both a SymPy class and a
user defined class subclassed from the SymPy Basic class.
\"\"\"
def _print_Derivative(self, expr):
\"\"\"
Custom printing of the SymPy Derivative class.
Instead of:
D(x(t), t) or D(x(t), t, t)
We will print:
x' or x''
In this example, expr.args == (x(t), t), and expr.args[0] == x(t), and
expr.args[0].func == x
\"\"\"
return str(expr.args[0].func) + "'"*len(expr.args[1:])
def _print_MyClass(self, expr):
\"\"\"
Print the characters of MyClass.s alternatively lower case and upper
case
\"\"\"
s = ""
i = 0
for char in expr.s:
if i % 2 == 0:
s += char.lower()
else:
s += char.upper()
i += 1
return s
# Override the __str__ method of to use CustromStrPrinter
Basic.__str__ = lambda self: CustomStrPrinter().doprint(self)
# Demonstration of CustomStrPrinter:
t = Symbol('t')
x = Function('x')(t)
dxdt = x.diff(t) # dxdt is a Derivative instance
d2xdt2 = dxdt.diff(t) # dxdt2 is a Derivative instance
ex = MyClass('I like both lowercase and upper case')
print dxdt
print d2xdt2
print ex
The output of the above code is::
x'
x''
i lIkE BoTh lOwErCaSe aNd uPpEr cAsE
By overriding Basic.__str__, we can customize the printing of anything that
is subclassed from Basic.
"""
_global_settings = {}
_default_settings = {}
emptyPrinter = str
printmethod = None
def __init__(self, settings=None):
self._str = str
self._settings = self._default_settings.copy()
for key, val in self._global_settings.items():
if key in self._default_settings:
self._settings[key] = val
if settings is not None:
self._settings.update(settings)
if len(self._settings) > len(self._default_settings):
for key in self._settings:
if key not in self._default_settings:
raise TypeError("Unknown setting '%s'." % key)
# _print_level is the number of times self._print() was recursively
# called. See StrPrinter._print_Float() for an example of usage
self._print_level = 0
@classmethod
def set_global_settings(cls, **settings):
"""Set system-wide printing settings. """
for key, val in settings.items():
if val is not None:
cls._global_settings[key] = val
@property
def order(self):
if 'order' in self._settings:
return self._settings['order']
else:
raise AttributeError("No order defined.")
def doprint(self, | expr):
"""Returns printer's representation for expr (as a string)"""
return self._str(self._print(expr))
def _print(self, expr, *args, **kwargs):
"""Internal dispatcher
Tries the following concepts to print an expression:
1. Let the object print itself if it knows how.
2. Take the best fitting method defined in the printer.
3. As fall-ba | ck use the emptyPrinter method for the printer.
"""
self._print_level += 1
try:
# If the printer defines a name for a printing method
# (Printer.printme |
kAlmAcetA/zookeeper_monitor | zookeeper_monitor/zk/__init__.py | Python | mit | 668 | 0.002994 | # -*- coding:utf-8 -*-
"""
Module provides zookeeper abstraction
"""
from .host import Host
from .cluster import Cluster
from .exceptions import HostBaseError, HostConnec | tionTimeout, HostSetTimeoutTypeError
from .exceptions import HostSetTimeoutValueError, HostInvalidInfo, ZkBaseError
from .exceptions import ClusterHostAddError, ClusterHostDuplicateError, ClusterHostCreateError
__all__ = [
'Host',
'Cluster',
'ZkBaseError',
'HostBaseError',
'HostConnectionTimeout',
'H | ostSetTimeoutTypeError',
'HostSetTimeoutValueError',
'HostInvalidInfo',
'ClusterHostAddError',
'ClusterHostDuplicateError',
'ClusterHostCreateError'
]
|
kanishkamisra/pokedex | scraper.py | Python | mit | 1,483 | 0.035738 | import requests
import lxml.html as lh
import json
url = 'http://pokemondb.net/pokedex/all'
page = requests.get(url)
doc = lh.fromstring(page.content)
#Store data from the table into a list
elements = doc.xpath('//tr')
col = []
i=0
# Store headers as tuples with each header being associated with a list
for e in elements[0]:
i+=1
name = e.text_content()
col.append((name,[]))
# Let's populate!
for j in range(1,len(elements)):
E = elements[j]
if len(E) != 10:
break
i=0
for e in E.iterchildren():
data = e.text_content()
if i>0:
try:
data = int(data)
except:
pass
col[i][1].append(data)
i+=1
data_list = []
# Populate a separate list with json formatted data.
for i in range(0,len(elements)-1):
data_list.append({column: entry[i] for (column, entry) in col})
# Clean!
def brackets | (word):
list = [x for x in word]
for i in range(1, len(list)):
if list[i].isupper():
list[i] = ' ' + list[i]
new_list = ''.join(list).split(' ')
if len(new_list) > 1:
new_list.insert(1,'(')
new_list.append(')')
return ' '.join(new_list)
def breaks(word):
list = [x for x in word]
for i in range(1, len(list)):
if list[i].isupper():
lis | t[i] = ' ' + list[i]
new_list = ''.join(list).split(' ')
return new_list
for data in data_list:
data['Name'] = brackets(data['Name'])
data['Type'] = breaks(data['Type'])
# Dump to json
with open('pokemondata.json', 'wb') as jsonfile:
json.dump(data_list, jsonfile)
print 'Data saved to json!'
|
adamlincoln/pokersim | src/pokersim/__init__.py | Python | gpl-3.0 | 1,480 | 0.006081 | import argparse
from pokersim.Table import Table
from pokersim.Player import Player
from pokersim.Recorder import Recorder
rec = Recorder()
parser = argparse.ArgumentParser(description='Set up a poker game')
#parser.add_argument('-n', '--numplayers', type=int, nargs='?', default=10, help='Number of players')
parser.add_argument('-d', '--numhands', type=int, nargs='?', default=1, help='Number of hands')
#parser.add_argument('-c', '--chips', action='append', type=int, nargs='+', help='Number of chips')
parser.add_argument('-p', '--players', action='append', nargs=2, metavar=('number_of_chips', 'brain_name'), help='Player info: chips brain_name')
def main():
args = vars(parser.parse_args())
print args
# Get from command line:
# Type of players
# Chips for players
numplayers = len(args['players'])
if numplayers < 3:
print 'At least 3 players are needed. You selected', numplayers
return
#if args['chips'] is not None and len(args['chips']) > numplayers:
#numplayers = len(args['chip | s'])
table = Table()
for i in xrange(numplayers):
player = Player(int(args['players'][i][0]), args['players'][i][1])
player.sit(table, i)
for i in xrange(args['numhands']):
| table.deal()
print 'After', args['numhands'], 'hands:'
for player in table.players.values():
print 'Player', player.position, 'has', player.chips, 'chips'
print 'The Table has', table.box, 'chips'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.