repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
dmchu/selenium_gr_5 | tests/day_6/hw_day_6_task_12.py | Python | apache-2.0 | 3,837 | 0.002085 | import os
from faker import Faker
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.wait import WebDr | iverWait
from selenium.webdriver.support import expected_conditions as EC
from wheel.signatures import assertTrue
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_add_new_product(driver):
wait = WebDriverWait(driver, 5)
f = Faker()
driver.get("http://localhost/litecart/admin")
driver.find_element_by_name("username").send_keys("admin")
driver.find_eleme | nt_by_name("password").send_keys("admin")
driver.find_element_by_name("login").click()
wait.until(EC.title_is("My Store"))
menu = driver.find_element_by_id("box-apps-menu")
menu.find_element(By.LINK_TEXT, "Catalog").click()
add_new_product = driver.find_element(By.LINK_TEXT, "Add New Product")
add_new_product.click()
tab_general = driver.find_element(By.ID, "tab-general")
status_enabled = tab_general.find_element(By.CSS_SELECTOR, "input[value='1']")
status_enabled.click()
product_name = "Dark Blue Duck"
product_name_field = tab_general.find_element(By.CSS_SELECTOR, ".input-wrapper>input")
product_name_field.send_keys(product_name)
code_field = tab_general.find_element(By.NAME, 'code')
code_field.send_keys("rd006")
categories = tab_general.find_elements(By.NAME, "categories[]")
check = "Rubber Ducks"
for category in categories:
if category.get_attribute('checked') == "true":
category.click()
if category.get_attribute('data-name') == check:
category.click()
product_quantity = tab_general.find_element(By.NAME, "quantity")
product_quantity.clear()
product_quantity.send_keys(30)
upload_image_field = tab_general.find_element(By.NAME, "new_images[]")
path = os.path.normcase('tests/day_6/images/dark_blue_duck.png')
absolute_path = os.path.abspath(path)
upload_image_field.send_keys(absolute_path)
# Information tab
information_tab = driver.find_element(By.LINK_TEXT, "Information")
information_tab.click()
active_tab = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
assertTrue(active_tab.text == "Information")
info_form = driver.find_element(By.TAG_NAME, "table")
manufacturer_select = info_form.find_element(By.NAME, "manufacturer_id")
manufacturer = "ACME Corp."
Select(manufacturer_select).select_by_visible_text(manufacturer)
short_description = info_form.find_element(By.NAME, "short_description[en]")
short_description_text = f.text(126)
short_description.send_keys(short_description_text)
description = info_form.find_element(By.CLASS_NAME, "trumbowyg-editor")
description_text = f.text(669)
description.send_keys(description_text)
prices_tab = driver.find_element(By.LINK_TEXT, "Prices")
prices_tab.click()
active_tab = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
assertTrue(active_tab.text == "Prices")
prices_form = driver.find_element(By.TAG_NAME, "table")
purchase_price = prices_form.find_element(By.NAME, "purchase_price")
purchase_price.clear()
purchase_price.send_keys(10)
price = prices_form.find_element(By.NAME, "prices[USD]")
price.clear()
price.send_keys(20)
save = driver.find_element(By.NAME, "save")
save.click()
success_message = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".notice.success")))
assertTrue(success_message.text == "Changes were successfully saved.")
assertTrue(driver.find_element(By.LINK_TEXT, product_name), "Product not exist") |
xyuanmu/XX-Net | python3.8.2/Lib/curses/__init__.py | Python | bsd-2-clause | 3,800 | 0.002105 | """curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initscr()
...
"""
from _curses import *
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def ini | tscr():
import _curses, curses
# we call setupterm() here because it raises an error
# instead of calling exit() in error cases.
setupterm(term=_os.environ.get("TERM", "unknown"),
fd=_sys.__stdout__.fileno())
stdscr = _curses.init | scr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from .has_key import has_key
# Wrapper for the entire curses-based application. Runs a function which
# should be the rest of your curses-based application. If the application
# raises an exception, wrapper() will restore the terminal to a sane state so
# you can read the resulting traceback.
def wrapper(*args, **kwds):
"""Wrapper function that initializes curses and calls another function,
restoring normal keyboard/screen behavior on error.
The callable object 'func' is then passed the main window 'stdscr'
as its first argument, followed by any other arguments passed to
wrapper().
"""
if args:
func, *args = args
elif 'func' in kwds:
func = kwds.pop('func')
import warnings
warnings.warn("Passing 'func' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
raise TypeError('wrapper expected at least 1 positional argument, '
'got %d' % len(args))
try:
# Initialize curses
stdscr = initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
noecho()
cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
# Start color, too. Harmless if the terminal doesn't have
# color; user can test with has_color() later on. The try/catch
# works around a minor bit of over-conscientiousness in the curses
# module -- the error return from C start_color() is ignorable.
try:
start_color()
except:
pass
return func(stdscr, *args, **kwds)
finally:
# Set everything back to normal
if 'stdscr' in locals():
stdscr.keypad(0)
echo()
nocbreak()
endwin()
wrapper.__text_signature__ = '(func, /, *args, **kwds)'
|
plotly/plotly.py | packages/python/plotly/plotly/validators/histogram2dcontour/line/_width.py | Python | mit | 469 | 0.002132 | import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.bas | evalidators.NumberValidator):
def __init__(
self, plotly_name="width", parent_name="histogram2dcontour.line", **kwargs
):
super(WidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style+colorbars"),
| min=kwargs.pop("min", 0),
**kwargs
)
|
cheekujodhpur/ae320 | a1/eps.py | Python | mit | 112 | 0.008929 | #!/usr/bin/env | python
#To find epsilon of machine
eps = 1.0
while 1.0+eps > 1.0:
eps = | eps/2.0
print eps
|
VVS1864/SAMoCAD | src/grab_object.py | Python | apache-2.0 | 2,345 | 0.029299 | # -*- coding: utf-8; -*-
def lapping2(par, select): #Выделение/снятие выделения рамкой
if par.lappingFlag == True:#Если кончание выделения
par.c.delete(par.rect)#Удалить прямоугольник выделения
par.rect = None
par.ex = par.rectx
par.ey = par.recty
par.ex2 = par.rectx2
par.ey2 = par.recty2
if par.ex<par.ex2:#Пересчет координат для функций канваса
x1=par.ex
x2=par.ex2
if par.ey<par.ey2:
y1=par.ey
y2=par.ey2
else:
y1=par.ey2 |
y2=par.ey
c = par.c.find_overlapping(x1,y1,x2,y2)
par.mass_collektor(c, select)
else:
x1=par.ex2
x2=par.ex
if par.ey<par.ey2:
y1=par.ey
y2=par.ey2
else:
y1=par.ey2
y2=par.ey
c = par.c.find_enclosed(x1,y1,x2,y2)#Получить все объекты, попавшие полностью в рамк | у
par.mass_collektor(c, select)#Добавить полученное в коллекцию
par.colObj()#Пересчитать количество объектов
par.lappingFlag = False
par.c.unbind_class(par.c, "<Motion>")#Вернуть события в исходное состояние
#par.c.tag_bind('sel', "<Button-1>", par.collektor_sel)
#par.c.tag_bind('sel', "<Shift-Button-1>", par.collektor_desel)
par.c.bind_class(par.c,"<Motion>", par.gpriv)
par.c.bind_class(par.master1,"<Return>", par.old_function)
par.dialog.config(text = u'Command:')
else:#Если начало выделения
par.dialog.config(text = u'Select - ending point:')
par.info.config(text = u'Escape - stop')
par.lappingFlag=True
par.rectx=par.priv_coord[0]
par.recty=par.priv_coord[1]
par.set_coord()
#par.c.tag_unbind('sel', "<Button-1>")
#par.c.tag_unbind('sel', "<Shift-Button-1>")
par.c.unbind_class(par.c,"<Motion>")
par.c.bind_class(par.c, "<Motion>", par.resRect)
par.c.unbind_class(par.master1,"<Return>")
|
elyezer/robottelo | tests/foreman/api/test_host.py | Python | gpl-3.0 | 51,434 | 0 | # -*- encoding: utf-8 -*-
"""Unit tests for the ``hosts`` paths.
An API reference can be found here:
http://theforeman.org/api/apidoc/v2/hosts.html
:Requirement: Host
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: API
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_integer, gen_ipaddr, gen_mac, gen_string
from nailgun import client, entities
from requests.exceptions import HTTPError
from six.moves import http_client
from robottelo.api.utils import publish_puppet_module
from robottelo.config import settings
from robottelo.constants import CUSTOM_PUPPET_REPO, ENVIRONMENT
from robottelo.datafactory import (
invalid_interfaces_list,
invalid_values_list,
valid_data_list,
valid_hosts_list,
valid_interfaces_list,
)
from robottelo.decorators import (
bz_bug_is_open,
run_only_on,
stubbed,
tier1,
tier2,
tier3,
)
from robottelo.decorators.func_locker import lock_function
from robottelo.test import APITestCase
class HostTestCase(APITestCase):
"""Tests for ``entities.Host().path()``."""
@classmethod
@lock_function
def setUpClass(cls):
"""Setup common entities."""
super(HostTestCase, cls).setUpClass()
cls.org = entities.Organization().create()
cls.loc = entities.Location(organization=[cls.org]).create()
# Content View and repository related entities
cls.cv = publish_puppet_module(
[{'author': 'robottelo', 'name': 'generic_1'}],
CUSTOM_PUPPET_REPO,
organization_id=cls.org.id
)
cls.env = entities.Environment().search(
query={'search': u'content_view="{0}"'.format(cls.cv.name)}
)[0].read()
cls.lce = entities.LifecycleEnvironment().search(query={
'search': 'name={0} and organization_id={1}'.format(
ENVIRONMENT, cls.org.id)
})[0].read()
cls.puppet_classes = entities.PuppetClass().search(query={
'search': u'name ~ "{0}" and environment = "{1}"'.format(
'generic_1', cls.env.name)
})
# Compute Resource related entities
cls.compresource_libvirt = entities.LibvirtComputeResource(
organization=[cls.org],
location=[cls.loc],
).create()
cls.image = entities.Image(
compute_resource=cls.compresource_libvirt).create()
@run_only_on('sat')
@tier1
def test_positive_get_search(self):
"""GET ``api/v2/hosts`` and specify the ``search`` parameter.
:id: d63f87e5-66e6-4886-8b44-4129259493a6
:expectedresults: HTTP 200 is returned, along with ``search`` term.
:CaseImportance: Critical
"""
query = gen_string('utf8', gen_integer(1, 100))
response = client.get(
entities.Host().path(),
auth=settings.server.get_credentials(),
data={u'search': query},
verify=False,
)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json()['search'], query)
@run_only_on('sat')
@tier1
def test_positive_get_per_page(self):
"""GET ``api/v2/hosts`` and specify the ``per_page`` parameter.
:id: 9086f41c-b3b9-4af2-b6c4-46b80b4d1cfd
:expectedresults: HTTP 200 is returned, along with per ``per_page``
value.
:CaseImportance: Critical
"""
per_page = gen_integer(1, 1000)
response = client.get(
entities.Host().path(),
auth=settings.server.get_credentials(),
data={u'per_page': per_page},
verify=False,
)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.json()['per_page'], per_page)
@run_only_on('sat')
@tier1
def test_positive_create_with_owner_type(self):
"""Create a host and specify an ``owner_type``.
:id: 9f486875-1f30-4dcb-b7ce-b2cf515c413b
:expectedresults: The host can be read back, and the ``owner_type``
attribute is correct.
:CaseImportance: Critical
"""
for owner_type in ('User', 'Usergroup'):
with self.subTest(owner_type):
if owner_type == 'Usergroup' and bz_bug_is_open(1203865):
continue # instead of skip for compatibility with py.test
host = entities.Host(owner_type=owner_type).create()
self.assertEqual(host.owner_type, owner_type)
@run_only_on('sat')
@tier1
def test_positive_update_owner_type(self):
"""Update a host's ``owner_type``.
:id: b72cd8ef-3a0b-4d2d-94f9-9b64908d699a
:expectedresults: The host's ``owner_type`` attribute is updated as
requested.
:CaseImportance: Critical
"""
host = entities.Host().create()
for owner_type in ('User', 'Usergroup'):
with self.subTest(owner_type):
if owner_type == 'Usergroup' and bz_bug_is_open(1210001):
continue # instead of skip for compatibility with py.test
host.owner_type = owner_type
| host = host.update(['owner_type'])
self.assertEqual(host.owner_type, owner_type)
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create a host with different names and | minimal input parameters
:id: a7c0e8ec-3816-4092-88b1-0324cb271752
:expectedresults: A host is created with expected name
:CaseImportance: Critical
"""
for name in valid_hosts_list():
with self.subTest(name):
host = entities.Host(name=name).create()
self.assertEqual(
host.name,
'{0}.{1}'.format(name, host.domain.read().name)
)
@run_only_on('sat')
@tier1
def test_positive_create_with_ip(self):
"""Create a host with IP address specified
:id: 3f266906-c509-42ce-9b20-def448bf8d86
:expectedresults: A host is created with expected IP address
:CaseImportance: Critical
"""
ip_addr = gen_ipaddr()
host = entities.Host(ip=ip_addr).create()
self.assertEqual(host.ip, ip_addr)
@run_only_on('sat')
@tier2
def test_positive_create_with_hostgroup(self):
"""Create a host with hostgroup specified
:id: 8f9601f9-afd8-4a88-8f28-a5cbc996e805
:expectedresults: A host is created with expected hostgroup assigned
:CaseLevel: Integration
"""
org = entities.Organization().create()
loc = entities.Location(organization=[org]).create()
hostgroup = entities.HostGroup(
location=[loc],
organization=[org],
).create()
host = entities.Host(
hostgroup=hostgroup,
location=loc,
organization=org,
).create()
self.assertEqual(host.hostgroup.read().name, hostgroup.name)
@run_only_on('sat')
@tier2
def test_positive_create_inherit_lce_cv(self):
"""Create a host with hostgroup specified. Make sure host inherited
hostgroup's lifecycle environment and content-view
:id: 229cbdbc-838b-456c-bc6f-4ac895badfbc
:expectedresults: Host's lifecycle environment and content view match
the ones specified in hostgroup
:CaseLevel: Integration
:BZ: 1391656
"""
hostgroup = entities.HostGroup(
content_view=self.cv,
lifecycle_environment=self.lce,
organization=[self.org],
).create()
host = entities.Host(
hostgroup=hostgroup,
organization=self.org,
).create()
self.assertEqual(
host.content_facet_attributes['lifecycle_environment_id'],
hostgroup.lifecycle_environment.id
)
self.assertEqual(
host.content_facet_attributes['content_view_id'],
hostgroup.content_view.id
)
@run_only_on('sat')
@tier1 |
kartoza/geonode | geonode/tasks/tests.py | Python | gpl-3.0 | 1,025 | 0 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.test import TestCase
class TasksTest(TestCase):
"""
Tests geonode.messaging
"""
def setUp(self):
self.adm_un = "admin"
sel | f.a | dm_pw = "admin"
|
mike-tr-adamson/python-driver | tests/integration/standard/test_udts.py | Python | apache-2.0 | 25,583 | 0.002306 | # Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from collections import namedtuple
from functools import partial
from cassandra import InvalidRequest
from cassandra.cluster import Cluster, UserTypeDoesNotExist
from cassandra.query import dict_factory
from cassandra.util import OrderedMap
from tests.integration import get_server_versions, use_singledc, PROTOCOL_VERSION, execute_until_pass
from tests.integration.datatype_utils import update_datatypes, PRIMITIVE_DATATYPES, COLLECTION_TYPES, \
get_sample, get_collection_sample
nested_collection_udt = namedtuple('nested_collection_udt', ['m', 't', 'l', 's'])
nested_collection_udt_nested = namedtuple('nested_collection_udt_nested', ['m', 't', 'l', 's', 'u'])
def setup_module():
use_singledc()
update_datatypes()
class UDTTests(unittest.TestCase):
def setUp(self):
self._cass_version, self._cql_version = get_server_versions()
if self._cass_version < (2, 1, 0):
raise unittest.SkipTest("User Defined Types were introduced in Cassandra 2.1")
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
execute_until_pass(self.session,
"CREATE KEYSPACE udttests WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1'}")
self.cluster.shutdown()
def tearDown(self):
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
execute_until_pass(self.session, "DROP KEYSPACE udttests")
self.cluster.shutdown()
def test_can_insert_unprepared_registered_udts(self):
"""
Test the insertion of unprepared, registered UDTs
"""
c = Cluster(protocol_version=PROTOCOL_VERSION)
s = c.connect("udttests")
s.execute("CREATE TYPE user (age int, name text)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
User = namedtuple('user', ('age', 'name'))
c.register_user_type("udttests", "user", User)
s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User(42, 'bob')))
result = s.execute("SELECT b FROM mytable WHERE a=0")
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual(42, row.b.age)
self.assertEqual('bob', row.b.name)
self.assertTrue(type(row.b) is User)
# use the same UDT name in a different keyspace
s.execute("""
CREATE KEYSPACE udt_test_unprepared_registered2
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
""")
s.set_keyspace("udt_test_unprepared_registered2")
s.execute("CREATE TYPE user (state text, is_cool boolean)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
User = namedtuple('user', ('state', 'is_cool'))
c.register_user_type("udt_test_unprepared_registered2", "user", User)
s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User('Texas', True)))
result = s.execute("SELECT b FROM mytable WHERE a=0")
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('Texas', row.b.state)
self.assertEqual(True, row.b.is_cool)
self.assertTrue(type(row.b) is User)
c.shutdown()
def test_can_register_udt_before_connecting(self):
"""
Test the registration of UDTs before session creation
"""
c = Cluster(protocol_version=PROTOCOL_VERSION)
s = c.connect()
s.execute("""
CREATE KEYSPACE udt_test_register_before_connecting
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
""")
s.set_keyspace("udt_test_register_before_connecting")
s.execute("CREATE TYPE user (age int, name text)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
s.execute("""
CREATE KEYSPACE udt_test_register_before_connecting2
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
""")
s.set_keyspace("udt_test_register_before_connecting2")
s.execute("CREATE TYPE user (state text, is_cool boolean)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
# now that types are defined, shutdown and re-create Cluster
c.shutdown()
c = Cluster(protocol_version=PROTOCOL_VERSION)
User1 = namedtuple('user', ('age', 'name'))
User2 = namedtuple('user', ('state', 'is_cool'))
c.register_user_type("udt_test_register_before_connecting", "user", User1)
c.register_user_type("udt_test_register_before_connecting2", "user", User2)
s = c.connect()
s.set_keyspace("udt_test_register_before_connecting")
s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User1(42, 'bob')))
result = s.execute("SELECT b FROM mytable WHERE a=0")
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual(42, row.b.age)
self.assertEqua | l('bob', row.b.name)
self.assertTrue(type(row.b) is User1)
# use the same UDT name in a different keyspace
s.set_keyspace("udt_test_register_before_connecting2")
s.execute("INSERT INTO mytable (a, b) VALUES (%s, %s)", (0, User2('Texas', True)))
result = s.execute("SELECT b FROM mytable WHERE a=0")
| self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('Texas', row.b.state)
self.assertEqual(True, row.b.is_cool)
self.assertTrue(type(row.b) is User2)
c.shutdown()
def test_can_insert_prepared_unregistered_udts(self):
"""
Test the insertion of prepared, unregistered UDTs
"""
c = Cluster(protocol_version=PROTOCOL_VERSION)
s = c.connect("udttests")
s.execute("CREATE TYPE user (age int, name text)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
User = namedtuple('user', ('age', 'name'))
insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
s.execute(insert, (0, User(42, 'bob')))
select = s.prepare("SELECT b FROM mytable WHERE a=?")
result = s.execute(select, (0,))
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual(42, row.b.age)
self.assertEqual('bob', row.b.name)
# use the same UDT name in a different keyspace
s.execute("""
CREATE KEYSPACE udt_test_prepared_unregistered2
WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }
""")
s.set_keyspace("udt_test_prepared_unregistered2")
s.execute("CREATE TYPE user (state text, is_cool boolean)")
s.execute("CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)")
User = namedtuple('user', ('state', 'is_cool'))
insert = s.prepare("INSERT INTO mytable (a, b) VALUES (?, ?)")
s.execute(insert, (0, User('Texas', True)))
select = s.prepare("SELECT b FROM mytable WHERE a=?")
result = s.execute(select, (0,))
self.assertEqual(1, len(result))
row = result[0]
self.assertEqual('Texas', row.b.state)
self.assertEqual(True, row.b.is_cool)
c.shutdown()
def test_can_insert_prepared_registered_udts(self):
"""
Test the insertion of pre |
reuvenderay/PythonProjects | enterInfo.py | Python | mit | 1,339 | 0.003734 | __author__ = 'Reuven'
import sys
class GetInfo():
def __init__(self):
self.u_name = ""
self.age = ""
self.uid = ""
def get_name(self):
while self.u_name.isalpha() is F | alse:
self.u_name = raw_input("Please enter your name:")[:20]
if self.u_name == "exit" or self.u_name == "quit":
sys.exit()
return self.u_name
def get_age(self):
while self.age.isdigit() is False or int(self.age) not in range(1, 120):
self.age = raw_input("Please enter your age:")
if self.age == "exit" or self.age == "quit":
| sys.exit()
return self.age
def get_user_id(self):
while len(self.uid) != 6 or self.uid.isdigit() is False or int(self.uid) not in range(1, 1000000):
self.uid = raw_input("Please enter your User ID:")
if self.uid == "exit" or self.uid == "quit":
sys.exit()
return self.uid
def print_info(self):
name = self.get_name()
age = self.get_age()
uid = self.get_user_id()
print "You are %s, aged %s. next year you will be %d, with user id %s, the next user is %s."\
% (name, age, int(age)+1, uid, int(uid)+1)
if __name__ == "__main__":
get_stuff = GetInfo()
get_stuff.print_info()
|
XiMuYouZi/PythonDemo | Web/zhihu/error/__init__.py | Python | mit | 88 | 0.011364 | from flask import Blueprint
error = Blueprint('error', __name__, )
from . import vie | w
| |
us-ignite/us_ignite | us_ignite/snippets/tests/models_tests.py | Python | bsd-3-clause | 1,160 | 0.000862 | from nose.tools import eq_, ok_
from django.test import TestCase
from us_ignite.snippets.models import Snippet
from us_ignite.snippets.tests import fixtures
class TestSnippetModel(TestCase):
def tearDown(self):
Snippet.objects.all().delete()
def get_instance(self):
data = {
'name': 'Gigabit snippets',
'slug': 'featured',
'url': 'http://us-ignite.org/',
}
return Snippet.objects.create(**data)
def test_instance_is_created_successfully(self):
instance = self.get_instance()
eq_(instance.name, 'Gigabit snippets')
eq_(instance.status, Sn | ippet.DRAFT)
eq_(instance.url, 'http://us-ignite.org/')
eq_(instance.url_text, '')
eq_(instance.body, '')
eq_(instance.image, '')
eq_(instance.is_featured, False)
ok_(instance.created)
ok_(instance.modified)
eq_(instance.slug, 'featured')
ok_(instance.id)
eq_(instance.notes, '')
def test_instance_name_is_used_as_title(self):
| instance = fixtures.get_snippet(name='About page')
eq_(instance.title, 'About page')
|
ardi69/pyload-0.4.10 | pyload/plugin/crypter/LinkSaveIn.py | Python | gpl-3.0 | 707 | 0.015559 | # -*- coding: utf-8 -*-
from pyload.plugin.internal.SimpleDereferer import SimpleDereferer
class LinkSaveIn(SimpleDereferer):
__name = "LinkSaveIn"
__type = "crypt | er"
__version = "2.03"
__pattern = r'https?://(?:www\.)?linksave\.in/\w+'
__config = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description = """LinkSave.in decrypter plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "vuolter@gmail.com")]
COOKIES = [("linksave.in", "Linksave_Language", "english")]
OF | FLINE_PATTERN = r'>(Error )?404 -'
|
gustavofonseca/oai-pmh | oaipmh/entities.py | Python | bsd-2-clause | 1,659 | 0.001211 | from collections import namedtuple
RepositoryMeta = namedtuple('RepositoryMeta', '''repositoryName baseURL
protocolVersion adminEmail earliestDatestamp deletedRecord
granularity''')
OAIRequest = namedtuple('OAIRequest', '''verb identifier metadataPrefix set
resumptionToken from_ until''')
MetadataFormat = namedtuple('MetadataFormat', '''metadataPrefix schema
metadataNamespace''')
ResumptionToken = namedtuple('ResumptionToken', '''set from_ until offset count
metadataPrefix''')
"""
Representa um objeto de informação.
É inspirado no modelo de dados Dublin Core conforme definido originalmente em
[RFC2413]. Com exceção de ``repoidentifier`` e ``datestamp``, todos os atributos
são multivalorados, e alguns são listas associativas.
http://dublin | core.org/documents/1999/07/02/dces/
sample = {
'ridentifier': <str>,
'datestamp': <datetime>,
'setspec': <List[str]>,
'title': <List[Tuple[str, s | tr]]>,
'creator': <List[str]>,
'subject': <List[Tuple[str, str]]>,
'description': <List[Tuple[str, str]]>,
'publisher': <List[str]>,
'contributor': <List[str]>,
'date': <List[datetime]>,
'type': <List[str]>,
'format': <List[str]>,
'identifier': <List[str]>,
'source': <List[str]>,
'language': <List[str]>,
'relation': <List[str]>,
'rights': <List[str]>,
},
res = Resource(**sample)
"""
Resource = namedtuple('Resource', '''ridentifier datestamp setspec title
creator subject description publisher contributor date type format
identifier source language relation rights''')
Set = namedtuple('Set', '''setSpec setName''')
|
diofant/diofant | diofant/tests/integrals/test_quadrature.py | Python | bsd-3-clause | 20,443 | 0.000587 | from diofant import Rational
from diofant.integrals.quadrature import (gauss_chebyshev_t, gauss_chebyshev_u,
gauss_gen_laguerre, gauss_hermite,
gauss_jacobi, gauss_laguerre,
gauss_legendre)
__all__ = ()
def test_legendre():
x, w = gauss_legendre(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['2.0000000000000000']
x, w = gauss_legendre(2, 17)
assert [str(r) for r in x] == ['-0.57735026918962576',
'0.57735026918962576']
assert [str(r) for r in w] == ['1.0000000000000000', '1.0000000000000000']
x, w = gauss_legendre(3, 17)
assert [str(r) for r in x] == ['-0.77459666924148338', '0',
'0.77459666924148338']
assert [str(r) for r in w] == ['0.55555555555555556',
'0.88888888888888889', '0.55555555555555556']
x, w = gauss_legendre(4, 17)
assert [str(r) for r in x] == ['-0.86113631159405258',
'-0.33998104358485626', '0.33998104358485626',
'0.86113631159405258']
assert [str(r) for r in w] == ['0.34785484513745386',
'0.65214515486254614', '0.65214515486254614',
'0.34785484513745386']
def test_legendre_precise():
x, w = gauss_legendre(3, 40)
assert [str(r) for r in x] == \
['-0.7745966692414833770358530799564799221666', '0',
'0.7745966692414833770358530799564799221666']
assert [str(r) for r in w] == \
['0.5555555555555555555555555555555555555556',
'0.8888888888888888888888888888888888888889',
'0.5555555555555555555555555555555555555556']
def test_laguerre():
x, w = gauss_laguerre(1, 17)
assert [str(r) for r in x] == ['1.0000000000000000']
assert [str(r) for r in w] == ['1.0000000000000000']
x, w = gauss_laguerre(2, 17)
assert [str(r) for r in x] == ['0.58578643762690495',
'3.4142135623730950']
assert [str(r) for r in w] == ['0.85355339059327376',
'0.14644660940672624']
x, w = gauss_laguerre(3, 17)
assert [str(r) for r in x] == [
'0.41577455678347908',
'2.2942803602790417',
'6.2899450829374792',
]
assert [str(r) for r in w] == [
'0.71109300992917302',
'0.27851773356924085',
'0.010389256501586136',
]
x, w = gauss_laguerre(4, 17)
assert [str(r) for r in x] == ['0.32254768961939231', '1.7457611011583466',
'4.5366202969211280', '9.3950709123011331']
assert [str(r) for r in w] == ['0.60315410434163360',
'0.35741869243779969', '0.038887908515005384',
'0.00053929470556132745']
x, w = gauss_laguerre(5, 17)
assert [str(r) for r in x] == ['0.26356031971814091', '1.4134030591065168',
'3.5964257710407221', '7.0858100058588376', '12.640800844275783']
assert [str(r) for r in w] == ['0.52175561058280865',
'0.39866681108317593', '0.075942449681707595',
'0.0036117586799220485', '2.3369972385776228e-5']
def test_laguerre_precise():
x, w = gauss_laguerre(3, 40)
assert [str(r) for r in x] == \
['0.4157745567834790833115338731282744735466',
'2.294280360279041719822050361359593868960',
'6.289945082937479196866415765512131657493']
assert [str(r) for r in w] == \
['0.7110930099291730154495901911425944313094',
'0.2785177335692408488014448884567264810349',
'0.01038925650158613574896492040067908765572']
def test_hermite():
x, w = gauss_hermite(1, 17)
assert [str(r) for r in x] == ['0']
assert [str(r) for r in w] == ['1.7724538509055160']
x, w = gauss_hermite(2, 17)
assert [str(r) for r in x] == ['-0.70710678118654752',
'0.70710678118654752']
assert [str(r) for r in w] == ['0.88622692545275801',
'0.88622692545275801']
x, w = gauss_hermite(3, 17)
assert [str(r) for r in x] == [
'-1.2247448713915890',
'0',
'1.2247448713915890']
assert [str(r) for r in | w] == [
'0.29540897515091934',
'1.1816359006036774',
'0.29540897515091934']
x, w = gauss_hermite(4, 17)
assert [str(r) for r in x] == [
'-1.6506801238857846',
'-0.52464762327529032',
'0.52464762327529032',
'1.6 | 506801238857846'
]
assert [str(r) for r in w] == [
'0.081312835447245177',
'0.80491409000551284',
'0.80491409000551284',
'0.081312835447245177'
]
x, w = gauss_hermite(5, 17)
assert [str(r) for r in x] == [
'-2.0201828704560856',
'-0.95857246461381851',
'0',
'0.95857246461381851',
'2.0201828704560856'
]
assert [str(r) for r in w] == [
'0.019953242059045913',
'0.39361932315224116',
'0.94530872048294188',
'0.39361932315224116',
'0.019953242059045913'
]
def test_hermite_precise():
x, w = gauss_hermite(3, 40)
assert [str(r) for r in x] == [
'-1.224744871391589049098642037352945695983',
'0',
'1.224744871391589049098642037352945695983'
]
assert [str(r) for r in w] == [
'0.2954089751509193378830279138901908637996',
'1.181635900603677351532111655560763455198',
'0.2954089751509193378830279138901908637996'
]
def test_gen_laguerre():
x, w = gauss_gen_laguerre(1, Rational(-1, 2), 17)
assert [str(r) for r in x] == ['0.50000000000000000']
assert [str(r) for r in w] == ['1.7724538509055160']
x, w = gauss_gen_laguerre(2, Rational(-1, 2), 17)
assert [str(r) for r in x] == ['0.27525512860841095',
'2.7247448713915890']
assert [str(r) for r in w] == ['1.6098281800110257',
'0.16262567089449035']
x, w = gauss_gen_laguerre(3, Rational(-1, 2), 17)
assert [str(r) for r in x] == ['0.19016350919348813',
'1.7844927485432516',
'5.5253437422632603']
assert [str(r) for r in w] == ['1.4492591904487850',
'0.31413464064571329',
'0.0090600198110176913']
x, w = gauss_gen_laguerre(4, Rational(-1, 2), 17)
assert [str(r) for r in x] == ['0.14530352150331709',
'1.3390972881263614',
'3.9269635013582872',
'8.5886356890120343']
assert [str(r) for r in w] == ['1.3222940251164826',
'0.41560465162978376',
'0.034155966014826951',
'0.00039920814442273524']
x, w = gauss_gen_laguerre(5, Rational(-1, 2), 17)
assert [str(r) for r in x] == ['0.11758132021177814',
'1.0745620124369040',
'3.0859374437175500',
'6.4147297336620305',
'11.807189489971737']
assert [str(r) for r in w] == ['1.2217252674706516',
'0.48027722216462937',
'0.067748788910962126',
'0.0026872914935624654',
'1.5280865710465241e-5']
x, w = gauss_gen_laguerre(1, 2, 17)
assert [str(r) for r in x] == ['3.0000000000000000']
assert [str(r) for r in w] == ['2.0000000000000000']
x, w = gauss_gen_laguerre(2, 2, 17)
assert [str(r) for r in x] == ['2.0000000000000000',
'6.0000000000000000']
assert [str(r) for r in w] == ['1.50000000000000 |
alzeih/ava | ava_core/organize/signals.py | Python | gpl-3.0 | 1,780 | 0.005056 | # Django Imports
from django.db.models.signals import post_save
from django.dispatch import receiver
# Python Imports
from logging import getLogger
# Local Imports
from .models import Person, PersonAttribute, PersonIdentifier, PersonIdentifierAttribute, GroupIdentifier, \
GroupIdentifierAttribute
# Logging
logger = getLogger(__name__)
# Implementation
@receiver(post_save, sender=Person)
def signal_ava_organize_person_post_save(sender, created, instance, **kwargs):
logger.debug('Signal Called'
'- organize::signal_ava_organi | ze_person_post_save')
if created:
logger.debug('Signal Fired'
' - organize::signal_ava_organize_person_post_save')
attribute = PersonAttribute.objects.create(person=instance)
attribute.save()
@receiver(post_save, sender=PersonIdentifier)
def | signal_ava_organize_person_identifier_post_save(sender,created, instance, **kwargs):
logger.debug('Signal Called'
'- organize::signal_ava_organize_person_identifier_post_save')
if created:
logger.debug('Signal Fired'
' - organize::signal_ava_organize_person_identifier_post_save')
attribute = PersonIdentifierAttribute.objects.create(identifier=instance)
attribute.save()
@receiver(post_save, sender=GroupIdentifier)
def signal_ava_organize_group_identifier_post_save(sender,created, instance, **kwargs):
logger.debug('Signal Called'
'- organize::signal_ava_organize_group_identifier_post_save')
if created:
logger.debug('Signal Fired'
' - organize::signal_ava_organize_group_identifier_post_save')
attribute = GroupIdentifierAttribute.objects.create(identifier=instance)
attribute.save()
|
1065865483/0python_script | four/Webdriver/Mouse_action.py | Python | mit | 603 | 0.008696 | from selenium import webdriver
from time import sleep
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Firefox()
driver.get("https://www.baidu.com/")
driver.maximize_window()
driver.find_element_by_id("kw").send_keys("python")
element=driver.find_element_by_id("kw")
#双击操作
ActionChains(dri | ver).double_click(element).perform()
sleep(2)
#右击鼠标操作
ActionChains(driver).context_click(element).perform()
sleep(2)
#鼠标悬停
above=driver.find_element_by_css_selector(".pf")
ActionChains(driver).move_to_element(a | bove).perform()
sleep(2)
driver.quit()
|
torchhound/projects | codeGolf/find1.py | Python | gpl-3.0 | 368 | 0.065217 | #written for | http://codegolf.stackexchange.com/questions/79207/minimize-those-ones
def find1(num):
z = 10
y = 11
while True:
if num%z==0:
if num/2 <= y/2:
print(num)
break
else:
print(num/y+num%y)
break
z += 10
def main():
cases = [0,7,121,72,1000,2016]
for th in range(0,5):
find1(cases[th - 1])
if __name__ == '__main__': |
main()
|
davidszotten/pytest-django | tests/test_django_configurations.py | Python | bsd-3-clause | 3,011 | 0 | """Tests which check the various ways you can set DJANGO_SETTINGS_MODULE
I | f these tests fail you probably forgot to install django-configurations.
"""
import pytest
pytest.importorskip('configurations')
try:
import configurations.importer
configurations
except ImportError as e:
if 'LaxOptionParser' in e.args[0]:
pytest.skip('This version of django-configurations is incompatible wit | h Django: ' # noqa
'https://github.com/jezdez/django-configurations/issues/65') # noqa
BARE_SETTINGS = '''
from configurations import Settings
class MySettings(Settings):
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
'''
def test_dc_env(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_env')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'MySettings')
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_env.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_settings():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_dc_ini(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'tpkg.settings_env')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'MySettings')
testdir.makeini("""
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
DJANGO_CONFIGURATION = DO_NOT_USE_ini
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_env.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
def test_dc_option(testdir, monkeypatch):
monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'DO_NOT_USE_env')
monkeypatch.setenv('DJANGO_CONFIGURATION', 'DO_NOT_USE_env')
testdir.makeini("""
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
DJANGO_CONFIGURATION = DO_NOT_USE_ini
""")
pkg = testdir.mkpydir('tpkg')
settings = pkg.join('settings_opt.py')
settings.write(BARE_SETTINGS)
testdir.makepyfile("""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
""")
result = testdir.runpytest('--ds=tpkg.settings_opt', '--dc=MySettings')
result.stdout.fnmatch_lines(['*1 passed*'])
assert result.ret == 0
|
vikraman/gentoostats | server/kwd.py | Python | gpl-3.0 | 1,062 | 0.017891 |
import helpers
from config import render, db
class Keyword(object):
def GET(self):
keyword_count = db.query('SELECT KEYWORD,\
COUNT(DISTINCT IPKEY) AS PACKAGES,\
COUNT(DISTINCT UUID) AS HOSTS\
FROM GLOBAL_KEYWORDS NATURAL JOIN KEYWORDS\
NATURAL JOIN INSTALLED_PACKAGES GROUP BY KEYWORD')
keyword_data = dict()
for t in keyword_count:
keyword_data[t['KEYWORD']] = {'HOSTS':t['HOSTS'], 'PACKAGES':t['PACKAGES']}
if helpers.is_json_request():
return helpers.serialize(keyword_data)
else:
| # generate plot
x_ticklabels = keyword_data.keys()
y_values = [ keyword_data[k]['PACKAGES'] for k in x_ticklabels ]
keyword_plot = helpers.barchart(title = 'Installed packages per keywo | rd',
x_label = 'Keyword', y_label = 'Number of Packages',
x_ticklabels = x_ticklabels, y_values = y_values)
return render.keyword(keyword_data, keyword_plot)
|
guildai/guild | examples/separating-inputs-and-outputs/downstream.py | Python | apache-2.0 | 185 | 0 | import os
# Ensure directory for model.
if not os.path.exists("models"):
os.mkdir("mode | ls")
# Write model.
with open("models/checkpoint.txt", "w") as | f:
f.write("downstream")
|
onkelpit/i3pystatus | i3pystatus/moon.py | Python | mit | 2,598 | 0.000385 | from i3pystatus import IntervalModule, formatp
import datetime
import math
import decimal
import os
from i3pystatus.core.util import TimeWrapper
dec = decimal.Decimal
class MoonPhase(IntervalModule):
"""
Available Formatters
status: Allows for mapping of current moon phase
- New Moon:
- Waxing Crescent:
- First Quarter:
- Waxing Gibbous:
- Full Moon:
- Waning Gibbous:
- Last Quarter:
- Waning Crescent:
"""
settings = (
"for | mat",
("status", "Current moon phase"),
("illum", "Percentage | that is illuminated"),
("color", "Set color"),
)
format = "{illum} {status}"
interval = 60 * 60 * 2 # every 2 hours
status = {
"New Moon": "NM",
"Waxing Crescent": "WaxCres",
"First Quarter": "FQ",
"Waxing Gibbous": "WaxGib",
"Full Moon": "FM",
"Waning Gibbous": "WanGib",
"Last Quarter": "LQ",
"Waning Cresent": "WanCres",
}
color = {
"New Moon": "#00BDE5",
"Waxing Crescent": "#138DD8",
"First Quarter": "#265ECC",
"Waxing Gibbous": "#392FBF",
"Full Moon": "#4C00B3",
"Waning Gibbous": "#871181",
"Last Quarter": "#C32250",
"Waning Crescent": "#FF341F",
}
def pos(now=None):
days_in_second = 86400
now = datetime.datetime.now()
difference = now - datetime.datetime(2001, 1, 1)
days = dec(difference.days) + (dec(difference.seconds) / dec(days_in_second))
lunarCycle = dec("0.20439731") + (days * dec("0.03386319269"))
return lunarCycle % dec(1)
def current_phase(self):
lunarCycle = self.pos()
index = (lunarCycle * dec(8)) + dec("0.5")
index = math.floor(index)
return {
0: "New Moon",
1: "Waxing Crescent",
2: "First Quarter",
3: "Waxing Gibbous",
4: "Full Moon",
5: "Waning Gibbous",
6: "Last Quarter",
7: "Waning Crescent",
}[int(index) & 7]
def illum(self):
phase = 0
lunarCycle = float(self.pos()) * 100
if lunarCycle > 50:
phase = 100 - lunarCycle
else:
phase = lunarCycle * 2
return phase
def run(self):
fdict = {
"status": self.status[self.current_phase()],
"illum": self.illum(),
}
self.output = {
"full_text": formatp(self.format, **fdict),
"color": self.color[self.current_phase()],
}
|
ifuding/Kaggle | SVPC/Code/philly/leak_cols.py | Python | apache-2.0 | 43,679 | 0.007509 | LEAK_LIST = [
['f190486d6', '58e2e02e6', 'eeb9cd3aa', '9fd594eec', '6eef030c1','15ace8c9f', 'fb0f5dbfe', '58e056e12', '20aa07010', '024c577b9','d6bb78916', 'b43a7cfd5', '58232a6fb', '1702b5bf0', '324921c7b', '62e59a501', '2ec5b290f', '241f0f867', 'fb49e4212', '66ace2992','f74e8f13d', '5c6487af1', '963a49cdc', '26fc93eb7', '1931ccfdd', '703885424', '70feb1494', '491b9ee45', '23310aa6f', 'e176a204a','6619d81fc', '1db387535', 'fc99f9426', '91f701ba2', '0572565c2','190db8488', 'adb64ff71', 'c47340d97', 'c5a231d81', '0ff32eb98']
,['e20edfcb8', '842415efb', '300d6c1f1', '720f83290', '069a2c70b', '87a91f998', '611151826', '74507e97f', '504e4b156', 'baa95693d', 'cb4f34014', '5239ceb39', '81e02e0fa', 'dfdf4b580', 'fc9d04cd7', 'fe5d62533', 'bb6260a44', '08d1f69ef', 'b4ced4b7a', '98d90a1d1', 'b6d206324', '6456250f1', '96f5cf98a', 'f7c8c6ad3', 'cc73678bf', '5fb85905d', 'cb71f66af', '212e51bf6', 'd318bea95', 'b70c62d47', '11d86fa6a', '3988d0c5e | ', '42cf36d73', '9f494676e', '1c68ee044', 'a728310c8', '612bf9b47', '105233ed9', 'c18cc7d3d', 'f08c20722']
,['266525925', '4b6dfc880', '2cff4bf0c', 'a3382e205', '6488c8200', '547d3135b', 'b46191036', '453128993', '2 | 599a7eb7', '2fc60d4d9', '009319104', 'de14e7687', 'aa31dd768', '2b54cddfd', 'a67d02050', '37aab1168', '939cc02f5', '31f72667c', '6f951302c', '54723be01', '4681de4fd', '8bd53906a', '435f27009', 'f82167572', 'd428161d9', '9015ac21d', 'ec4dc7883', '22c7b00ef', 'd4cc42c3d', '1351bf96e', '1e8801477', 'b7d59d3b5', 'a459b5f7d', '580f5ff06', '39b3c553a', '1eec37deb', '692c44993', 'ce8ce671e', '88ef1d9a8', 'bf042d928']
,['81de0d45e', '18562fc62', '543c24e33', '0256b6714', 'd6006ff44', '6a323434b', 'e3a38370e', '7c444370b', '8d2d050a2', '9657e51e1', '13f3a3d19', 'b5c839236', '70f3033c6', 'f4b374613', '849125d91', '16b532cdc', '88219c257', '74fb8f14c', 'fd1102929', '699712087', '22501b58e', '9e9274b24', '2c42b0dce', '2c95e6e31', '5263c204d', '526ed2bec', '01f7de15d', 'cdbe394fb', 'adf357c9b', 'd0f65188c', 'b8a716ebf', 'ef1e1fac8', 'a3f2345bf', '110e4132e', '586b23138', '680159bab', 'f1a1562cd', '9f2f1099b', 'bf0e69e55', 'af91c41f0']
,['1d9078f84', '64e483341', 'a75d400b8', '4fe8154c8', '29ab304b9', '20604ed8f', 'bd8f989f1', 'c1b9f4e76', '4824c1e90', '4ead853dc', 'b599b0064', 'd26279f1a', '58ed8fb53', 'ff65215db', '402bb0761', '74d7998d4', 'c7775aabf', '9884166a7', 'beb7f98fd', 'fd99c18b5', 'd83a2b684', '18c35d2ea', '0c8063d63', '400e9303d', 'c976a87ad', '8a088af55', '5f341a818', '5dca793da', 'db147ffca', '762cbd0ab', 'fb5a3097e', '8c0a1fa32', '01005e5de', '47cd6e6e4', 'f58fb412c', 'a1db86e3b', '50e4f96cf', 'f514fdb2e', '7a7da3079', 'bb1113dbb']
,['ced6a7e91', '9df4daa99', '83c3779bf', 'edc84139a', 'f1e0ada11', '73687e512', 'aa164b93b', '342e7eb03', 'cd24eae8a', '8f3740670', '2b2a10857', 'a00adf70e', '3a48a2cd2', 'a396ceeb9', '9280f3d04', 'fec5eaf1a', '5b943716b', '22ed6dba3', '5547d6e11', 'e222309b0', '5d3b81ef8', '1184df5c2', '2288333b4', 'f39074b55', 'a8b721722', '13ee58af1', 'fb387ea33', '4da206d28', 'ea4046b8d', 'ef30f6be5', 'b85fa8b27', '2155f5e16', '794e93ca6', '070f95c99', '939f628a7', '7e814a30d', 'a6e871369', '0dc4d6c7d', 'bc70cbc26', 'aca228668']
,['5030aed26', 'b850c3e18', '212efda42', '9e7c6b515', '2d065b147', '49ca7ff2e', '37c85a274', 'ea5ed6ff7', 'deabe0f4c', 'bae4f747c', 'ca96df1db', '05b0f3e9a', 'eb19e8d63', '235b8beac', '85fe78c6c', 'cc507de6c', 'e0bb9cf0b', '80b14398e', '9ca0eee11', '4933f2e67', 'fe33df1c4', 'e03733f56', '1d00f511a', 'e62cdafcf', '3aad48cda', 'd36ded502', '92b13ebba', 'f30ee55dd', '1f8754c4e', 'db043a30f', 'e75cfcc64', '5d8a55e6d', '6e29e9500', 'c5aa7c575', 'c2cabb902', 'd251ee3b4', '73700eaa4', '8ab6f5695', '54b1c1bc0', 'cbd0256fb']
,['c928b4b74', '8e4d0fe45', '6c0e0801a', '02861e414', 'aac52d8d9', '041c5d0c9', 'd7875bb6c', 'e7c0cfd0f', 'd48c08bda', '0c9462c08', '57dd44c29', 'a93118262', '850027e38', 'db3839ab0', '27461b158', '32174174c', '9306da53f', '95742c2bf', '5831f4c76', '1e6306c7c', '06393096a', '13bdd610a', 'd7d314edc', '9a07d7b1f', '4d2671746', '822e49b95', '3c8a3ced0', '83635fb67', '1857fbccf', 'c4972742d', 'b6c0969a2', 'e78e3031b', '36a9a8479', 'e79e5f72c', '092271eb3', '74d7f2dc3', '277ef93fc', 'b30e932ba', '8f57141ec', '350473311']
,['f1eeb56ae', '62ffce458', '497adaff8', 'ed1d5d137', 'faf7285a1', 'd83da5921', '0231f07ed', '7950f4c11', '051410e3d', '39e1796ab', '2e0148f29', '312832f30', '6f113540d', 'f3ee6ba3c', 'd9fc63fa1', '6a0b386ac', '5747a79a9', '64bf3a12a', 'c110ee2b7', '1bf37b3e2', 'fdd07cac1', '0872fe14d', 'ddef5ad30', '42088cf50', '3519bf4a4', 'a79b1f060', '97cc1b416', 'b2790ef54', '1a7de209c', '2a71f4027', 'f118f693a', '15e8a9331', '0c545307d', '363713112', '73e591019', '21af91e9b', '62a915028', '2ab5a56f5', 'a8ee55662', '316b978cd']
,['48b839509', '2b8851e90', '28f75e1a5', '0e3ef9e8f', '37ac53919', '7ca10e94b', '4b6c549b1', '467aa29ce', '74c5d55dc', '0700acbe1', '44f3640e4', 'e431708ff', '097836097', 'd1fd0b9c2', 'a0453715a', '9e3aea49a', '899dbe405', '525635722', '87a2d8324', 'faf024fa9', 'd421e03fd', '1254b628a', 'a19b05919', '34a4338bc', '08e89cc54', 'a29c9f491', 'a0a8005ca', '62ea662e7', '5fe6867a4', '8b710e161', '7ab926448', 'd04e16aed', '4e5da0e96', 'ff2c9aa8f', 'b625fe55a', '7124d86d9', '215c4d496', 'b6fa5a5fd', '55a7e0643', '0a26a3cfe']
,['7f72c937f', '79e55ef6c', '408d86ce9', '7a1e99f69', '736513d36', '0f07e3775', 'eb5a2cc20', '2b0fc604a', 'aecd09bf5', '91de54e0a', '66891582e', '20ef8d615', '8d4d84ddc', 'dfde54714', '2be024de7', 'd19110e37', 'e637e8faf', '2d6bd8275', 'f3b4de254', '5cebca53f', 'c4255588c', '23c780950', 'bc56b26fd', '55f4891bb', '020a817ab', 'c4592ac16', '542536b93', '37fb8b375', '0a52be28f', 'bd7bea236', '1904ce2ac', '6ae9d58e0', '5b318b659', '25729656f', 'f8ee2386d', '589a5c62a', '64406f348', 'e157b2c72', '0564ff72c', '60d9fc568']
,['3b843ae7e', 'c8438b12d', 'd1b9fc443', '19a45192a', '63509764f', '6b6cd5719', 'b219e3635', '4b1d463d7', '4baa9ff99', 'b0868a049', '3e3ea106e', '043e4971a', 'a2e5adf89', '25e2bcb45', '3ac0589c3', '413bbe772', 'e23508558', 'c1543c985', '2dfea2ff3', '9dcdc2e63', '1f1f641f1', '75795ea0a', 'dff08f7d5', '914d2a395', '00302fe51', 'c0032d792', '9d709da93', 'cb72c1f0b', '5cf7ac69f', '6b1da7278', '47b5abbd6', '26163ffe1', '902c5cd15', '45bc3b302', '5c208a931', 'e88913510', 'e1d6a5347', '38ec5d3bb', 'e3d64fcd7', '199d30938']
,['51c141e64', '0e348d340', '64e010722', '55a763d90', '13b54db14', '01fdd93d3', '1ec48dbe9', 'cf3841208', 'd208491c8', '90b0ed912', '633e0d42e', '9236f7b22', '0824edecb', '71deb9468', '1b55f7f4d', '377a76530', 'c47821260', 'bf45d326d', '69f20fee2', 'd6d63dd07', '5ab3be3e1', '93a31829f', '121d8697e', 'f308f8d9d', '0e44d3981', 'ecdef52b2', 'c69492ae6', '58939b6cc', '3132de0a3', 'a175a9aa4', '7166e3770', 'abbde281d', '23bedadb2', 'd4029c010', 'fd99222ee', 'bd16de4ba', 'fb32c00dc', '12336717c', '2ea42a33b', '50108b5b5']
,['920a04ee2', '93efdb50f', '15ea45005', '78c57d7cd', '91570fb11', 'c5dacc85b', '145c7b018', '590b24ab1', 'c283d4609', 'e8bd579ae', '7298ca1ef', 'ce53d1a35', 'a8f80f111', '2a9fed806', 'feb40ad9f', 'cfd255ee3', '31015eaab', '303572ae2', 'cd15bb515', 'cb5161856', 'a65b73c87', '71d64e3f7', 'ec5fb550f', '4af2493b6', '18b4fa3f5', '3d655b0ed', '5cc9b6615', '88c0ec0a6', '8722f33bb', '5ed0c24d0', '54f26ee08', '04ecdcbb3', 'ade8a5a19', 'd5efae759', 'ac7a97382', 'e1b20c3a6', 'b0fcfeab8', '438b8b599', '43782ef36', 'df69cf626']
,['4302b67ec', '75b663d7d', 'fc4a873e0', '1e9bdf471', '86875d9b0', '8f76eb6e5', '3d71c02f0', '05c9b6799', '26df61cc3', '27a7cc0ca', '9ff21281c', '3ce93a21b', '9f85ae566', '3eefaafea', 'afe8cb696', '72f9c4f40', 'be4729cb7', '8c94b6675', 'ae806420c', '63f493dba', '5374a601b', '5291be544', 'acff85649', '3690f6c26', '26c68cede', '12a00890f', 'dd84964c8', 'a208e54c7', 'fb06e8833', '7de39a7eb', '5fe3acd24', 'e53805953', '3de2a9e0d', '2954498ae', '6c3d38537', '86323e98a', 'b719c867c', '1f8a823f2', '9cc5d1d8f', 'd3fbad629']
,['fec5644cf', 'caa9883f6', '9437d8b64', '68811ba58', 'ef4b87773', 'ff558c2f2', '8d918c64f', '0b8e10df6', '2d6565ce2', '0fe78acfa', 'b75aa754d', '2ab9356a0', '4e86dd8f3', '348aedc21', 'd7568383a', '856856d94', '69900c0d1', '02c21443c', '5190d6dca', '20551fa5b', '79cc300c7', '8d8276242', 'da22ed2b8', '89cebceab', 'f171b61 |
canvasnetworks/canvas | website/canvas/migrations/0104_auto__add_field_userwarning_comment.py | Python | bsd-3-clause | 16,380 | 0.007143 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserWarning.comment'
db.add_column('canvas_userwarning', 'comment', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['canvas.Comment'], null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserWarning.comment'
db.delete_column('canvas_userwarning', 'comment_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_categories'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Co | ntent']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('can | vas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modera |
TamiaLab/PySkCode | tests/tests_tags/tests_textmodifiers.py | Python | agpl-3.0 | 3,824 | 0.003138 | """
SkCode text modifiers tag definitions test code.
"""
import unittest
from skcode.etree import RootTreeNode
from skcode.tags import (
LowerCaseTextTreeNode,
UpperCaseTextTreeNode,
CapitalizeTextTreeNode,
DEFAULT_RECOGNIZED_TAGS_LIST
)
from skcode.tags.textmodifiers import TextModifierBaseTreeNode
class CustomTextModifierTreeNode(TextModifierBaseTreeNode):
""" Test class """
text_modifier = 'foobar'
class TextModifierTagsTestCase(unittest.TestCase):
""" Tests suite for text modifier tags module. """
def test_tag_and_aliases_in_default_recognized_tags_dict(self):
""" Test the presence of the tag and aliases in the dictionary of default recognized tags. """
self.assertIn(LowerCaseTextTreeNode, DEFAULT_RECOGNIZED_TAGS_LIST)
self.assertIn(UpperCaseTextTreeNode, DEFAULT_RECOGNIZED_TAGS_LIST)
self.assertIn(CapitalizeTextTreeNode, DEFAULT_RECOGNIZED_TAGS_LIST)
def test_tag_constant_values(self):
""" Test tag constants. """
self.assertFalse(LowerCaseTextTreeNode.newline_closes)
self.assertFalse(LowerCaseTextTreeNode.same_tag_closes)
self.assertFalse(LowerCaseTextTreeNode.weak_parent_close)
self.assertFalse(LowerCaseTextTreeNode.standalone)
self.assertTrue(LowerCaseTextTreeNode.parse_embedded)
self.assertTrue(LowerCaseTextTreeNode.inline)
self.assertFalse(LowerCaseTextTreeNode.close_inlines)
self.assertEqual((), LowerCaseTextTreeNode.alias_tag_names)
self.assertFalse(LowerCaseTextTreeNode.make_paragraphs_here)
self.assertEqual('<span class="text-{text_modifier}">{inner_html}</span>\n',
LowerCaseTextTreeNode.html_render_template)
def test_render_html_lowercase(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('lowercase', LowerCaseTextTreeNode)
self.assertEqual('<span class="text-lowercase">test</span>\n', tree_node.render_html('test'))
def test_render_html_uppercase(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('uppercase', UpperCaseTextTreeNode)
self.assertEqual('<span class="text-uppercase">test</span>\n', tree_node.render_html('test'))
def test_render_html_capitalize(self):
""" Test the ``render_html`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('capitalize', CapitalizeTextTreeNode)
self.assertEqual('<span class="text-capitalize">test</span>\n', tree_node.render_html('test'))
def test_render_text_lowercase(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('lowercase', LowerCaseTextTreeNode)
self.assertEqual('test', tree_node.render_text('teST'))
def test_render_text_uppercase(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('uppercase', UpperCaseTextTreeNode)
self.assertEqual('TEST', tree_node.render_text('teST'))
def test_render_text_capitalize(self):
""" Test the ``render_text`` method. """
root_tree_node = RootTreeNode()
tree_node = root_tree_node.new_child('capitalize', CapitalizeTextTreeNode)
self.assertEqual('Test', tree_node.render_text('test'))
def test_default_render_text(self):
""" Test the default behavior of the ``render_text`` method """
root_tree_node = RootTree | Node()
tree_node = root_tree_node.new_child('capit | alize', CustomTextModifierTreeNode)
self.assertEqual('tEst', tree_node.render_text('tEst'))
|
llange/pynag | examples/Plugins/check_load.py | Python | gpl-2.0 | 1,986 | 0.01007 | # check_load.py - Check load average. Thresholds can be specified from the commandline
# Import PluginHelper and some utility constants from the Plugins module
from pynag.Plugins import PluginHelper,ok,warning,critical,unknown
# Create an instance of PluginHelper()
helper = PluginHelper()
# Optionally, let helper handle command-line arguments for us for example --threshold
# Note: If your plugin needs any commandline arguments on its own (like --hostname) you should add them
# before this step with helper.parser.add_option()
helper.parse_arguments()
# Here starts our plugin specific logic. Lets try to read /proc/loadavg
# And if it fails, we exit immediately with UNKNOWN status
try:
content = open('/proc/loadavg').read()
except Exception as e:
helper.exit(summary="Could not read /proc/loadavg", long_output=str(e), exit_code=unknown, perfdata='')
# We have read the contents of loadavg file. Lets put it in the summary of our plugin output:
helper.add_summary("Load: %s" % content)
# Read metrics from /proc/loadavg and add them as performance metrics
load1,load5,load15,processes,last_proc_id = content.split()
running,total = processes.split('/')
# If we so desire we can set default thresholds by adding warn attribute here
# However we decide that there are no thresholds by default and they have to be
# applied on runtime with the --threshold option
helper.add_metric(labe | l='load1',value=load1)
helper.add_metric(label='load5',value=load5)
helper.add_metric(label='load15',value=load15)
helper.add_metric(label='running_processes',value=running)
helper.add_metric(label='total_processes',value=total)
# By default assume everything is ok. Any thresholds specified with --threshold can overwrite this status:
helper.status(ok)
# Here all metrics will be checked against thresholds that are eit | her
# built-in or added via --threshold from the command-line
helper.check_all_metrics()
# Print out plugin information and exit nagios-style
helper.exit()
|
prarthitm/edxplatform | openedx/core/lib/api/permissions.py | Python | agpl-3.0 | 6,295 | 0.002065 | """
API library for Django REST Framework permissions-oriented workflows
"""
from django.conf import settings
from django.http import Http404
from rest_framework import permissions
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from student.roles import CourseStaffRole, CourseInstructorRole
from openedx.core.lib.log_utils import audit_log
class ApiKeyHeaderPermission(permissions.BasePermission):
"""
Django REST Framework permissions class used to manage API Key integrations
"""
def has_permission(self, request, view):
"""
Check for permissions by matching the configured API key and header
If settings.DEBUG is True and settings.EDX_API_KEY is not set or None,
then allow the request. Otherwise, allow the request if and only if
settings.EDX_API_KEY is set and the X-Edx-Api-Key HTTP header is
present in the request and matches the setting.
"""
api_key = getattr(settings, "EDX_API_KEY", None)
if settings.DEBUG and api_key is None:
return True
elif api_key is not None and request.META.get("HTTP_X_EDX_API_KEY") == api_key:
audit_log("ApiKeyHeaderPermission used",
path=request.path,
ip=request.META.get("REMOTE_ADDR"))
return True
return False
class ApiKeyHeaderPermissionIsAuthenticated(ApiKeyHeaderPermission, permissions.IsAuthenticated):
"""
Allow someone to access the view if they have the API key OR they are authenticated.
See ApiKeyHeaderPermission for more information how the API key portion is implemented.
"""
def has_permission(self, request, view):
# TODO We can optimize this later on when we know which of these methods is used more often.
api_permissions = ApiKeyHeaderPermission.has_permission(self, request, view)
is_authenticated_permissions = permissions.IsAuthenticated.has_permission(self, request, view)
return api_permissions or is_authenticated_permissions
class IsUserInUrl(permissions.BasePermission):
"""
Permission that checks to see if the request user matches the user in the URL.
"""
def has_permission(self, request, view):
"""
Returns true if the current request is by the user themselves.
Note: a 404 is returned for non-staff instead of a 403. This is to prevent
users from being able to detect the existence of accounts.
"""
url_username = request.parser_context.get('kwargs', {}).get('username', '')
if request.user.username.lower() != url_username.lower():
if request.user.is_staff:
return False # staff gets 403
raise Http404()
return True
class IsCourseStaffInstructor(permissions.BasePermission):
"""
Permission to check that user is a course instructor or staff of
a master course given a course object or the user is a coach of
the course itself.
"""
def has_object_permission(self, request, view, obj):
return (hasattr(request, 'user') and
# either the user is a staff or instructor of the master course
(hasattr(obj, 'course_id') and
(CourseInstructorRole(obj.course_id).has_user(request.user) or
CourseStaffRole(obj.course_id).has_user(request.user))) or
# or it is a safe method and the user is a coach on the course object
(request.method in permissions.SAFE_METHODS
and hasattr(obj, 'coach') and obj.coach == request.user))
class IsMasterCourseStaffInstructor(permissions.BasePermission):
"""
Permission to check that user is instructor or staff of the master course.
"""
def has_permission(self, request, view):
"""
This method is assuming that a `master_course_id` parameter
is available in the request as a GET parameter, a POST parameter
or it is in the JSON payload included in the request.
The reason is because this permission class is going
to check if the user making the request is an instructor
for the specified course.
"""
master_course_id = (request.GET.get('master_course_id')
or request.POST.get('master_course_id')
or request.data.get('master_course_id'))
if master_course_id is not None:
try:
course_key = CourseKey.from_string(master_course_id)
except InvalidKeyError:
raise Http404()
return (hasattr(request, 'user') and
(CourseInstructorRole(course_key).has_user(request.user) or
CourseStaffRole(course_key).has_user(request.user)))
return False
class IsUserInUrlOrStaff(IsUserInUrl):
"""
Permission that checks to see if the request user matches the user in the URL or has is_staff access.
"""
def has_permission(self, request, view):
if request.user.is_staff:
return True
return super(IsUserInUrlOrStaff, self).has_permission(request, view)
class IsStaffOrReadOnly(permissions.BasePermission): |
"""Permission that checks to see if the user is global or course
staff, permitting only read-only access if they are not.
"""
def has_object_permission(self, request, view, obj):
return (request.user.is_staff or
CourseStaffRole(obj.course_id).has_user(request.user) or
request.method in permissions.SAFE_METHODS)
class IsS | taffOrOwner(permissions.BasePermission):
"""
Permission that allows access to admin users or the owner of an object.
The owner is considered the User object represented by obj.user.
"""
def has_object_permission(self, request, view, obj):
return request.user.is_staff or obj.user == request.user
def has_permission(self, request, view):
user = request.user
return user.is_staff \
or (user.username == request.GET.get('username')) \
or (user.username == getattr(request, 'data', {}).get('username')) \
or (user.username == getattr(view, 'kwargs', {}).get('username'))
|
plotly/plotly.py | packages/python/plotly/plotly/validators/bar/error_x/_valueminus.py | Python | mit | 447 | 0.002237 | import _plotly_utils.base | validators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="valueminus", parent_name="bar.error_x", **kwargs):
super(ValueminusValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs
) | |
Abdoctor/behave | examples/async_step/features/steps/async_steps35.py | Python | bsd-2-clause | 373 | 0.002681 | # -- REQUIRES: Python >= 3.5
from behave import step
from behave.api.async_step import async_run_until_complete
impo | rt asyncio
@step('an async-step waits {duration:f} seconds')
@async_run_until_complete
async def step_async_step_waits_seconds_py35(context, duration):
"""Simple example of a coroutine as async-step (in Python 3.5)"" | "
await asyncio.sleep(duration)
|
citassa1985/youtube-dl | youtube_dl/extractor/vimeo.py | Python | unlicense | 28,337 | 0.001871 | # encoding: utf-8
from __future__ import unicode_literals
import json
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_urlparse,
)
from ..utils import (
encode_dict,
ExtractorError,
InAdvancePagedList,
int_or_none,
RegexNotFoundError,
sanitized_Request,
smuggle_url,
std_headers,
unified_strdate,
unsmuggle_url,
urlencode_postdata,
unescapeHTML,
)
class VimeoBaseInfoExtractor(InfoExtractor):
_NETRC_MACHINE = 'vimeo'
_LOGIN_REQUIRED = False
_LOGIN_URL = 'https://vimeo.com/log_in'
def _login(self):
(username, password) = self._get_login_info()
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return
self.report_login()
webpage = self._download_webpage(self._LOGIN_URL, None, False)
token, vuid = self._extract_xsrft_and_vuid(webpage)
data = urlencode_postdata(encode_dict({
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
}))
login_request = sanitized_Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Referer', self._LOGIN_URL)
self._set_vimeo_cookie('vuid', vuid)
self._download_webpage(login_request, None, False, 'Wrong login info')
def _extract_xsrft_and_vuid(self, webpage):
xsrft = self._search_regex(
r'xsrft\s*[=:]\s*(?P<q>["\'])(?P<xsrft>.+?)(?P=q)',
webpage, 'login token', group='xsrft')
vuid = self._search_regex(
r'["\']vuid["\']\s*:\s*(["\'])(?P<vuid>.+?)\1',
webpage, 'vuid', group='vuid')
return xsrft, vuid
def _set_vimeo_cookie(self, name, value):
self._set_cookie('vimeo.com', name, value)
class VimeoIE(VimeoBaseInfoExtractor):
"""Information extractor for vimeo.com."""
# _VALID_URL matches Vimeo URLs
_VALID_URL = r'''(?x)
https?://
(?:(?:www|(?P<player>player))\.)?
vimeo(?P<pro>pro)?\.com/
(?!channels/[^/?#]+/?(?:$|[?#])|album/)
(?:.*?/)?
(?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
(?:videos?/)?
(?P<id>[0-9]+)
/?(?:[?&].*)?(?:[#].*)?$'''
IE_NAME = 'vimeo'
_TESTS = [
{
'url': 'http://vimeo.com | /56015672#at=0',
'md5': '8879b6cc097e987f02484baf890129e5',
'info_dict': {
'id': '56015672',
'ext': 'mp4',
'title': "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
'description': 'md5:2d3305bad981a06ff79f027f19865021',
'upload_date': '20121220',
| 'uploader_id': 'user7108434',
'uploader': 'Filippo Valsorda',
'duration': 10,
},
},
{
'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
'note': 'Vimeo Pro video (#1197)',
'info_dict': {
'id': '68093876',
'ext': 'mp4',
'uploader_id': 'openstreetmapus',
'uploader': 'OpenStreetMap US',
'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
'description': 'md5:fd69a7b8d8c34a4e1d2ec2e4afd6ec30',
'duration': 1595,
},
},
{
'url': 'http://player.vimeo.com/video/54469442',
'md5': '619b811a4417aa4abe78dc653becf511',
'note': 'Videos that embed the url in the player page',
'info_dict': {
'id': '54469442',
'ext': 'mp4',
'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
'uploader': 'The BLN & Business of Software',
'uploader_id': 'theblnbusinessofsoftware',
'duration': 3610,
'description': None,
},
},
{
'url': 'http://vimeo.com/68375962',
'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
'note': 'Video protected with password',
'info_dict': {
'id': '68375962',
'ext': 'mp4',
'title': 'youtube-dl password protected test video',
'upload_date': '20130614',
'uploader_id': 'user18948128',
'uploader': 'Jaime Marquínez Ferrándiz',
'duration': 10,
'description': 'This is "youtube-dl password protected test video" by Jaime Marquínez Ferrándiz on Vimeo, the home for high quality videos and the people\u2026',
},
'params': {
'videopassword': 'youtube-dl',
},
},
{
'url': 'http://vimeo.com/channels/keypeele/75629013',
'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
'note': 'Video is freely available via original URL '
'and protected with password when accessed via http://vimeo.com/75629013',
'info_dict': {
'id': '75629013',
'ext': 'mp4',
'title': 'Key & Peele: Terrorist Interrogation',
'description': 'md5:8678b246399b070816b12313e8b4eb5c',
'uploader_id': 'atencio',
'uploader': 'Peter Atencio',
'upload_date': '20130927',
'duration': 187,
},
},
{
'url': 'http://vimeo.com/76979871',
'note': 'Video with subtitles',
'info_dict': {
'id': '76979871',
'ext': 'mp4',
'title': 'The New Vimeo Player (You Know, For Videos)',
'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
'upload_date': '20131015',
'uploader_id': 'staff',
'uploader': 'Vimeo Staff',
'duration': 62,
}
},
{
# from https://www.ouya.tv/game/Pier-Solar-and-the-Great-Architects/
'url': 'https://player.vimeo.com/video/98044508',
'note': 'The js code contains assignments to the same variable as the config',
'info_dict': {
'id': '98044508',
'ext': 'mp4',
'title': 'Pier Solar OUYA Official Trailer',
'uploader': 'Tulio Gonçalves',
'uploader_id': 'user28849593',
},
},
{
'url': 'https://vimeo.com/109815029',
'note': 'Video not completely processed, "failed" seed status',
'only_matching': True,
},
{
'url': 'https://vimeo.com/groups/travelhd/videos/22439234',
'only_matching': True,
},
]
@staticmethod
def _extract_vimeo_url(url, webpage):
# Look for embedded (iframe) Vimeo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
if mobj:
player_url = unescapeHTML(mobj.group('url'))
surl = smuggle_url(player_url, {'Referer': url})
return surl
# Look for embedded (swf embed) Vimeo player
mobj = re.search(
r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
if mobj:
return mobj.group(1)
def _verify_video_password(self, url, video_id, webpage):
password = self._downloader.params.get('videopassword', None)
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
|
amol-/tgext.utils | tgext/utils/ajax.py | Python | mit | 414 | 0 | f | rom tg import request, abort, override_template
from tg.decorators import before_validate, before_render
@before_validate
def ajax_only(*args, **kwargs):
if not request.is_xhr:
abort(400)
def ajax_expose(template):
@before_render
def _ajax_expose(*args, **kwargs):
if request.is_xhr:
override_template(request.controller_state.method, template)
return _ajax_e | xpose
|
pmacosta/pexdoc | tests/support/pinspect_support_module_1.py | Python | mit | 5,165 | 0.00213 | # pinspect_support_module_1.py
# Copyright (c) 2013-2019 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0103,C0111,C0411,C0412,C0413,R0201,R0205,R0903,W0212,W0621
from __future__ import print_function
import sys
import pexdoc.exh
import pexdoc.pcontracts
import tests.support.pinspect_support_module_2
def module_enclosing_func(offset): # noqa: D202
"""Test function to see if module-level enclosures are detected."""
def module_closure_func(value):
"""Actual closure function."""
return offset + value
return module_closure_func
def class_enclosing_func():
"""Test function to see if classes within enclosures are detected."""
import tests.support.pinspect_support_module_3
class ClosureClass(object):
"""Actual closure class."""
def __init__(self): # noqa: D401
"""Constructor method."""
self.obj = None
def get_obj(self):
"""Getter method."""
return self.obj
def set_obj(self, obj):
"""Setter method."""
self.obj = obj
def sub_enclosure_method(self): # noqa: D202
"""Test method to see if class of classes are detected."""
class SubClosureClass(object):
"""Actual sub-closure class."""
def __init__(self): # noqa: D401
"""Constructor method."""
self.subobj = None
return SubClosureClass
mobj = sys.modules["tests.support.pinspect_support_module_2"]
obj = property(
mobj.getter_func_for_closure_class,
| set_obj,
tests.support.pinspect_support_module_3.deleter,
| )
return ClosureClass
class ClassWithPropertyDefinedViaLambdaAndEnclosure(object):
"""Class with lambda for property function and enclosed function to define prop."""
def __init__(self): # noqa: D107
self._clsvar = None
clsvar = property(
lambda self: self._clsvar + 10,
tests.support.pinspect_support_module_2.setter_enclosing_func(5),
doc="Class variable property",
)
def dummy_decorator(func): # noqa: D401
"""Dummy property decorator, to test if chained decorators are handled correctly."""
return func
def simple_property_generator(): # noqa: D401,D202
"""Function to test if properties done via enclosed functions properly detected."""
def fget(self):
"""Actual getter function."""
return self._value
return property(fget)
class ClassWithPropertyDefinedViaFunction(object):
"""Class to test if properties defined via property function handled correctly."""
def __init__(self): # noqa: D107
self._state = None
@pexdoc.pcontracts.contract(state=int)
@dummy_decorator
def _setter_func(self, state):
"""Setter method with property defined via property() function."""
exobj = (
pexdoc.exh.get_exh_obj()
if pexdoc.exh.get_exh_obj()
else pexdoc.exh.ExHandle()
)
exobj.add_exception(
exname="dummy_exception_1", extype=ValueError, exmsg="Dummy message 1"
)
exobj.add_exception(
exname="dummy_exception_2", extype=TypeError, exmsg="Dummy message 2"
)
self._state = state
def _getter_func(self):
"""Getter method with property defined via property() function."""
return self._state
def _deleter_func(self): # noqa: D401
"""Deleter method with property defined via property() function."""
print("Cannot delete attribute")
state = property(_getter_func, _setter_func, _deleter_func, doc="State attribute")
import math
class ClassWithPropertyDefinedViaDecorators(object):
"""Class to test if properties defined via decorator functions handled correctly."""
def __init__(self): # noqa: D107
self._value = None
def __call__(self): # noqa: D102
self._value = 2 * self._value if self._value else self._value
@property
def temp(self):
"""Getter method defined with decorator."""
return math.sqrt(self._value)
@temp.setter
@pexdoc.pcontracts.contract(value=int)
def temp(self, value):
"""Setter method defined with decorator."""
self._value = value
@temp.deleter
def temp(self): # noqa: D401
"""Deleter method defined with decorator."""
print("Cannot delete attribute")
encprop = simple_property_generator()
import tests.support.pinspect_support_module_4
def class_namespace_test_enclosing_func():
"""Test namespace support for enclosed class properties."""
# pylint: disable=C0301
class NamespaceTestClosureClass(object): # noqa: D200,D210,D400
r""" Actual class
""" # This is to test a comment after a multi-line docstring
def __init__(self, value):
_, _, _ = (5, 3, 7)
self._value = value
nameprop = (
tests.support.pinspect_support_module_4.another_property_action_enclosing_function()
)
return NamespaceTestClosureClass
|
shaded-enmity/custodia | setup.py | Python | gpl-3.0 | 712 | 0 | #!/usr/bin/python
#
# Copyright (C) 2015 Custodia project Contributors, for licensee see COPYING
from distutils.core import setup
setup(
name='custodia',
version='0.1.0',
license='GPLv3+',
maintainer='Custodia | project Contributors',
maintainer_email='simo@redhat.com',
url='https://github.com/latchset/custodia',
packages=['custodia', 'custodia.httpd', 'custodia.store',
'custodia.message', 'custodia.kubernetes'],
data_files=[('share/man/man7', ["man/custodia.7"]),
('share/doc/custodia', ['LICENSE', 'README', 'API.md']) | ,
('share/doc/custodia/examples', ['custodia.conf']),
],
scripts=['custodia/custodia']
)
|
ufjfeng/leetcode-jf-soln | python/223_rectangle_area.py | Python | mit | 1,071 | 0.014006 | """
Find the total area covered by two rectilinear rectangles in a 2D plane.
Each rectangle is defined by its bottom left | corner and top right corner as
shown in the figure.
![Example layout]
(https://leetcode.com/static/images/problemset/rectangle_area.png)
Assume that the total area is never beyond the maximum possible value of int.
Credits:
Special thanks to @mithmatt for adding this proble | m, creating the above
image and all test cases.
"""
class Solution(object):
def computeArea(self, A, B, C, D, E, F, G, H):
"""
:type A: int
:type B: int
:type C: int
:type D: int
:type E: int
:type F: int
:type G: int
:type H: int
:rtype: int
"""
areaA=(C-A)*(D-B)
areaB=(G-E)*(H-F)
if (G-A)*(E-C)>=0 or (H-B)*(F-D)>=0:
areaO=0
else:
overlapX=min(abs(G-A),abs(C-E),abs(C-A),abs(G-E))
overlapY=min(abs(H-B),abs(D-F),abs(D-B),abs(H-F))
areaO=overlapX*overlapY
return areaA+areaB-areaO
|
zentralopensource/zentral | tests/inventory/test_clean_ip_address.py | Python | apache-2.0 | 814 | 0 | from django.test import SimpleTestCase
from zentral.contrib.inventory.utils import clean_ip_address
class CleanIPAddressTestCase(SimpleTestCase):
def test_ipv4(se | lf):
for value, result in (( | None, None),
(123, None),
("127.0.0.1 ", "127.0.0.1"),
(" 10.12.13.17 ", "10.12.13.17"),
("127.0000.0.1", None)):
self.assertEqual(clean_ip_address(value), result)
def test_ipv6(self):
for value, result in (("0:0:0:0:0:0:0:1", "::1"),
("2001:db8::1", "2001:db8::1"),
("::FFFF:129.144.52.38", "129.144.52.38"),
):
self.assertEqual(clean_ip_address(value), result)
|
pferreir/indico-backup | bin/utils/VSGuideHTMLFix.py | Python | gpl-3.0 | 4,708 | 0.006372 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
""" Simple script to parse and do some corrections HTML exported by the source OpenOffice documents
used to produce the Video Services guides.
It assumes you are using it from indico's bin directory in development mode.
If this isn't right, please change the 'ihelppath' variable and the end of this file.
"""
from HTMLParser import HTMLParser
import htmlentitydefs
import os
class MyHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
def process(self, target):
if not os.path.exists(target):
print 'Could not find file: ' + target
return
self.reset()
self._inStyleTag = False
outName = target + '.tmp'
self._out = file(outName, 'w')
self.feed(file(target).read())
self._out.close()
os.remove(target)
os.rename(outName, target)
self.close()
@classmethod
def _processAttrs(cls, tag, attrs):
attrs = dict(attrs)
if tag.lower() == 'img':
attrs.pop('height','')
attrs.pop('HEIGHT','')
attrs.pop('width','')
attrs.pop('WIDTH','')
if not 'style' in attrs or attrs['style'].find('text-align: center') == -1:
attrs['style'] = attrs.pop('style','') + ";text-align: center;"
if tag.lower() == 'p' and ('align' in attrs and attrs['align'].lower() == 'center' or 'ALIGN' in attrs and attrs['ALIGN'].lower() == 'center'):
attrs.pop('align','')
attrs.pop('ALIGN','')
if not 'style' in attrs or attrs['style'].find('text-align: center') == -1:
attrs['style'] = attrs.pop('style','') + ";text-align: center;"
return tag, attrs
def handle_starttag(self, tag, attrs):
if tag.lower() == 'style':
self._inStyleTag = True
tag, attrs = MyHTMLParser._processAttrs(tag, attrs)
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs.iteritems()])
self._out.write("<%s%s>" % (tag, strattrs))
def handle_startendtag(self, tag, attrs):
tag, attrs = MyHTMLParser._processAttrs(tag, attrs)
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
self._out.write("<%s%s />" % (tag, strattrs))
def handle_endtag(self, tag):
if tag.lower() == 'style':
self._inStyleTag = False
self._out.write("</%s>" % tag)
def handle_data(self, text):
if self._inStyleTag:
iPStyle1 = text.find("P {")
iPStyle2 = text.find("p {")
iPStyle3 = text.find("P{")
iPStyle4 = text.find("p{")
iPStyle = max(iPStyle1, iPStyle2, iPStyle3, iPStyle4)
endIPStyle = text.find('}', iPStyle)
self._out.write(text[:endIPStyle])
if not text[:endIPStyle].endswith(';margin: 0; padding: 0;'):
self._out.write(';margin: 0; padding: 0;')
self._out.write(text[endIPStyle:])
else:
self._out.write("%s" % text)
def handle_comment(self, comment):
self._out.write("<!-- %s -->\n" % comment)
def handle_entit | yref(self, ref):
self._out.write("&%s" % ref)
if htmlentitydefs.entitydefs.has_key(ref):
self._out.write(";")
def handle_charref(self, ref):
self._out.write("&#%s;" % ref)
def handle_pi(self, text):
self._out.write("<?%s>" % text)
def handle_decl(self, text):
self._out.write("<!%s>" % text)
if __name__ == "__main__":
p = MyHTMLParser()
ihelpPath = "../../indico/htdocs/ihelp/"
p.proc | ess(ihelpPath + "VideoServices/IndicoUserGuide_VS/index.html")
p.process(ihelpPath + "VideoServices/EventManagerUserGuide_VS/index.html")
p.process(ihelpPath + "VideoServices/ServerAdminUserGuide_VS/index.html")
p.process(ihelpPath + "VideoServices/VSAdminUserGuide_VS/index.html")
|
luterien/madcyoa | main/views.py | Python | apache-2.0 | 321 | 0.015576 | from django.sho | rtcuts import render, redirect
from django.core.urlresolvers import reverse
def test(request):
return render(request, "test.html", {"data": "unknown!"})
def index(request):
if request.user.is_authenticated():
return redirect(reverse("my-stories")) |
return render(request, "main/index.html", {})
|
odrling/peony-twitter | docs/conf.py | Python | mit | 10,839 | 0 | # -*- coding: utf-8 -*-
#
# Peony documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 30 16:36:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
import inspect
import os
import pathlib
import re
import sys
conf_py = pathlib.Path(inspect.getfile(inspect.currentframe())).absolute()
docs = conf_py.parent
maindir = docs.parent
sys.path.insert(0, str(maindir))
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon'
]
rtd = "https://%s.readthedocs.io/en/stable"
python_docs = "https://docs.python.org/3"
intersphinx_mapping = {'python': (python_docs, None),
'aiohttp': (rtd % "aiohttp", None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Peony'
copyright = '2016-2017, Florian Badie'
author = 'odrling'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
init = maindir / "peony" / "__init__.py"
with init.open() as stream:
ex = r'__version__\s*=\s*?[\"\']([^\"\']*)'
match = re.search(ex, stream.read())
version = match.group(1)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# on_rtd is whether we are on readthedocs.org, this line of code
# grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need
# to specify it
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Peony v0.2.2'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a
# favicon of the docs. This file should be a Windows icon file (.ico)
# being 16x16 or 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_s | earch_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `ji | eba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scor |
sorig/shogun | examples/undocumented/python/modelselection_grid_search_krr.py | Python | bsd-3-clause | 4,752 | 0.016204 | #!/usr/bin/env python
#
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Heiko Strathmann
from numpy import array
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2], \
[traindat,testdat,label_traindat,2.1,1,1e-5,1e-2]]
def modelselection_grid_search_krr (fm_train=traindat,fm_test=testdat,label_train=label_traindat,\
width=2.1,C=1,epsilon=1e-5,tube_epsilon=1e-2):
from shogun import CrossValidation, CrossValidationResult
from shogun import MeanSquaredError
from shogun import CrossValidationSplitting
from shogun import RegressionLabels
from shogun import RealFeatures
from shogun import KernelRidgeRegression
from shogun import GridSearchModelSelection
from shogun import ModelSelectionParameters
# training data
features_train=RealFeatures(traindat)
features_test=RealFeatures(testdat)
labels=RegressionLabels(label_traindat)
# labels
labels=RegressionLabels(label_train)
# predictor, set tau=0 here, doesnt matter
predictor=KernelRidgeRegression()
# splitting strategy for 5 fold cross-validation (for classification its better
# to use "StratifiedCrossValidation", but the standard
# "StratifiedCrossValidationSplitting" is also available
splitting_strategy=CrossValidationSplitting(labels, 5)
# evaluation method
evaluation_criterium=MeanSquaredError()
# cross-validation instance
cross_validation=CrossValidation(predictor, features_train, labels,
splitting_strategy, evaluation_criterium)
# (optional) repeat x-val (set larger to get better estimates)
cross_validation.set_num_runs(2)
# print all parameter available for modelselection
# Dont worry if yours is not included but, write to the mailing list
#predictor.print_modsel_params()
# build parameter tree to select regularization parameter
param_tree_root=create_param_tree()
# model selection instance
model_selection=GridSearchModelSelection(cross_validation, param_tree_root)
# perform model selection with selected methods
#print "performing model selection of"
#print "parameter tree:"
#param_tree_root.print_tree()
#print "starting model selection"
# print the current parameter combination, if no parameter nothing is printed
print_state=False
best_parameters=model_selection.select_model(print_state)
# print best parameters
#print "best parameters:"
#best_parameters.print_tree()
# apply them and print result
best_parameters.apply_to_machine(predictor)
result=cross_validation.evaluate()
#print "mean:", result.mean
# creates all the parameters to optimize
def create_param_tree():
from shogun import ModelSelectionParameters, R_EXP, R_LINEAR
from shogun import ParameterCombination
from shogun import GaussianKernel, PolyKernel
import math
root=ModelSelectionParameters()
tau=ModelSelectionParameters("tau")
root.append_child(tau)
# also R_LINEAR/R_LOG is available | as type
min=-1
max=1
type=R_EXP
step=1.5
base=2
tau.build_values(min, max, type, step, base)
# gaussian kernel with width
gaussian_kernel=GaussianKernel()
# pr | int all parameter available for modelselection
# Dont worry if yours is not included but, write to the mailing list
#gaussian_kernel.print_modsel_params()
param_gaussian_kernel=ModelSelectionParameters("kernel", gaussian_kernel)
gaussian_kernel_width=ModelSelectionParameters("log_width");
gaussian_kernel_width.build_values(2.0*math.log(2.0), 2.5*math.log(2.0), R_LINEAR, 1.0)
param_gaussian_kernel.append_child(gaussian_kernel_width)
root.append_child(param_gaussian_kernel)
# polynomial kernel with degree
poly_kernel=PolyKernel()
# print all parameter available for modelselection
# Dont worry if yours is not included but, write to the mailing list
#poly_kernel.print_modsel_params()
param_poly_kernel=ModelSelectionParameters("kernel", poly_kernel)
root.append_child(param_poly_kernel)
# note that integers are used here
param_poly_kernel_degree=ModelSelectionParameters("degree")
param_poly_kernel_degree.build_values(1, 2, R_LINEAR)
param_poly_kernel.append_child(param_poly_kernel_degree)
return root
if __name__=='__main__':
print('ModelselectionGridSearchKRR')
modelselection_grid_search_krr(*parameter_list[0])
|
nCoda/macOS | .eggs/py2app-0.14-py2.7.egg/py2app/bootstrap/setup_pkgresource.py | Python | gpl-3.0 | 453 | 0 | def _setup_pkgresources():
import pkg_resources
import os
import plistlib
pl = plistlib.readPlist(os.path.join(
os.path.dirname(os.getenv('RESOURCEPATH')), "Info.plist"))
app | name = pl.get('CFBundleIdentifier')
if appname is None:
appname = pl['CFBundleDisplayName']
path = os.path.expanduser('~/Library/Caches/%s/python-eggs' % (appname,))
pkg_reso | urces.set_extraction_path(path)
_setup_pkgresources()
|
collects/VTK | Examples/Infovis/Python/random3d.py | Python | bsd-3-clause | 847 | 0.004723 | from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(150)
source.SetEdgeProbability(0.01)
source.SetUseEdgeProbability(True)
source.SetStartWithTree(True)
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(source.GetOutputPort())
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("vertex id")
view.SetColorVertices(True)
view.SetLayoutStrategyToSpanTree()
view.SetInteractionModeT | o3D() # Left mouse button causes 3D rotate instead of zoom
view.SetLabelPlacementModeToNoOverlap()
theme = vtkViewTheme.CreateMellowTheme()
theme.SetCellColor(.2,.2,.6)
theme.SetLineWidth(2)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view | .GetInteractor().Start()
|
helixyte/everest_nosql | everest_nosql/tests/test_aggregates.py | Python | mit | 1,812 | 0.003311 | """
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Nov 27, 2013.
"""
from everest_nosql.aggregate import NoSqlAggregate
import pytest
from everest.querying.specifications import desc
from everest.querying.specifications import eq
from everest.querying.specifications import gt
from everest.tests.complete_app.interfaces import IMyEntity
from everest.tests.test_aggregates import BaseTestRootAggregate
__docformat__ = 'reStructuredText en'
__all__ = ['TestNosSqlRootAggregate',
]
class Fixtures(object):
ent0 = lambda entity_tree_fac: entity_tree_fac(id=0, text='222')
ent1 = lambda entity_tree_fac: entity_tree_fac(id=1, text='111')
ent2 = lambda entity_tree_fac: entity_tree_fac(id=2, text='000')
@pytest.mark.usefixtures('nosql')
class TestNosSqlRootAggregate(BaseTestRootAggregate):
config_file_name = 'everest_nosql.tests:configure.zcml'
agg_class = NoSqlAggregate
def test_nested_attribute(self, class_entity_repo, ent0, ent1, ent2):
| agg = class_entity_repo.get_aggregate(IMyEntity)
agg.add(ent0)
| agg.add(ent1)
agg.add(ent2)
assert len(list(agg.iterator())) == 3
agg.filter = eq(**{'parent.text_ent':'222'})
assert len(list(agg.iterator())) == 1
agg.filter = None
assert len(list(agg.iterator())) == 3
# TODO: Nested attribute ordering does not work with NoSQL.
agg.order = desc('id')
assert next(agg.iterator()) is ent2
# With nested filter and order.
agg.filter = gt(**{'parent.text_ent':'000'})
assert next(agg.iterator()) is ent1
# With nested filter, order, and slice.
agg.slice = slice(1, 2)
assert next(agg.iterator()) is ent0
|
chapmanbe/pymitools | pymitools/ontologies/metadataCollector.py | Python | apache-2.0 | 8,067 | 0 | """Includes MetadataCollector class.
Responsible for keeping track of when widget values change or are selected.
"""
import ipywidgets as widgets
from IPython.display import display
import requests
class MetadataCollector:
"""Handle information inside the widgets."""
def __init__(self, topic, ontologies, required=False, value_changed=None,
bioportal_api_key='efa3babf-b23c-4399-89f7-689bb9d576fb'):
"""Provide bioportal key, create widgets.
Create (but not display) needed widgets. If the topic is required
before upload, highlight the text box red.
:param topic: The topic name to be associated with the key words
:param ontolgies: The ontolgies to be searched.
:param required: Whether or not the topic is required to have at least
one key word added before upload.
:param value_changed: Callback which is called everytime the first word
is added to an empty added words widget or when
the last word is removed from the added words
widget.
:param bioportal_api_key: The key used to access the bioportal REST API
"""
self._topic = topic
self._required = required
self._ontologies = ontologies
self._value_changed = value_changed
self._color = 'red'
# self._results_in | fo stores keywords as keys and bioportal
# results as values
| self._results_info = dict()
# self._final_results stores only the info for added words
self._final_results = dict()
self._selected = None
self._ready = False
results_name = topic + " results:"
self._search_input_widget = widgets.Text(description=topic,
value='', width='49%')
self._search_results_widget = widgets.Select(description=results_name,
options=[],
width='300')
self._added_word_widget = widgets.Select(description='selected words:',
options=[],
width='300')
self._add_button = widgets.Button(description='add', width='100%',
disabled=True)
self._remove_button = widgets.Button(description='remove',
width='100%',
disabled=True)
if required:
self._search_input_widget.background_color = self._color
search_contains = [self._search_input_widget]
search_container = widgets.HBox(children=search_contains)
button_contains = [self._add_button, self._remove_button]
button_container = widgets.VBox(children=button_contains)
bottom_contains = [self._search_results_widget, button_container,
self._added_word_widget]
bottom_container = widgets.HBox(children=bottom_contains)
self._container = widgets.VBox(children=[search_container,
bottom_container])
self._api_url = 'http://data.bioontology.org/'
self._key = bioportal_api_key
self._headers = {'Authorization': 'apikey token=' + self._key}
def GET(self, url, params=None):
"""Convenient method for requests.get().
Headers already included in call. JSON response data is returned.
:param url: The website to access JSON data from.
:param params: Parameters for the REST request.
"""
request = requests.get(url, headers=self._headers, params=params)
return request.json()
def is_required(self):
"""Return whether the field is required or not before upload."""
return self._required
def get_topic(self):
"""Return the topic that is used for the widgets."""
return self._topic
def display(self):
"""Display the 5 widgets to be used for the topic(s)."""
display(self._container)
self._search_input_widget.observe(self.__search_value_changed,
names='value')
self._search_results_widget.observe(self.__results_value_change,
names='value')
self._add_button.on_click(self.__add_button_click)
self._remove_button.on_click(self.__remove_button_click)
self._added_word_widget.observe(self.__selected_value_change,
names='value')
def has_results(self):
"""Check if there are words in the added words widget."""
return self._ready
def get_results(self):
"""Return the final dictionary results.
The dictionary keys are the key words in the added words widget, and
the values are the responses from bioportal.
"""
return self._final_results
def __search(self, search_term, ontologies):
"""Search specified ontologies using bioportals REST API.
Returns list of suggested keywords and a dictionary with the
keywords as keys and bioportal response data as values.
:param searchTerm: The term to search bioportal with.
:param ontologies: A list of ontology IDs to search.
"""
parameters = {'ontologies': ontologies,
'suggest': 'true', 'pagesize': 15}
search = self._api_url + 'search?q=' + search_term
data = self.GET(search, params=parameters)
nameList = []
nameDict = {}
if "collection" in data:
collection = data["collection"]
else:
return (nameList, nameDict)
for d in collection:
nameDict[d["prefLabel"]] = d
nameList.append(d["prefLabel"])
return (nameList, nameDict)
def __search_value_changed(self, change):
new_keyword = change['new'].strip()
if new_keyword:
keywords, info = self.__search(new_keyword, self._ontologies)
if len(keywords) == 0:
temp = ['NO RESULTS FOUND']
self._search_results_widget.options = temp
self._add_button.disabled = True
else:
self._search_results_widget.options = keywords
self._results_info = info
self._add_button.disabled = False
else:
temp = []
self._search_results_widget.options = temp
self._selected = None
self._add_button.disabled = True
def __results_value_change(self, change):
self._selected = change['new']
def __selected_value_change(self, change):
self._remove_button.disabled = False
def __add_button_click(self, change):
tmp = self._selected
tmp2 = self._added_word_widget.value
self._final_results[tmp] = self._results_info[tmp]
added_words = self._added_word_widget.options
if self._selected not in added_words:
if (tmp == tmp2):
self._remove_button.disabled = False
added_words.append(self._selected)
self._added_word_widget.options = added_words
self._ready = True
# Execute value change delegate
if self._value_changed:
self._value_changed()
def __remove_button_click(self, change):
selected = self._added_word_widget.value
added_words = self._added_word_widget.options
added_words.remove(selected)
self._added_word_widget.options = added_words
self._final_results.pop(selected, None)
if len(self._final_results) == 0:
self._ready = False
# Execute value change delegate
if self._value_changed:
self._value_changed()
self._remove_button.disabled = True
|
BytesGalore/RIOT | tests/pkg_tinycbor/tests/01-run.py | Python | lgpl-2.1 | 474 | 0 | #!/usr/bin/env py | thon3
# Copyright (C) 2016 Kaspar Schleiser <kaspar@schleiser.de>
# Copyright (C) 2016 Takuo Yonezawa <Yonezawa-T2@mail.dnp.co.jp>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
from testrunner import run
def testfunc(child):
child.expect(r"OK \(1 tests\)")
if __name__ == "_ | _main__":
sys.exit(run(testfunc))
|
cncdnua/cncdnua | cncdnua/cncdnua/urls.py | Python | mit | 405 | 0.002469 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autod | iscover()
urlpatterns = patterns | (
'',
url(r'', include('frontpage.urls')),
url(r'^auth/', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}, name='logout'),
)
|
yoga30696/Coursera-Using-databases-with-python | Week 3/tracks/tracks.py | Python | gpl-3.0 | 2,704 | 0.011834 | import xml.etree.ElementTree as ET
import sqlite3
conn = sqlite3.connect('trackdb.sqlite')
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
DROP TABLE IF EXISTS Genre;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Genre (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name | TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
artist_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY
AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
genre_id INTEGER,
| len INTEGER, rating INTEGER, count INTEGER
);
''')
fname = raw_input('Enter file name: ')
if ( len(fname) < 1 ) : fname = 'Library.xml'
# <key>Track ID</key><integer>369</integer>
# <key>Name</key><string>Another One Bites The Dust</string>
# <key>Artist</key><string>Queen</string>
def lookup(d, key):
found = False
for child in d:
if found : return child.text
if child.tag == 'key' and child.text == key :
found = True
return None
stuff = ET.parse(fname)
all = stuff.findall('dict/dict/dict')
print 'Dict count:', len(all)
for entry in all:
if ( lookup(entry, 'Track ID') is None ) : continue
name = lookup(entry, 'Name')
artist = lookup(entry, 'Artist')
album = lookup(entry, 'Album')
count = lookup(entry, 'Play Count')
rating = lookup(entry, 'Rating')
genre = lookup(entry, 'Genre')
length = lookup(entry, 'Total Time')
if name is None or artist is None or album is None or genre is None :
continue
print name, artist, album, count, rating, length, genre
cur.execute('''INSERT OR IGNORE INTO Artist (name)
VALUES ( ? )''', ( artist, ) )
cur.execute('SELECT id FROM Artist WHERE name = ? ', (artist, ))
artist_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Album (title, artist_id)
VALUES ( ?, ? )''', ( album, artist_id ) )
cur.execute('SELECT id FROM Album WHERE title = ? ', (album, ))
album_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Genre (name)
VALUES ( ? )''', ( genre, ) )
cur.execute('SELECT id FROM Genre WHERE name = ? ', (genre, ))
genre_id = cur.fetchone()[0]
cur.execute('''INSERT OR REPLACE INTO Track
(title, album_id, genre_id, len, rating, count)
VALUES ( ?, ?, ?, ?, ?, ? )''',
( name, album_id, genre_id, length, rating, count ) )
conn.commit()
|
kevindkeogh/qbootstrapper-flask | qbflask/__init__.py | Python | mit | 401 | 0.002494 | #!/usr/bin/python3
'''qbootstraper main application
'''
from flask import Flask
from flask_wtf import csrf
app = Flask(__name__)
c | srf.CSRFProtect().init_app(app) # enable CSRF protection
from qbflask.views import *
app.config.update(dict(
DATABASE='qbflask.db',
DEBUG=True,
SECRET_KEY='secret',
| USERNAME='admin',
PASSWORD='admin'
))
if __name__ == '__main__':
app.run()
|
grimmjow8/ansible | test/units/executor/test_play_iterator.py | Python | gpl-3.0 | 18,831 | 0.001062 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# A | nsible is free softwa | re: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import HostState, PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlayIterator(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_host_state(self):
hs = HostState(blocks=[x for x in range(0, 10)])
hs.tasks_child_state = HostState(blocks=[0])
hs.rescue_child_state = HostState(blocks=[1])
hs.always_child_state = HostState(blocks=[2])
hs.__repr__()
hs.run_state = 100
hs.__repr__()
hs.fail_state = 15
hs.__repr__()
for i in range(0, 10):
hs.cur_block = i
self.assertEqual(hs.get_current_block(), i)
new_hs = hs.copy()
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_play_iterator(self):
#import epdb; epdb.st()
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
roles:
- test_role
pre_tasks:
- debug: msg="this is a pre_task"
tasks:
- debug: msg="this is a regular task"
- block:
- debug: msg="this is a block task"
- block:
- debug: msg="this is a sub-block in a block"
rescue:
- debug: msg="this is a rescue task"
- block:
- debug: msg="this is a sub-block in a rescue"
always:
- debug: msg="this is an always task"
- block:
- debug: msg="this is a sub-block in an always"
post_tasks:
- debug: msg="this is a post_task"
""",
'/etc/ansible/roles/test_role/tasks/main.yml': """
- name: role task
debug: msg="this is a role task"
- block:
- name: role block task
debug: msg="inside block in role"
always:
- name: role always task
debug: msg="always task in block in role"
- include: foo.yml
- name: role task after include
debug: msg="after include in role"
- block:
- name: starting role nested block 1
debug:
- block:
- name: role nested block 1 task 1
debug:
- name: role nested block 1 task 2
debug:
- name: role nested block 1 task 3
debug:
- name: end of role nested block 1
debug:
- name: starting role nested block 2
debug:
- block:
- name: role nested block 2 task 1
debug:
- name: role nested block 2 task 2
debug:
- name: role nested block 2 task 3
debug:
- name: end of role nested block 2
debug:
""",
'/etc/ansible/roles/test_role/tasks/foo.yml': """
- name: role included task
debug: msg="this is task in an include from a role"
"""
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
mock_var_manager._fact_cache['host00'] = dict()
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# lookup up an original task
target_task = p._entries[0].tasks[0].block[0]
task_copy = target_task.copy(exclude_parent=True)
found_task = itr.get_original_task(hosts[0], task_copy)
self.assertEqual(target_task, found_task)
bad_task = Task()
found_task = itr.get_original_task(hosts[0], bad_task)
self.assertIsNone(found_task)
# pre task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# role task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, "role task")
self.assertIsNotNone(task._role)
# role block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role block task")
self.assertIsNotNone(task._role)
# role block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role always task")
self.assertIsNotNone(task._role)
# role include task
#(host_state, task) = itr.get_next_task_for_host(hosts[0])
#self.assertIsNotNone(task)
#self.assertEqual(task.action, 'debug')
#self.assertEqual(task.name, "role included task")
#self.assertIsNotNone(task._role)
# role task after include
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role task after include")
self.assertIsNotNone(task._role)
# role nested block tasks
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "starting role nested block 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested b |
unterweg/peanoclaw | supermucIBMMPIConfiguration.py | Python | bsd-3-clause | 431 | 0.018561 | #
# Definitions for compiling with IBM-MPI on SuperMUC
#
def getMPIIncludes():
return ['/opt/ibmhpc/pecurrent/mpich2/intel/include64']
def g | etMPILibrarypaths():
return ['/opt/ibmhpc/pecurrent/mpich2/intel/lib64', '/opt/ibmhpc/pecurrent/mpich2/../pempi/intel/lib64', '/opt/ibmhpc/pecurrent/ppe.pami/intel/lib64/pami64']
def getMPILibraries():
return ['cxxmpich', 'pthread', 'mpich', 'opa', 'mpl', 'dl', | 'poe', 'pami']
|
rozofs/rozofs | tests/IT/IT.py | Python | gpl-2.0 | 44,593 | 0.047294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os.path
import subprocess
import time
import re
import shlex
from display_array import *
from optparse import OptionParser
fileSize=int(4)
loop=int(32)
process=int(8)
EXPOR | T_SID_NB=int(8)
STORCLI_SID_NB=int(8)
nbGruyere=int(256)
stopOnFailure=Tru | e
fuseTrace=False
DEFAULT_MNT="mnt1_1_g0"
ALL_MNT="mnt1_1_g0,mnt2_1_g0,mnt3_1_g0,mnt4_1_g0"
mnts=DEFAULT_MNT
mnt=""
DEFAULT_RETRIES=int(20)
tst_file="tst_file"
device_number=""
mapper_modulo=""
mapper_redundancy=""
vid=""
list_cid=[]
list_sid=[]
list_host=[]
hunic=[]
inverse=2
forward=3
safe=4
#___________________________________________________
def my_duration (val):
#___________________________________________________
hour=val/3600
min=val%3600
sec=min%60
min=min/60
return "%2d:%2.2d:%2.2d"%(hour,min,sec)
#___________________________________________________
def reset_counters():
# Use debug interface to reset profilers and some counters
#___________________________________________________
return
#___________________________________________________
def get_all_mount_points():
#___________________________________________________
global ALL_MNT
string="df"
parsed = shlex.split(string)
cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ALL_MNT=""
for line in cmd.stdout:
if line.split()[0] != "rozofs":
continue
mount=line.split()[5]
mount=mount.split('/')
mount=mount[len(mount)-1]
if ALL_MNT == "":
ALL_MNT=mount
else:
ALL_MNT=ALL_MNT+','+mount
#___________________________________________________
def get_device_numbers(hid,cid):
# Use debug interface to get the number of sid from exportd
#___________________________________________________
device_number=1
mapper_modulo=1
mapper_redundancy=1
storio_name="storio:0"
string="./build/src/rozodiag/rozodiag -i localhost%d -T storaged -c storio"%(hid)
parsed = shlex.split(string)
cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in cmd.stdout:
if "mode" in line:
if "multiple" in line:
storio_name="storio:%d"%(cid)
break;
string="./build/src/rozodiag/rozodiag -i localhost%d -T %s -c device"%(hid,storio_name)
parsed = shlex.split(string)
cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in cmd.stdout:
if "device_number" in line:
device_number=line.split()[2]
if "mapper_modulo" in line:
mapper_modulo=line.split()[2]
if "mapper_redundancy" in line:
mapper_redundancy=line.split()[2]
return device_number,mapper_modulo,mapper_redundancy
#___________________________________________________
def get_sid_nb():
# Use debug interface to get the number of sid from exportd
#___________________________________________________
global list_cid
global list_sid
global list_host
global hunic
inst=get_rozofmount_instance()
string="./build/src/rozodiag/rozodiag -T mount:%d:1 -c storaged_status"%(inst)
parsed = shlex.split(string)
cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
storcli_sid=int(0)
for line in cmd.stdout:
if "UP" in line or "DOWN" in line:
storcli_sid=storcli_sid+1
words=line.split()
list_cid.append(int(words[0]))
list_sid.append(int(words[2]))
list_host.append(int(words[4].split('localhost')[1]))
hunic=[]
for h in list_host:
if h not in hunic:
hunic.append(h)
string="./build/src/rozodiag/rozodiag -T export -c vfstat_stor"
parsed = shlex.split(string)
cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
export_sid=int(0)
for line in cmd.stdout:
if len(line.split()) == 0:
continue
if line.split()[0] != vid:
continue;
if "UP" in line or "DOWN" in line:
export_sid=export_sid+1
return export_sid,storcli_sid
#___________________________________________________
def reset_storcli_counter():
  # Reset the rozofsmount profiling counters through the rozodiag interface
  #___________________________________________________
  instance = get_rozofmount_instance()
  command = "./build/src/rozodiag/rozodiag -T mount:%d:1 -c counter reset" % (instance)
  subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#___________________________________________________
def check_storcli_crc():
  # Tell whether the rozofsmount profiler reports CRC errors on read.
  #___________________________________________________
  instance = get_rozofmount_instance()
  command = "./build/src/rozodiag/rozodiag -T mount:%d:1 -c profile" % (instance)
  cmd = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # A "read_blk_crc" line only shows up when CRC errors occurred
  return any("read_blk_crc" in line for line in cmd.stdout)
#___________________________________________________
def get_layout():
  # Get the inverse forward and safe values
  #___________________________________________________
  # Parses the "LAYOUT_<inverse>_<forward>_<safe>" token printed by the
  # rozofsmount and stores the three fields in the matching globals.
  # NOTE(review): the values are kept as strings (no int() conversion);
  # confirm that callers expect strings.
  global inverse, forward, safe
  inst=get_rozofmount_instance()
  string="./build/src/rozodiag/rozodiag -T mount:%d -c layout"%(inst)
  parsed = shlex.split(string)
  cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  for line in cmd.stdout:
    if "LAYOUT_" in line:
      words=line.split()
      layout=words[0]
      values=layout.split('_')
      inverse=values[1]
      forward=values[2]
      safe=values[3]
      return
#___________________________________________________
def export_count_sid_up ():
  # Use debug interface to count the number of sid up
  # seen from the export.
  #___________________________________________________
  # Returns the count of sid lines flagged UP for the volume whose id is
  # held in the global vid (a string, as returned by get_volume_id).
  global vid
  string="./build/src/rozodiag/rozodiag -T export -c vfstat_stor"
  parsed = shlex.split(string)
  cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  match=int(0)
  for line in cmd.stdout:
    if len(line.split()) == 0:
      continue
    # Only account for the volume under test
    if line.split()[0] != vid:
      continue
    if "UP" in line:
      match=match+1
  return match
#___________________________________________________
def get_volume_id ():
  # Read the RozoFS volume identifier of the mount point through the
  # "rozofs" extended attribute of the global mount path mnt.
  # Returns the VID as a string, or -1 (an int) when no VID line is found.
  p = subprocess.Popen(["attr","-g","rozofs",mnt], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  for line in p.stdout:
    if "VID" in line:
      return line.split()[2]
  return -1
#___________________________________________________
def get_rozofmount_instance ():
  # Scan the process list for the rozofsmount process that serves the
  # global mount point mnt and return its "instance=<n>" option as an int.
  # Aborts the whole test (exit -1) when no matching process is found.
  p = subprocess.Popen(["ps","-ef"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  for proc in p.stdout:
    if not "rozofsmount/rozofsmount" in proc:
      continue
    # The mount point must appear as a path component of the command line
    for words in proc.split():
      if mnt in words.split("/"):
        for opt in proc.split(" "):
          if opt.startswith("instance="):
            instance=opt.split("instance=")[1]
            return int(instance)
  print "Instance of %s not found !!!\n"%(mnt)
  exit(-1)
#___________________________________________________
def get_site_number ():
  # Retrieve the "running_site" value from the rozofsmount start up
  # configuration; 0 when the parameter is not displayed.
  instance = get_rozofmount_instance()
  command = "./build/src/rozodiag/rozodiag -T mount:%d -c start_config" % (instance)
  cmd = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  for line in cmd.stdout:
    fields = line.split('=')
    if fields[0].strip() == "running_site":
      return int(fields[1])
  return 0
#___________________________________________________
def storcli_count_sid_available ():
  # Use debug interface to count the number of sid
  # available seen from the storcli.
  #___________________________________________________
  inst=get_rozofmount_instance()
  string="./build/src/rozodiag/rozodiag -T mount:%d:1 -c storaged_status"%(inst)
  parsed = shlex.split(string)
  cmd = subprocess.Popen(parsed, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  # Looking for state=UP and selectable=YES
  match=int(0)
  for line in cmd.stdout:
    # The output is an ASCII table whose columns are separated by '|';
    # column 4 holds the state, column 6 the selectable flag.
    words=line.split('|')
    if len(words) >= 11:
      if 'YES' in words[6] and 'UP' in words[4]:
        match=match+1
  return match
#__________________________________________ |
gudnithor4/ThrounHugb | plot.py | Python | mit | 1,664 | 0.006707 | import math
import matplotlib.pyplot as plt
import numpy
import reiknivelar
def plot_framtidarvirdi(eign, timabil, vextir):
    """Plot the future value of a single deposit compounded once per period."""
    vaxtabrot = vextir / 100.0
    fig1 = plt.figure()
    # Future value after n periods: eign * (1 + rate)^n
    plotData = [eign * (math.pow((1 + vaxtabrot), manudur))
                for manudur in range(0, timabil + 1)]
    plt.plot(plotData, linewidth=2, color='r')
    plt.ylabel(u"Krónur")
    plt.xlabel(u"Ár")
    fig1.canvas.set_window_title(u'Framtíðarvirði')
    plt.show()
def plot_reglulegurspar(greidsla, timabil, vextir):
    """Plot the accumulated value of a fixed monthly payment (annuity)."""
    vex = (vextir / 100.0) / 12.0
    fig2 = plt.figure()
    # Annuity future value after n months: p/i * ((1 + i)^n - 1)
    plotData = [(greidsla / vex) * ((math.pow((1 + vex), manudur)) - 1)
                for manudur in range(0, timabil + 1)]
    plt.plot(plotData, linewidth=2, color='r')
    plt.ylabel(u"Krónur")
    plt.xlabel(u"Mánuðir")
    fig2.canvas.set_window_title('Reglulegur sparnaður')
    plt.show()
def plot_hofudstols_ryrnun(hofudstoll, timabil, vextir, verdbolga, manadarGreidsla, fjoldiManadarGreidslna):
    # Plot the remaining principal of a loan over time, with and without an
    # additional savings-backed repayment, using the reiknivelar helpers.
    plotData1 = reiknivelar.hofudstols_ryrnun_an_sparnadar(hofudstoll, timabil, vextir, verdbolga)
    plotData2 = reiknivelar.hofudstols_ryrnun_med_sparnadi(hofudstoll, timabil, vextir, verdbolga, manadarGreidsla, fjoldiManadarGreidslna)
    fig3 = plt.figure()
    line1, = plt.plot(plotData1, linewidth=2, color='r')
    line2, = plt.plot(plotData2, linewidth=2, color='b')
    plt.ylabel(u"Krónur")
    plt.xlabel(u"Mánuðir")
    fig3.canvas.set_window_title("Greiðsla af láni")
    # NOTE(review): the first legend label 'Niðugreiðsla' looks like a typo
    # for 'Niðurgreiðsla' — confirm before changing this user-facing text.
    fig3.legend([line1, line2], [u'Niðugreiðsla án sparnaðar', u'Niðurgreiðsla með sparnaði'], bbox_to_anchor=[0.5, 0.955], loc='center', ncol=2)
    plt.show()
Meuh-Factory/womoobox | settings.py | Python | gpl-2.0 | 490 | 0.006122 | # Configuration
# Generate key with specific length and chars
import string
# Length of generated keys (characters)
KEY_LENGTH = 50
# Alphabet the keys are drawn from: a-z, A-Z, 0-9
KEY_REF_SETS = string.ascii_letters + string.digits
# When getting last Moos to init map
MAX_NUMBER_OF_INITIAL_MOO = 25
# When getting last Moos from last call
MAX_NUMBER_OF_MOO = 25
# Do not accept more than 1 (same animal) moo every X minutes
MIN_DURATION_BETWEEN_MOO = 1 # minutes
# Supported languages
SUPPORTED_LANGUAGES = (
    'fr',
    'en',
)
DEFAULT_LANGUAGE = SUPPORTED_LANGUAGES[1]  # index 1 -> 'en'
mostaphaRoudsari/Honeybee | src/Honeybee_EnergyPlus NoMass Opaque Material.py | Python | gpl-3.0 | 5,463 | 0.011715 | #
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to create a custom opaque material that has no mass, which can be plugged into the "Honeybee_EnergyPlus Construction" component.
_
It is important to note that this component creates a material with no mass and, because of this, the accuracy of the component is not as great as a material that has mass. However, this component is very useful if you only have an R-value for a material (or a construction) and you know that the mass is relatively small.
_
If you want to create a material that accounts for mass, you should use the "Honeybee_EnergyPlus Window Material" component.
-
Provided by Honeybee 0.0.66
Args:
_name: A text name for your NoMass Opaque Material.
_roughness_: A text value that indicated the roughness of your material. This can be either "VeryRough", "Rough", "MediumRough", "MediumSmooth", "Smooth", and "VerySmooth". The default is set to "Rough".
_R_Value: A number representing the R-Value of the material in m2-K/W.
_thermAbsp_: An number between 0 and 1 that represents the thermal abstorptance of the material. The default is set to 0.9, which is common for most non-metallic materials.
_solAbsp_: An number between 0 and 1 that represents the abstorptance of solar radiation by the material. The default is set to 0.7, which is common for most non-metallic materials.
_visAbsp_: An number between 0 and 1 that represents the abstorptance of visible light by the material. The default is set to 0.7, which is common for most non-metallic materials.
Returns:
EPMaterial: A no-mass opaque material that can be plugged into the "Honeybee_EnergyPlus Construction" component.
"""
ghenv.Component.Name = "Honeybee_EnergyPlus NoMass Opaque Material"
ghenv.Component.NickName = 'EPNoMassMat'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "06 | Energy | Material | Construction"
#compatibleHBVersion = VER 0.0.56\nFEB_01_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "1"
except: pass
import Grasshopper.Kernel as gh
w = gh.GH_RuntimeMessageLevel.Warning
def checkInputs():
#Check to be sure that SHGC and VT are between 0 and 1.
checkData = True
def checkBtwZeroAndOne(variable, default, variableName):
if variable == None: newVariable = default
else:
if variable <= 1 and variable >= 0: newVariable = variable
else:
newVariable = 0
checkData = False
warning = variableName + " must be between 0 and 1."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return newVariable
thermAbs = checkBtwZeroAndOne(_thermAbsp_, None, "_thermAbsp_")
solAbsp = checkBtwZeroAndOne(_solAbsp_, None, "_solAbsp_")
visAbsp = checkBtwZeroAndOne(_visAbsp_, None, "_visAbsp_")
#Check the Roughness value.
| if _roughness_ != None: _roughness = _roughness_.upper()
else: _roughness = None
if _roughness == None or _roughness == "VERYROUGH" or _roughness == "ROUGH" or _roughness == "MEDIUMROUGH" or _roughness == "MEDIUMSMOOTH" or _roughness == "SMOOTH" or _roughness == "VERYSMOOTH": pass
else:
checkData = | False
warning = "_roughness_ is not valid."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
return checkData
def main(name, roughness, R_Value, thermAbsp, solAbsp, visAbsp):
    """Assemble the EnergyPlus 'Material:NoMass' definition string.

    None values fall back to the documented defaults
    (Rough / 0.9 / 0.7 / 0.7).  The name is upper-cased.
    """
    if roughness == None: roughness = "Rough"
    if thermAbsp == None: thermAbsp = 0.9
    if solAbsp == None: solAbsp = 0.7
    if visAbsp == None: visAbsp = 0.7
    fields = [
        (name.upper(), "Name"),
        (roughness, "Roughness"),
        (R_Value, "Thermal Resistance {m2-K/W}"),
        (thermAbsp, "Thermal Absorptance"),
        (solAbsp, "Solar Absorptance"),
        (visAbsp, "Visible Absorptance"),
    ]
    # Every field but the last ends with ','; the last one closes with ';'.
    lines = ["Material:NoMass,"]
    for value, comment in fields[:-1]:
        lines.append(str(value) + ", !" + str(comment))
    value, comment = fields[-1]
    lines.append(str(value) + "; !" + str(comment))
    return "\n".join(lines)
# Only run once both mandatory inputs are wired up; EPMaterial is the
# component output consumed by the EnergyPlus Construction component.
if _name and _R_Value:
    checkData = checkInputs()
    if checkData == True:
        EPMaterial = main(_name, _roughness_, _R_Value, _thermAbsp_, _solAbsp_, _visAbsp_)
jimporter/bfg9000 | test/integration/languages/test_java.py | Python | bsd-3-clause | 2,704 | 0 | import glob
import os
from .. import *
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJava(IntegrationTest):
    """Build, run and install the 'languages/java' example program."""
    def __init__(self, *args, **kwargs):
        super().__init__(os.path.join('languages', 'java'), install=True,
                         *args, **kwargs)
    def test_build(self):
        # Building should produce a runnable jar; remove the intermediate
        # .class files first so we really execute the packaged jar.
        self.build('program.jar')
        for i in glob.glob('*.class*'):
            os.remove(i)
        self.assertOutput(['java', '-jar', 'program.jar'],
                          'hello from java!\n')
    def test_install(self):
        # 'install' should copy only the jar into libdir, and the installed
        # jar must still run after the build directory is wiped.
        self.build('install')
        self.assertDirectory(self.installdir, [
            os.path.join(self.libdir, 'program.jar'),
        ])
        os.chdir(self.srcdir)
        cleandir(self.builddir)
        self.assertOutput(
            ['java', '-jar', os.path.join(self.libdir, 'program.jar')],
            'hello from java!\n'
        )
@skip_if('gcj' not in test_features, 'skipping gcj tests')
class TestGcj(IntegrationTest):
    """Compile the java example to a native binary with gcj and run it."""
    def __init__(self, *args, **kwargs):
        compiler = os.getenv('GCJ', 'gcj')
        super().__init__(os.path.join('languages', 'java'),
                         extra_env={'JAVAC': compiler},
                         *args, **kwargs)
    def test_build(self):
        self.build('program')
        self.assertOutput([executable('program')], 'hello from java!\n')
@skip_if('java' not in test_features, 'skipping java tests')
@skip_if_backend('msbuild')
class TestJavaLibrary(IntegrationTest):
    """Build, install and consume the 'languages/java_library' example."""
    def __init__(self, *args, **kwargs):
        super().__init__(os.path.join('languages', 'java_library'),
                         install=True, *args, **kwargs)
    def test_build(self):
        # The program jar must run against the library without the loose
        # .class files lying around.
        self.build('program.jar')
        for i in glob.glob('*.class*'):
            os.remove(i)
        self.assertOutput(['java', '-jar', 'program.jar'],
                          'hello from library!\n')
    def test_install(self):
        # Both the library jar and the program jar get installed; the
        # program must still run once the build tree is gone.
        self.build('install')
        self.assertDirectory(self.installdir, [
            os.path.join(self.libdir, 'lib.jar'),
            os.path.join(self.libdir, 'program.jar'),
        ])
        os.chdir(self.srcdir)
        cleandir(self.builddir)
        self.assertOutput(
            ['java', '-jar', os.path.join(self.libdir, 'program.jar')],
            'hello from library!\n'
        )
    def test_package(self):
        # A separate project consumes the installed lib.jar via CLASSPATH.
        self.build('install')
        self.configure(
            srcdir=os.path.join('languages', 'java_package'), installdir=None,
            extra_env={'CLASSPATH': os.path.join(self.libdir, '*')}
        )
        self.build()
        self.assertOutput(['java', '-jar', 'program.jar'],
                          'hello from library!\n')
|
mattpitkin/corner.py | corner/tests/test_hist2d.py | Python | bsd-2-clause | 2,349 | 0 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as pl
from m | atplotlib.testing.decorators import image_comparison
import corner
def _run_hist2d(nm, N=50000, seed=1234, **kwargs):
    # Render corner.hist2d on N samples drawn from a fixed-seed standard
    # normal; extra kwargs are forwarded to corner.hist2d.  'nm' names the
    # baseline image the caller's @image_comparison decorator checks.
    # Generate some fake data.
    np.random.seed(seed)
    x = np.random.randn(N)
    y = np.random.randn(N)
    fig, ax = pl.subplots(1, 1, figsize=(8, 8))
    corner.hist2d(x, y, ax=ax, **kwargs)
# The tests below are baseline-image regressions: each renders one hist2d
# variant through _run_hist2d and @image_comparison compares the resulting
# figure against the stored PNG named in baseline_images.
@image_comparison(baseline_images=["cutoff"], extensions=["png"])
def test_cutoff():
    # Plot range clips the samples on the low side of both axes.
    _run_hist2d("cutoff", range=[(0, 4), (0, 2.5)])
@image_comparison(baseline_images=["cutoff2"], extensions=["png"])
def test_cutoff2():
    # Very narrow y-range with smoothing and filled contours, more samples.
    _run_hist2d("cutoff2", range=[(-4, 4), (-0.1, 0.1)], N=100000,
                fill_contours=True, smooth=1)
@image_comparison(baseline_images=["basic"], extensions=["png"])
def test_basic():
    # Default rendering options.
    _run_hist2d("basic")
@image_comparison(baseline_images=["color"], extensions=["png"])
def test_color():
    # Non-default base color.
    _run_hist2d("color", color="g")
@image_comparison(baseline_images=["levels1"], extensions=["png"])
def test_levels1():
    # Custom credible-level contours.
    _run_hist2d("levels1", levels=[0.68, 0.95])
@image_comparison(baseline_images=["levels2"], extensions=["png"])
def test_levels2():
    # A different pair of contour levels.
    _run_hist2d("levels2", levels=[0.5, 0.75])
@image_comparison(baseline_images=["filled"], extensions=["png"])
def test_filled():
    # Filled contour rendering.
    _run_hist2d("filled", fill_contours=True)
@image_comparison(baseline_images=["smooth1"], extensions=["png"])
def test_smooth1():
    # Coarser binning only.
    _run_hist2d("smooth1", bins=50)
@image_comparison(baseline_images=["smooth2"], extensions=["png"])
def test_smooth2():
    # Anisotropic Gaussian smoothing of the 2-D histogram.
    _run_hist2d("smooth2", bins=50, smooth=(1.0, 1.5))
@image_comparison(baseline_images=["philsplot"], extensions=["png"])
def test_philsplot():
    # Combined option set: no datapoints, filled custom levels, smoothing.
    _run_hist2d("philsplot", plot_datapoints=False, fill_contours=True,
                levels=[0.68, 0.95], color="g", bins=50, smooth=1.)
@image_comparison(baseline_images=["lowN"], extensions=["png"])
def test_lowN():
    # Degenerate small-sample case.
    _run_hist2d("lowN", N=20)
@image_comparison(baseline_images=["lowNfilled"], extensions=["png"])
def test_lowNfilled():
    # Small sample with filled contours.
    _run_hist2d("lowNfilled", N=20, fill_contours=True)
@image_comparison(baseline_images=["lowNnofill"], extensions=["png"])
def test_lowNnofill():
    # Small sample with contour filling explicitly disabled.
    _run_hist2d("lowNnofill", N=20, no_fill_contours=True)
|
kingvuplus/rr | lib/python/Plugins/Extensions/DVDBurn/ProjectSettings.py | Python | gpl-2.0 | 11,215 | 0.027463 | from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.InputBox import InputBox
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.P | rogress import Progress
from Components.FileList import FileList
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_FONTS, SCOPE_HDD
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
class FileBrowser(Screen, HelpableScreen):
	"""File/directory picker used by the DVD burn plugin.

	'scope' selects which kind of file is browsed (project, menutemplate,
	menubg, menuaudio, vmgm, font_face, isopath, image) and determines both
	the start directory and the filename filter.  The screen closes with
	(path, scope, configRef).
	"""
	def __init__(self, session, scope, configRef):
		Screen.__init__(self, session)
		# for the skin: first try FileBrowser_DVDBurn, then FileBrowser, this allows individual skinning
		self.skinName = ["FileBrowser_DVDBurn", "FileBrowser" ]
		HelpableScreen.__init__(self)
		self.scope = scope
		pattern = ""
		self.configRef = configRef
		currDir = "/"
		if self.scope == "project":
			currDir = self.getDir()
			pattern = "(?i)^.*\.(ddvdp\.xml)"
		elif self.scope == "menutemplate":
			currDir = self.getDir()
			pattern = "(?i)^.*\.(ddvdm\.xml)"
		if self.scope == "menubg":
			currDir = self.getDir(configRef.getValue())
			pattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp)"
		elif self.scope == "menuaudio":
			currDir = self.getDir(configRef.getValue())
			pattern = "(?i)^.*\.(mp2|m2a|ac3)"
		elif self.scope == "vmgm":
			currDir = self.getDir(configRef.getValue())
			pattern = "(?i)^.*\.(mpg|mpeg)"
		elif self.scope == "font_face":
			currDir = self.getDir(configRef.getValue(), resolveFilename(SCOPE_FONTS))
			pattern = "(?i)^.*\.(ttf)"
		elif self.scope == "isopath":
			currDir = configRef.getValue()
		elif self.scope == "image":
			currDir = resolveFilename(SCOPE_HDD)
			pattern = "(?i)^.*\.(iso)"
		self.filelist = FileList(currDir, matchingPattern=pattern)
		self["filelist"] = self.filelist
		self["FilelistActions"] = ActionMap(["SetupActions"],
			{
				"save": self.ok,
				"ok": self.ok,
				"cancel": self.exit
			})
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		self.setTitle(_("DVD file browser"))
	def getDir(self, currentVal=None, defaultDir=None):
		# Parent directory of the current value, else the given default,
		# else the plugin's own directory.
		if currentVal:
			return (currentVal.rstrip("/").rsplit("/",1))[0]
		return defaultDir or (resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/")
	def ok(self):
		if self.filelist.canDescent():
			self.filelist.descent()
			if self.scope == "image":
				# A DVD structure directory (VIDEO_TS) also counts as an
				# "image" selection.
				path = self["filelist"].getCurrentDirectory() or ""
				if fileExists(path+"VIDEO_TS"):
					self.close(path,self.scope,self.configRef)
		else:
			ret = self["filelist"].getCurrentDirectory() + '/' + self["filelist"].getFilename()
			self.close(ret,self.scope,self.configRef)
	def exit(self):
		# NOTE(review): for "isopath" close() is invoked twice in a row
		# (once with the directory, then with None) — looks unintended;
		# confirm against Screen.close semantics before changing.
		if self.scope == "isopath":
			self.close(self["filelist"].getCurrentDirectory(),self.scope,self.configRef)
		self.close(None,False,None)
class ProjectSettings(Screen,ConfigListScreen):
skin = """
<screen name="ProjectSettings" position="center,center" size="560,440" title="Collection settings" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,276" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,350" zPosition="1" size="560,2" />
<widget source="info" render="Label" position="10,360" size="550,80" font="Regular;18" halign="center" valign="center" />
</screen>"""
	def __init__(self, session, project = None):
		"""Settings dialog for a DVD project; edits project.settings in place."""
		Screen.__init__(self, session)
		self.project = project
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["key_yellow"] = StaticText(_("Load"))
		# The save button and the format-variable help are expert+ only.
		if config.usage.setup_level.index >= 2: # expert+
			self["key_blue"] = StaticText(_("Save"))
		else:
			self["key_blue"] = StaticText()
		if config.usage.setup_level.index >= 2: # expert+
			infotext = _("Available format variables") + ":\n$i=" + _("Track") + ", $t=" + _("Title") + ", $d=" + _("Description") + ", $l=" + _("length") + ", $c=" + _("chapters") + ",\n" + _("Record") + " $T=" + _("Begin time") + ", $Y=" + _("Year") + ", $M=" + _("month") + ", $D=" + _("day") + ",\n$A=" + _("audio tracks") + ", $C=" + _("Channel") + ", $f=" + _("filename")
		else:
			infotext = ""
		self["info"] = StaticText(infotext)
		# Maps ConfigList entries back to their settings attribute names.
		self.keydict = {}
		self.settings = project.settings
		ConfigListScreen.__init__(self, [])
		self.initConfigList()
		self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
		{
			"green": self.exit,
			"red": self.cancel,
			"blue": self.saveProject,
			"yellow": self.loadProject,
			"cancel": self.cancel,
			"ok": self.ok,
		}, -2)
		self.onLayoutFinish.append(self.layoutFinished)
self.onLayoutFinish.append(self.layoutFinished)
	def layoutFinished(self):
		# Title is set after layout so the active translation is used.
		self.setTitle(_("Collection settings"))
	def changedConfigList(self):
		# Rebuild the option list when a mode switch changes which
		# options apply (authoring mode / output target).
		key = self.keydict[self["config"].getCurrent()[1]]
		if key == "authormode" or key == "output":
			self.initConfigList()
def initConfigList(self):
authormode = self.settings.authormode.getValue()
output = self.settings.output.getValue()
self.list = []
self.list.append(getConfigListEntry(_("Collection name"), self.settings.name))
self.list.append(getConfigListEntry(_("Authoring mode"), self.settings.authormode))
self.list.append(getConfigListEntry(_("Output"), self.settings.output))
if output == "iso":
self.list.append(getConfigListEntry(_("ISO path"), self.settings.isopath))
if authormode.startswith("menu"):
self.list.append(getConfigListEntry(_("Menu")+' '+_("template file"), self.settings.menutemplate))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(getConfigListEntry(_("Menu")+' '+_("Title"), self.project.menutemplate.settings.titleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Subtitles"), self.project.menutemplate.settings.subtitleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("background image"), self.project.menutemplate.settings.menubg))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Language selection"), self.project.menutemplate.settings.menulang))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("headline")+' '+_("color"), self.settings.color_headline))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("text")+' '+_("color"), self.settings.color_button))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("highlighted button")+' '+_("color"), self.settings.color_highlight))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("font face"), self.settings.font_face))
#self.list.append(getConfigListEntry(_("Font size")+' ('+_("headline")+', '+_("Title")+', '+_("Subtitles")+')', self.settings.font_size))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("spaces (top, between rows, left)"), self.settings.space))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("Audio"), self.settings.menuaudio))
if config.usage.setup_level.in |
iwm911/plaso | plaso/parsers/sqlite_plugins/interface.py | Python | apache-2.0 | 10,505 | 0.006092 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a SQLite parser."""
import logging
import os
import tempfile
from plaso.lib import errors
from plaso.lib import plugin
import pytz
import sqlite3
class SQLiteCache(plugin.BasePluginCache):
  """A cache storing query results for SQLite plugins."""

  def CacheQueryResults(
      self, sql_results, attribute_name, key_name, values):
    """Store the rows of a SQL query result as a dict attribute.

    Creates a dict attribute named attribute_name on this cache object.
    Each row of sql_results contributes one entry: the key is the row's
    key_name column; the value is that row's single values column when
    values has one element, otherwise a list holding the row's columns in
    the order given by values.

    Example: for 'SELECT foo, bla, bar FROM my_table' with
    attribute_name='all_the_things', key_name='foo', values=['bla', 'bar']
    and result rows ('first', 'stuff', 'things') and
    ('second', 'another stuff', 'another thing'), the cache ends up with:

      all_the_things = {
          'first': ['stuff', 'things'],
          'second': ['another stuff', 'another thing']}

    Args:
      sql_results: The SQL result object (sqlite.Cursor) after executing
                   a SQL command on the database.
      attribute_name: Name of the dict attribute created on this cache.
      key_name: Name of the result column used as the dict key.
      values: List of result columns stored as the dict values.
    """
    results = {}
    setattr(self, attribute_name, results)
    # fetchone() returns None once the cursor is exhausted, which is
    # exactly the sentinel form of iter().
    for row in iter(sql_results.fetchone, None):
      if len(values) == 1:
        results[row[key_name]] = row[values[0]]
      else:
        results[row[key_name]] = [row[value] for value in values]
class SQLitePlugin(plugin.BasePlugin):
  """A SQLite plugin for Plaso."""

  __abstract = True

  NAME = 'sqlite'

  # Queries to be executed.
  # Should be a list of tuples with two entries, SQLCommand and callback
  # function name.
  QUERIES = []

  # List of tables that should be present in the database, for verification.
  REQUIRED_TABLES = frozenset([])

  def __init__(self, pre_obj):
    """Initialize the database plugin."""
    super(SQLitePlugin, self).__init__(pre_obj)
    # Fall back to UTC when no timezone was collected during preprocessing.
    self.zone = getattr(self._knowledge_base, 'zone', pytz.utc)

  def Process(self, cache=None, database=None, **kwargs):
    """Determine if this is the right plugin for this database.

    This function takes a SQLiteDatabase object and compares the list
    of required tables against the available tables in the database.
    If all the tables defined in REQUIRED_TABLES are present in the
    database then this plugin is considered to be the correct plugin
    and the function will return back a generator that yields event
    objects.

    Args:
      cache: A SQLiteCache object.
      database: A database object (instance of SQLiteDatabase).

    Returns:
      A generator that yields event objects.

    Raises:
      errors.WrongPlugin: If the database does not contain all the tables
      defined in the REQUIRED_TABLES set.
      ValueError: If the database attribute is not passed in.
    """
    if database is None:
      raise ValueError(u'Database is not set.')
    if not frozenset(database.tables) >= self.REQUIRED_TABLES:
      raise errors.WrongPlugin(
          u'Not the correct database tables for: {}'.format(
              self.plugin_name))
    super(SQLitePlugin, self).Process(**kwargs)
    return self.GetEntries(cache=cache, database=database)

  def GetEntries(self, cache=None, database=None, **kwargs):
    """Yields EventObjects extracted from a SQLite database.

    Args:
      cache: A SQLiteCache object.
      database: A database object (instance of SQLiteDatabase).

    Yields:
      EventObject extracted from the SQlite database.
    """
    for query, action in self.QUERIES:
      try:
        # Fall back to Default() when the named callback does not exist.
        call_back = getattr(self, action, self.Default)
        cursor = database.cursor
        sql_results = cursor.execute(query)
        row = sql_results.fetchone()
        while row:
          event_generator = call_back(row=row, cache=cache, database=database)
          if event_generator:
            for event_object in event_generator:
              event_object.query = query
              if not hasattr(event_object, 'offset'):
                if 'id' in row.keys():
                  event_object.offset = row['id']
                else:
                  event_object.offset = 0
              yield event_object
          row = sql_results.fetchone()
      except sqlite3.DatabaseError as exception:
        logging.debug(u'SQLite error occured: {0:s}'.format(exception))

  def Default(self, row=None, cache=None, database=None, **unused_kwargs):
    """Default callback method for SQLite events, does nothing.

    Bug fix: GetEntries invokes every callback as
    call_back(row=row, cache=cache, database=database).  The previous
    signature (self, unused_row, unused_cache) did not accept those
    keyword arguments, so reaching this fallback raised a TypeError.
    """
    logging.debug('Default handler: {0:s}'.format(row))
logging.debug('Default handler: {0:s | }'.format(unused_row))
class SQLiteDatabase(object):
"""A simple wrapper for opening up a SQLite | database."""
# Magic value for a SQLite database.
MAGIC = 'SQLite format 3'
_READ_BUFFER_SIZE = 65536
  def __init__(self, file_entry):
    """Initializes the database object.

    Args:
      file_entry: the file entry object.
    """
    self._file_entry = file_entry
    self._temp_file_name = ''
    # True once Open() has copied the data out and connected to it.
    self._open = False
    self._database = None
    self._cursor = None
    self._tables = []
  @property
  def tables(self):
    """Returns a list of all the tables in the database."""
    # Lazily opens the database on first access.
    if not self._open:
      self.Open()
    return self._tables
  @property
  def cursor(self):
    """Returns a cursor object from the database."""
    # Lazily opens the database on first access; a fresh cursor per call.
    if not self._open:
      self.Open()
    return self._database.cursor()
def Open(self):
"""Opens up a database connection and build a list of table names."""
file_object = self._file_entry.GetFileObject()
# TODO: Remove this when the classifier gets implemented
# and used. As of now, there is no check made against the file
# to verify it's signature, thus all files are sent here, meaning
# that this method assumes everything is a SQLite file and starts
# copying the content of the file into memory, which is not good
# for very large files.
file_object.seek(0, os.SEEK_SET)
data = file_object.read(len(self.MAGIC))
if data != self.MAGIC:
file_object.close()
raise IOError(
u'File {} not a SQLite database. (invalid signature)'.format(
self._file_entry.name))
# TODO: Current design copies the entire file into a buffer
# that is parsed by each SQLite parser. This is not very efficient,
# especially when many SQLite parsers are ran against a relatively
# large SQLite database. This temporary file that is created should
# be usable by all SQLite parsers so the file should only be read
# once in memory and then deleted when all SQLite parsers have completed.
# TODO: Change this into a proper implementation using APSW
# and virtua |
yishinli/emc2 | src/emc/usr_intf/axis/scripts/image-to-gcode.py | Python | lgpl-2.1 | 26,563 | 0.00832 | #!/usr/bin/python
## image-to-gcode is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by the
## Free Software Foundation; either version 2 of the License, or (at your
## option) any later version. image-to-gcode is distributed in the hope
## that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
## warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
## the GNU General Public License for more details. You should have
## received a copy of the GNU General Public License along with image-to-gcode;
## if not, write to the Free Software Foundation, Inc., 59 Temple Place,
## Suite 330, Boston, MA 02111-1307 USA
##
## image-to-gcode.py is Copyright (C) 2005 Chris Radek
## chris@timeguy.com
## image-to-gcode.py is Copyright (C) 2006 Jeff Epler
## jepler@unpy.net
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import gettext;
gettext.install("emc2", localedir=os.path.join(BASE, "share", "locale"), unicode=True)
import Image, numarray
import numarray.ieeespecial as ieee
from rs274.author import Gcode
import rs274.options
from math import *
import operator
# Tolerance used for floating-point comparisons throughout the converter.
epsilon = 1e-5
def ball_tool(r, rad):
    """Profile of a ball-nose cutter: depth at distance *r* from the axis
    for a ball of radius *rad* (negative, deepest at the centre)."""
    return -sqrt(rad ** 2 - r ** 2)
def endmill(r, dia):
    # Flat end mill: the cutting face is flat, so depth is 0 at every radius.
    return 0
def vee_common(angle):
    """Build a cutter-profile function for a vee bit of the given angle (degrees).

    The returned callable maps (r, dia) to the depth of the cone at radius r.
    """
    slope = tan(angle * pi / 180)
    def profile(r, dia):
        return r * slope
    return profile
# Cutter-profile generators, indexed by the tool-type option the UI exposes:
# ball nose, flat end mill, and 30/45/60-degree vee bits.
tool_makers = [ ball_tool, endmill, vee_common(30), vee_common(45), vee_common(60)]
def make_tool_shape(f, wdia, resp):
    # Rasterize a cutter profile into a square height-map array.
    # f: profile function f(r, rad) -> depth; wdia: tool diameter (units);
    # resp: resolution in units per pixel.
    res = 1. / resp
    dia = int(wdia*res+.5)
    wrad = wdia/2.
    if dia < 2: dia = 2
    # Cells outside the cutter radius stay at +inf so they can never contact
    # the work.  (numarray/ieeespecial is legacy Python 2 numerics.)
    n = numarray.array([[ieee.plus_inf] * dia] * dia, type="Float32")
    hdia = dia / 2.
    l = []
    for x in range(dia):
        for y in range(dia):
            r = hypot(x-hdia, y-hdia) * resp
            if r < wrad:
                z = f(r, wrad)
                l.append(z)  # NOTE(review): `l` is accumulated but never used
                n[x,y] = z
    # Shift so the deepest point of the tool sits at zero.
    n = n - n.min()
    return n
def amax(seq):
    """Return the element of *seq* with the largest absolute value.

    Ties keep the earliest such element; an empty sequence yields 0.
    """
    winner = 0
    for value in seq:
        if abs(value) > abs(winner):
            winner = value
    return winner
def group_by_sign(seq, slop=sin(pi/18), key=lambda x: x):
    """Split *seq* into runs whose keyed values share a sign.

    A new run starts whenever the keyed value flips sign by more than
    *slop*; the flipping element closes the old run and also opens the
    new one (it appears in both).  Yields lists of original elements.
    """
    sign = None
    run = []
    for item in seq:
        k = key(item)
        run.append(item)
        if sign is None:
            # No sign established yet; lock it in at the first nonzero key.
            if k != 0:
                sign = k / abs(k)
        elif sign * k < -slop:
            # Sign flipped beyond the tolerance: emit the run and restart,
            # seeding the new run with the flipping element.
            sign = k / abs(k)
            yield run
            run = [item]
    if run:
        yield run
class Convert_Scan_Alternating:
    """Scan strategy that zig-zags: every other span is milled backwards."""

    def __init__(self):
        # Number of spans handed out so far.
        self.st = 0

    def __call__(self, primary, items):
        self.st += 1
        count = self.st
        if count % 2:
            items.reverse()  # odd-numbered spans run backwards (in place)
        # Only the very first span is flagged as starting a new pass.
        yield (count == 1, items)

    def reset(self):
        self.st = 0
class Convert_Scan_Increasing:
    """Scan strategy: every span is milled in its natural (increasing) order."""

    def __call__(self, primary, items):
        yield (True, items)

    def reset(self):
        # Stateless; nothing to reset.
        pass
class Convert_Scan_Decreasing:
    """Scan strategy: every span is milled from its far end back to its start."""

    def __call__(self, primary, items):
        items.reverse()  # in-place, so callers observe the reversal
        yield (True, items)

    def reset(self):
        # Stateless; nothing to reset.
        pass
class Convert_Scan_Upmill:
    """Scan strategy that only cuts uphill: spans that descend are reversed."""

    def __init__(self, slop=sin(pi / 18)):
        # Sign-change tolerance forwarded to group_by_sign().
        self.slop = slop

    def __call__(self, primary, items):
        for span in group_by_sign(items, self.slop, operator.itemgetter(2)):
            if amax([pt[2] for pt in span]) < 0:
                span.reverse()
            yield True, span

    def reset(self):
        pass
class Convert_Scan_Downmill:
    """Scan strategy that only cuts downhill: spans that climb are reversed."""

    def __init__(self, slop=sin(pi / 18)):
        # Sign-change tolerance forwarded to group_by_sign().
        self.slop = slop

    def __call__(self, primary, items):
        for span in group_by_sign(items, self.slop, operator.itemgetter(2)):
            if amax([pt[2] for pt in span]) > 0:
                span.reverse()
            yield True, span

    def reset(self):
        pass
class Reduce_Scan_Lace:
    """Filter a converter's spans down to the sub-spans that are steep enough.

    Wraps another scan converter and, for each span it produces, keeps only
    maximal runs where the relevant slope component passes the *slope*
    threshold, with run boundaries snapped to multiples of *keep* samples.
    """

    def __init__(self, converter, slope, keep):
        self.converter = converter   # underlying scan-order converter
        self.slope = slope           # slope threshold for keeping samples
        self.keep = keep             # boundary-snapping granularity (samples)

    def __call__(self, primary, items):
        slope = self.slope
        keep = self.keep
        if primary:
            # Primary direction: keep samples whose cross-slope is <= slope.
            idx = 3
            test = operator.le
        else:
            # Secondary direction: keep samples whose own slope is >= slope.
            idx = 2
            test = operator.ge

        def bos(j):
            # Round j down to a multiple of `keep` (beginning of segment).
            return j - j % keep

        def eos(j):
            # Round j up to a multiple of `keep` (end of segment).
            if j % keep == 0:
                return j
            return j + keep - j % keep

        for _flag, span in self.converter(primary, items):
            start = None
            for pos, sample in enumerate(span):
                k = sample[idx]
                if start is None:
                    if test(abs(k), slope):
                        start = last = pos
                else:
                    if test(abs(k), slope):
                        last = pos
                    else:
                        # Tolerate short gaps (< keep) without splitting.
                        if pos - last < keep:
                            continue
                        yield True, span[bos(start):eos(last + 1)]
                        start = None
            if start is not None:
                yield True, span[start:]

    def reset(self):
        # Bug fix: this previously called self.primary.reset(), but no
        # `primary` attribute is ever set -- the wrapped converter is
        # stored as `converter`, so reset() always raised AttributeError.
        self.converter.reset()
# G-code unit-selection words: G20 = inches, G21 = millimetres.
unitcodes = ['G20', 'G21']
# Scan-order strategies, indexed by the UI's scan-pattern option.
convert_makers = [ Convert_Scan_Increasing, Convert_Scan_Decreasing, Convert_Scan_Alternating, Convert_Scan_Upmill, Convert_Scan_Downmill ]
def progress(a, b):
    # Report filter progress (a of b, as a percentage) to the AXIS GUI via
    # stderr, but only when AXIS asked for it by exporting AXIS_PROGRESS_BAR.
    # (Python 2 print-statement and dict.has_key syntax, as in the rest of
    # this file.)
    if os.environ.has_key("AXIS_PROGRESS_BAR"):
        print >>sys.stderr, "FILTER_PROGRESS=%d" % int(a*100./b+.5)
        sys.stderr.flush()
class Converter:
    def __init__(self,
            image, units, tool_shape, pixelsize, pixelstep, safetyheight, \
            tolerance, feed, convert_rows, convert_cols, cols_first_flag,
            entry_cut, spindle_speed, roughing_offset, roughing_delta,
            roughing_feed):
        """Gather all parameters for one image-to-gcode conversion.

        `image` is the 2-D depth map; `tool_shape` the cutter profile from
        make_tool_shape().  The remaining arguments mirror the UI options:
        units, pixel size/step, safety height, path tolerance, feeds, the
        row/column scan strategies and their ordering, entry-cut style,
        spindle speed, and the roughing parameters.
        """
        self.image = image
        self.units = units
        self.tool = tool_shape
        self.pixelsize = pixelsize
        self.pixelstep = pixelstep
        self.safetyheight = safetyheight
        self.tolerance = tolerance
        self.base_feed = feed
        self.convert_rows = convert_rows
        self.convert_cols = convert_cols
        self.cols_first_flag = cols_first_flag
        self.entry_cut = entry_cut
        self.spindle_speed = spindle_speed
        self.roughing_offset = roughing_offset
        self.roughing_delta = roughing_delta
        self.roughing_feed = roughing_feed
        # Per-row/column result cache, filled lazily during milling.
        self.cache = {}
        w, h = self.w, self.h = image.shape
        ts = self.ts = tool_shape.shape[0]
        # Usable interior once the tool footprint is accounted for.
        self.h1 = h - ts
        self.w1 = w - ts
        # NOTE(review): scales the tool profile by pixel size and footprint;
        # confirm the intended units of this product.
        self.tool_shape = tool_shape * self.pixelsize * ts / 2;
    def one_pass(self):
        # Emit one full milling pass: columns and/or rows in the configured
        # order, raising to the safety height between direction changes.
        # (self.feed and self.g are presumably set by the caller before this
        # runs -- TODO confirm against convert().)
        g = self.g
        g.set_feed(self.feed)
        if self.convert_cols and self.cols_first_flag:
            self.g.set_plane(19)  # YZ arc plane for column scanning
            self.mill_cols(self.convert_cols, True)
            if self.convert_rows: g.safety()
        if self.convert_rows:
            self.g.set_plane(18)  # XZ arc plane for row scanning
            self.mill_rows(self.convert_rows, not self.cols_first_flag)
        if self.convert_cols and not self.cols_first_flag:
            self.g.set_plane(19)
            if self.convert_rows: g.safety()
            self.mill_cols(self.convert_cols, not self.convert_rows)
        # Rewind the scan strategies so the next pass starts fresh.
        if self.convert_cols:
            self.convert_cols.reset()
        if self.convert_rows:
            self.convert_rows.reset()
        g.safety()
def convert(self):
self.g = g = Gcode(safetyheight=self.safetyheight,
tolerance=self.tolerance,
spindle_speed=self.spindle_speed,
units=self.units)
g.begin()
g.continuous(self.tolerance)
g.safety()
if self.roughing_delta and self.roughing_offset:
base_image = self.image
rough = make_tool_shape(ball_tool,
2*self.roughing_offset, self.pixelsize)
w, h = base_image.shape
tw, th = rough.shape
w1 = w + tw
h1 = h + th
nim1 = numarray.zeros((w1, h1), 'Float32') + base_image.min()
nim1[tw/2:tw/2+w, th/2:th/2+h] = base_image
self.image = numarray.zeros((w,h), type |
leyyin/university-SE | school/util.py | Python | mit | 110 | 0 |
def is_integer(nr):
    """Return True when *nr* can be converted to int, False otherwise."""
    try:
        int(nr)
    except ValueError:
        return False
    return True
| |
FreedomCoop/valuenetwork | account/admin.py | Python | agpl-3.0 | 1,314 | 0 | from django.contrib import admin
from account.models import (
Account,
AccountDeletion,
EmailAddress,
PasswordExpiry,
PasswordHistory,
SignupCode,
)
class SignupCodeAdmin(admin.ModelAdmin):
    # Admin for invitation/signup codes: show usage counters and expiry,
    # allow searching by the code itself or the invitee's email.
    list_display = ["code", "max_uses", "use_count", "expiry", "created"]
    search_fields = ["code", "email"]
    list_filter = ["created"]
    # Avoid rendering a huge <select> for the inviting user on large sites.
    raw_id_fields = ["inviter"]
class AccountAdmin(admin.ModelAdmin):
    # Base admin for account models; look the related user up by ID.
    raw_id_fields = ["user"]
class AccountDeletionAdmin(AccountAdmin):
    # Track account-deletion requests and when the data was expunged.
    list_display = ["email", "date_requested", "date_expunged"]
class EmailAddressAdmin(AccountAdmin):
    # Manage per-user email addresses and their verified/primary flags.
    list_display = ["user", "email", "verified", "primary"]
    search_fields = ["email", "user__username"]
class PasswordExpiryAdmin(admin.ModelAdmin):
    # Per-user password-expiry settings.
    raw_id_fields = ["user"]
class PasswordHistoryAdmin(admin.ModelAdmin):
    # Historical password records, listed newest-first per user.
    raw_id_fields = ["user"]
    list_display = ["user", "timestamp"]
    list_filter = ["user"]
    ordering = ["user__username", "-timestamp"]
# Attach each account model to its ModelAdmin configuration.
_registrations = (
    (Account, AccountAdmin),
    (SignupCode, SignupCodeAdmin),
    (AccountDeletion, AccountDeletionAdmin),
    (EmailAddress, EmailAddressAdmin),
    (PasswordExpiry, PasswordExpiryAdmin),
    (PasswordHistory, PasswordHistoryAdmin),
)
for _model, _model_admin in _registrations:
    admin.site.register(_model, _model_admin)
|
ZachAnders/MPServices | mpservices.py | Python | bsd-2-clause | 1,761 | 0.034072 | #!/usr/bin/python
import sys
import osxdaemons
# ANSI terminal escape codes used to colorize service status output.
COL_RED = "\033[91m"
COL_GRN = "\033[92m"
COL_END = "\033[0m"
# Verb aliases: any of these map to the two canonical launchctl actions.
load_actions = ["up", "on", "load", "start"]
unload_actions = ["down", "off", "unload", "stop"]
ACTIONS = {act: "load" for act in load_actions}
ACTIONS.update({act: "unload" for act in unload_actions})


def usage():
    """Print a short usage summary, including the accepted verbs."""
    sname = str(sys.argv[0])
    print("Lists or starts/stops macports related services.")
    print("Usage: ./" + sname + " [<service name> <verb>] ")
    # Bug fix: the original printed the "Valid verbs:" header with no verbs
    # after it, leaving the user to guess.
    print("Valid verbs: " + ", ".join(sorted(ACTIONS)))
def match_service(sname):
    """Resolve a partial service name to exactly one installed daemon.

    Returns the unique daemon whose name contains *sname*, or None when
    the fragment matches no daemon or more than one (after printing why).
    """
    matches = [daemon for daemon in osxdaemons.get_all_daemons() if sname in daemon]
    if not matches:
        # Bug fix: the original fell through to matches[0] here and raised
        # IndexError whenever nothing matched.
        print("No service matching: " + str(sname))
        return None
    if len(matches) > 1:
        print("Matched too many services:\n")
        for match in matches:
            print("> " + match + "\n")
        return None
    return matches[0]
def match_action(action):
    """Translate a user verb into the canonical action ("load"/"unload").

    Returns None when the verb is not recognized.
    """
    return ACTIONS.get(action)
de | f service_action(service, action):
action = match_action(action)
if action:
print(action.title() + "ing service: " + service)
return osxdaemons.do(service, action)
else:
print("Wtf I don't know how to " + action + ".")
usage()
return -1
def print_services():
    # List every installed daemon, colorized green when currently running.
    running_daemons = osxdaemons.get_running_daemons()
    for daemon in osxdaemons.get_all_daemons():
        # Pad the name to a fixed 60-column field so statuses line up.
        outs = daemon + " "*(60-len(daemon))
        if daemon in running_daemons:
            col = COL_GRN
            status = "RUNNING"
        else:
            col = COL_RED
            status = "NOT RUNNING"
        print(outs + col + status + COL_END)
def main():
    """Entry point: list services with no args, or apply a verb to a service."""
    if len(sys.argv) == 1:
        print_services()
        return 0
    elif len(sys.argv) == 3:
        sname, action = sys.argv[1:3]
        sname = match_service(sname)
        return service_action(sname, action)
    else:
        # NOTE(review): wrong argument counts print usage but still exit 0 --
        # confirm whether a non-zero exit status would be preferable.
        usage()
        return 0
if __name__ == "__main__":
sys.exit(main())
|
pdehaye/theming-edx-platform | common/lib/xmodule/xmodule/combined_open_ended_module.py | Python | agpl-3.0 | 21,389 | 0.003974 | import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule
from xblock.core import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpe | nEndedV1Descriptor
from collections import namedtuple
from .fields import Date, Timedelta
import textwrap
log = logging.getLogger("mitx.courseware")
V1_SETTINGS_ATTRIBUTES = [
"display_name", "max_attempts", "graded", "accept_file_upload",
"skip_spelling_checks", "due", "graceperiod", "weight", "min_to_calibrate",
"max_to_calibrate", "peer_ | grader_count", "required_peer_grading",
]
V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state",
"student_attempts", "ready_to_reset", "old_task_states"]
V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES
VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes'])
VERSION_TUPLES = {
1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES,
V1_STUDENT_ATTRIBUTES),
}
DEFAULT_VERSION = 1
DEFAULT_DATA = textwrap.dedent("""\
<combinedopenended>
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/></task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
<task>
<openended min_score_to_attempt="9" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
""")
class VersionInteger(Integer):
"""
A model type that converts from strings to integers when reading from json.
Also does error checking to see if version is correct or not.
"""
def from_json(self, value):
try:
value = int(value)
if value not in VERSION_TUPLES:
version_error_string = "Could not find version {0}, using version {1} instead"
log.error(version_error_string.format(value, DEFAULT_VERSION))
value = DEFAULT_VERSION
except:
value = DEFAULT_VERSION
return value
class CombinedOpenEndedFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
default="Open Response Assessment",
scope=Scope.settings
)
current_task_number = Integer(
help="Current task that the student is on.",
default=0,
scope=Scope.user_state
)
old_task_states = List(
help=("A list of lists of state dictionaries for student states that are saved."
"This field is only populated if the instructor changes tasks after"
"the module is created and students have attempted it (for example changes a self assessed problem to " |
paymill/paymill-python | paymill/services/checksum_service.py | Python | mit | 5,102 | 0.004509 | # coding=utf-8
from ..models.checksum import Checksum
from ..models.address import Address
from ..models.shopping_cart_item import ShoppingCartItem
from ..models.shopping_cart import ShoppingCart
from .paymill_service import PaymillService
import json
__author__ = 'yalnazov'
class ChecksumService(PaymillService):
    """Service endpoint for creating and fetching transaction/payment Checksums."""
    def endpoint_path(self):
        # REST path fragment for this resource.
        return '/checksums'
    def paymill_object(self):
        # Model class this service (de)serializes.
        return Checksum
    def create(self, checksum_type, amount, currency, return_url, cancel_url, description=None, checksum_action='transaction',
               fee_amount=None, fee_payment=None, fee_currency=None, checkout_options=None, require_reusable_payment=None,
               reusable_payment_description=None, items=None, shipping_address=None, billing_address=None, app_id=None,
               client_id=None):
        """Creates new transaction/payment Checksum
        :param str checksum_type: Type of request verified by this checksum
        :param int amount: Amount (in cents) which will be charged
        :param str currency: ISO 4217 formatted currency code
        :param str return_url: URL to redirect customers to after checkout has completed.
        :param str cancel_url: URL to redirect customers to after they have canceled the checkout.
        :param str description: A short description for the transaction
        :param str checksum_action: enum(transaction, payment) or None. Requested action verified by this checksum (default: transaction)
        :param int fee_amount: Fee included in the transaction amount (set by a connected app). Mandatory if fee_payment is set.
        :param str fee_payment: The identifier of the payment from which the fee will be charged (Payment object).
        :param str fee_currency: The currency of the fee (e.g. EUR, USD). If it's not set, the currency of the transaction is used.
        We suggest to always use as it might cause problems, if your account does not support the same currencies as your merchants accounts.
        :param list checkout_options: Various options that determine behavior before/during/after checkout such as editability of address fields.
        :param boolean require_reusable_payment: Set this to true if you want to ask the buyer for a billing agreement during checkout.
        If the buyer accepts, the resulting payment can be reused for transactions and subscriptions without additional interaction.
        :param str reusable_payment_description: Description appears at the acquirers checkout page (e.g. PayPal) when you request permission for a reusable payment, max. 127 characters.
        :param list of ShoppingCartItem items: Shopping cart items purchased with this transaction.
        :param Address shipping_address: Shipping address for this transaction.
        :param Address billing_address: Billing address for this transaction.
        :param str app_id: App (ID) that created this payment or None if created by yourself.
        :param str client_id or None: The identifier of a client
        :return Checksum: the created Checksum object
        """
        # Mandatory parameters go in first; optional ones are added only when
        # provided so the API receives no explicit nulls.
        params = dict(checksum_type=checksum_type, amount=amount, currency=currency, return_url=return_url, cancel_url=cancel_url)
        if description is not None:
            params.update(description=description)
        if checksum_action is not None:
            params.update(checksum_action=checksum_action)
        # Nested structures are serialized to JSON strings before being sent.
        if shipping_address is not None and isinstance(shipping_address, Address):
            params.update(shipping_address=str(shipping_address.to_json()))
        if billing_address is not None and isinstance(billing_address, Address):
            params.update(billing_address=str(billing_address.to_json()))
        if items is not None and isinstance(items, list) and len(items) > 0 and isinstance(items[0], ShoppingCartItem):
            params.update(items=str(ShoppingCart(items=items).to_json()))
        if fee_amount is not None:
            params.update(fee_amount=fee_amount)
        if fee_payment is not None:
            params.update(fee_payment=fee_payment)
        if fee_currency is not None:
            params.update(fee_currency=fee_currency)
        if checkout_options is not None and isinstance(checkout_options, dict):
            params.update(checkout_options=json.dumps(checkout_options))
        if app_id is not None:
            params.update(app_id=app_id)
        if reusable_payment_description is not None:
            params.update(reusable_payment_description=reusable_payment_description)
        if require_reusable_payment is not None:
            params.update(require_reusable_payment=require_reusable_payment)
        if client_id is not None:
            # The remote API expects the key "client", not "client_id".
            params.update(client=client_id)
        return self._create(params)
    def detail(self, obj):
        """Returns/refreshes the remote Checksum representation with that obj.id
        :param Checksum obj: the Checksum object with an id set
        :return Checksum: the fresh Checksum object
        """
        return self._detail(obj)
|
aspic2/ZombieScript | tests/main_tests.py | Python | mit | 110 | 0 | '''Sorry, I never got around to writing tests.'''
f | rom nose.tools | import *
from zombiescript import main
|
Buchhold/QLever | misc/fix_nt_file.py | Python | apache-2.0 | 554 | 0.016245 | import sys
def fix(element):
    """Escape stray angle brackets inside an IRI term of an N-Triples line.

    If *element* is an IRI (starts with '<'), any '<' or '>' between the
    outer delimiters is replaced with '&lt;'/'&gt;' so nested brackets
    cannot confuse downstream parsers; other terms are returned unchanged.
    """
    if element and element[0] == '<':
        fixed = '<'
        for ch in element[1:-1]:
            # Bug fix: the loop previously rebuilt the string with '<' -> '<'
            # and '>' -> '>', i.e. a no-op (the escapes had been lost).
            if ch == '<':
                fixed += '&lt;'
            elif ch == '>':
                fixed += '&gt;'
            else:
                fixed += ch
        return fixed + '>'
    else:
        return element
# Stream the N-Triples file named on the command line, fixing the subject
# column of each well-formed line; malformed lines are reported on stderr.
for line in open(sys.argv[1]):
    cols = line.strip().split('\t')
    if len(cols) != 4 or cols[3] != '.':
        # Bug fix: the original wrote to the non-existent `sys.err`,
        # raising AttributeError on the first malformed line.
        sys.stderr.write('Ignoring malformed line: ' + line)
    else:
        # for now only touch subject for efficiency reasons.
        cols[0] = fix(cols[0])
        print('\t'.join(cols))
|
redhat-openstack/heat | heat/tests/test_template_format.py | Python | apache-2.0 | 7,015 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import six
import yaml
from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.tests.common import HeatTestCase
from heat.tests import utils
class JsonToYamlTest(HeatTestCase):
    """Check that bundled JSON templates convert cleanly to equivalent YAML."""

    def setUp(self):
        super(JsonToYamlTest, self).setUp()
        # At least this many templates must be exercised for the test to mean
        # anything.
        self.expected_test_count = 2
        self.longMessage = True
        self.maxDiff = None

    def test_convert_all_templates(self):
        """Convert bundled JSON templates to YAML and compare the parses."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'templates')
        template_test_count = 0
        for (json_str,
             yml_str,
             file_name) in self.convert_all_json_to_yaml(path):
            self.compare_json_vs_yaml(json_str, yml_str, file_name)
            template_test_count += 1
            if template_test_count >= self.expected_test_count:
                break
        self.assertTrue(template_test_count >= self.expected_test_count,
                        'Expected at least %d templates to be tested, not %d' %
                        (self.expected_test_count, template_test_count))

    def compare_json_vs_yaml(self, json_str, yml_str, file_name):
        """Assert the YAML conversion parses to the same template dict."""
        yml = template_format.parse(yml_str)
        self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'],
                         file_name)
        # The converter must replace the AWS version key with Heat's own.
        self.assertFalse(u'AWSTemplateFormatVersion' in yml, file_name)
        del(yml[u'HeatTemplateFormatVersion'])
        jsn = template_format.parse(json_str)
        if u'AWSTemplateFormatVersion' in jsn:
            del(jsn[u'AWSTemplateFormatVersion'])
        self.assertEqual(yml, jsn, file_name)

    def convert_all_json_to_yaml(self, dirpath):
        """Yield (json_str, yml_str, filename) for each template in *dirpath*."""
        for path in os.listdir(dirpath):
            if not path.endswith(('.template', '.json')):
                continue
            full_path = os.path.join(dirpath, path)
            # Bug fix: the file handle was previously opened and never closed.
            with open(full_path, 'r') as f:
                json_str = f.read()
            yml_str = template_format.convert_json_to_yaml(json_str)
            yield (json_str, yml_str, full_path)
class YamlMinimalTest(HeatTestCase):
    # Input-validation edge cases for template_format.parse.
    def _parse_template(self, tmpl_str, msg_str):
        # Helper: parsing tmpl_str must raise ValueError mentioning msg_str.
        parse_ex = self.assertRaises(ValueError,
                                     template_format.parse,
                                     tmpl_str)
        self.assertIn(msg_str, six.text_type(parse_ex))
    def test_long_yaml(self):
        # Templates larger than max_template_size must be rejected.
        template = {'HeatTemplateFormatVersion': '2012-12-12'}
        config.cfg.CONF.set_override('max_template_size', 1024)
        # NOTE(review): this division relies on Python 2 integer semantics.
        template['Resources'] = ['a'] * (config.cfg.CONF.max_template_size / 3)
        limit = config.cfg.CONF.max_template_size
        long_yaml = yaml.safe_dump(template)
        self.assertTrue(len(long_yaml) > limit)
        ex = self.assertRaises(exception.RequestLimitExceeded,
                               template_format.parse, long_yaml)
        msg = ('Request limit exceeded: Template exceeds maximum allowed size '
               '(1024 bytes)')
        self.assertEqual(msg, six.text_type(ex))
    def test_parse_no_version_format(self):
        # Both an empty document and one with sections but no version key
        # must be rejected.  (Local `yaml` shadows the module, deliberately
        # or not -- it is only used as a string here.)
        yaml = ''
        self._parse_template(yaml, 'Template format version not found')
        yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
        self._parse_template(yaml2, 'Template format version not found')
    def test_parse_string_template(self):
        # A bare scalar is not a template.
        tmpl_str = 'just string'
        msg = 'The template is not a JSON object or YAML mapping.'
        self._parse_template(tmpl_str, msg)
    def test_parse_invalid_yaml_and_json_template(self):
        # Syntactically broken input surfaces the parser's position message.
        tmpl_str = '{test'
        msg = 'line 1, column 1'
        self._parse_template(tmpl_str, msg)
    def test_parse_json_document(self):
        # A top-level JSON array is not a template mapping.
        tmpl_str = '["foo" , "bar"]'
        msg = 'The template is not a JSON object or YAML mapping.'
        self._parse_template(tmpl_str, msg)
    def test_parse_empty_json_template(self):
        # An empty mapping parses but lacks a version key.
        tmpl_str = '{}'
        msg = 'Template format version not found'
        self._parse_template(tmpl_str, msg)
    def test_parse_yaml_template(self):
        # Minimal valid HOT template: just the version key.
        tmpl_str = 'heat_template_version: 2013-05-23'
        expected = {'heat_template_version': '2013-05-23'}
        self.assertEqual(expected, template_format.parse(tmpl_str))
class YamlParseExceptions(HeatTestCase):
    # Any low-level PyYAML error raised during loading must surface to the
    # caller as a plain ValueError.  Scenarios are expanded by testscenarios.
    scenarios = [
        ('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
        ('parser', dict(raised_exception=yaml.parser.ParserError())),
        ('reader',
         dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
    ]
    def test_parse_to_value_exception(self):
        # Force yaml.load to raise this scenario's exception type.
        text = 'not important'
        with mock.patch.object(yaml, 'load') as yaml_loader:
            yaml_loader.side_effect = self.raised_exception
            self.assertRaises(ValueError,
                              template_format.parse, text)
class JsonYamlResolvedCompareTest(HeatTestCase):
    # Parse paired JSON and YAML templates and verify they yield equivalent
    # stacks once resolved.
    def setUp(self):
        super(JsonYamlResolvedCompareTest, self).setUp()
        self.longMessage = True
        self.maxDiff = None
    def load_template(self, file_name):
        # Read and parse a template bundled next to this test module.
        filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                'templates', file_name)
        f = open(filepath)
        t = template_format.parse(f.read())
        f.close()
        return t
    def compare_stacks(self, json_file, yaml_file, parameters):
        # Normalize the version keys so both templates claim the same format.
        t1 = self.load_template(json_file)
        t2 = self.load_template(yaml_file)
        del(t1[u'AWSTemplateFormatVersion'])
        t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
        stack1 = utils.parse_stack(t1, parameters)
        stack2 = utils.parse_stack(t2, parameters)
        # compare resources separately so that resolved static data
        # is compared
        t1nr = dict(stack1.t.t)
        del(t1nr['Resources'])
        t2nr = dict(stack2.t.t)
        del(t2nr['Resources'])
        self.assertEqual(t1nr, t2nr)
        # Same resource names, and each resource's parsed template matches.
        self.assertEqual(set(stack1.keys()), set(stack2.keys()))
        for key in stack1:
            self.assertEqual(stack1[key].t, stack2[key].t)
    def test_neutron_resolved(self):
        self.compare_stacks('Neutron.template', 'Neutron.yaml', {})
    def test_wordpress_resolved(self):
        self.compare_stacks('WordPress_Single_Instance.template',
                            'WordPress_Single_Instance.yaml',
                            {'KeyName': 'test'})
|
andrewkaufman/gaffer | python/GafferTest/NumericBookmarkSetTest.py | Python | bsd-3-clause | 4,620 | 0.042208 | ##########################################################################
#
# Copyright (c) 2019, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTW | ARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, | BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferTest
class NumericBookmarkSetTest( GafferTest.TestCase ) :
	"""Tests for Gaffer.NumericBookmarkSet: a live Set that tracks the single
	node assigned to one numeric bookmark slot of a ScriptNode."""

	def testAccessors( self ) :
		# setBookmark/getBookmark accept slots 1-9 and reject anything else.
		s = Gaffer.ScriptNode()
		b = Gaffer.NumericBookmarkSet( s, 1 )
		self.assertEqual( b.getBookmark(), 1 )
		for i in range( 1, 10 ) :
			b.setBookmark( i )
			self.assertEqual( b.getBookmark(), i )
		for i in ( 0, 10 ) :
			with self.assertRaises( RuntimeError ) :
				b.setBookmark( i )

	def testBookmarkUpdates( self ) :
		# The set's membership follows metadata changes to its slot, and two
		# sets watching different slots stay independent.
		s = Gaffer.ScriptNode()
		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()
		s["c"] = Gaffer.Node()
		b = Gaffer.NumericBookmarkSet( s, 1 )
		self.assertEqual( b.size(), 0 )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( set(b), { s["a"] } )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( set(b), { s["b"] } )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, None )
		self.assertEqual( b.size(), 0 )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 2, s["c"] )
		b2 = Gaffer.NumericBookmarkSet( s, 2 )
		self.assertEqual( set(b2), { s["c"] } )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 2, s["a"] )
		self.assertEqual( set(b2), { s["a"] } )

	def testSignals( self ) :
		# memberAdded/memberRemoved signals keep a mirror set in sync.
		s = Gaffer.ScriptNode()
		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()
		mirror = set()
		def added( _, member ) :
			mirror.add( member )
		def removed( _, member ) :
			mirror.remove( member )
		b = Gaffer.NumericBookmarkSet( s, 1 )
		# Keep the connections alive for the duration of the test.
		ca = b.memberAddedSignal().connect( added )
		cr = b.memberRemovedSignal().connect( removed )
		self.assertEqual( set(b), mirror )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( set(b), mirror )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( set(b), mirror )

	def testSignalOrder( self ) :
		# Membership must already be updated by the time each signal fires.
		s = Gaffer.ScriptNode()
		s["a"] = Gaffer.Node()
		s["b"] = Gaffer.Node()
		b = Gaffer.NumericBookmarkSet( s, 1 )
		callbackFailures = { "added" : 0, "removed" : 0 }
		# Check we have no members when one is removed as we're
		# defined as only ever containing one node. We can't assert
		# here as the exception gets eaten and the test passes anyway
		def removed( _, member ) :
			if set(b) != set() :
				callbackFailures["removed"] += 1
		cr = b.memberRemovedSignal().connect( removed )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["b"] )
		self.assertEqual( callbackFailures["removed"], 0 )
		# Check member is added before signal, same deal re: asserts
		def added( _, member ) :
			if set(b) != { s["a"] } :
				callbackFailures["added"] += 1
		ca = b.memberAddedSignal().connect( added )
		Gaffer.MetadataAlgo.setNumericBookmark( s, 1, s["a"] )
		self.assertEqual( callbackFailures["added"], 0 )
if __name__ == "__main__":
	# Bug fix: `unittest` was referenced without ever being imported, so
	# running this file directly raised NameError instead of the tests.
	import unittest
	unittest.main()
|
UWPCE-PythonCert/IntroPython2016 | students/sheree/session_03/homework/LPTHW-EXC-16.py | Python | unlicense | 1,164 | 0.005155 | #this is exercize 16 on page 59 of the LPTHW pdf
#exercise 16 - reading and writing files
# we're using arguments here, the arg is a filename var
from sys import argv
# argv standard options for filename
script, filename = argv
# print the file name we passed
# | print an option to exit
# print an option to continue, ie: return for yes
print("We're going to erase %r. | ") % filename
print("If you don't want that, hit CTRL-C (^c).")
print("If you do want that, hit RETURN.")
# getting input with a ? prompt
raw_input("?")
# printing a status message
# setting target to file I don't understand the w here
print("Opening the file...")
target = open(filename, "w") # W IS FOR WRITE APPARENTLY!!!
print("Truncating the file. Goodbye!")
target.truncate()
print("Now I'm going to ask you for three lines.")
line1 = raw_input("line 1: ")
line2 = raw_input("line 2: ")
line3 = raw_input("line 3: ")
print("I'm going to write these to the file.")
# target.write(line1)
# target.write("\n")
# target.write(line2)
# target.write("\n")
# target.write(line3)
# target.write("\n")
target.write("line1\nline2\nline3\n")
print("And finally, we close it.")
target.close() |
trevor/calendarserver | txweb2/test/test_http.py | Python | apache-2.0 | 47,272 | 0.00404 |
from __future__ import nested_scopes
import time, sys, os
from zope.interface import implements
from twisted.trial import unittest
from txweb2 import http, http_headers, responsecode, iweb, stream
from txweb2 import channel
from twisted.internet import reactor, protocol, address, interfaces, utils
from twisted.internet import defer
from twisted.internet.defer import waitForDeferred, deferredGenerator
from twisted.protocols import loopback
from twisted.python import util, runtime
from txweb2.channel.http import SSLRedirectRequest, HTTPFactory, HTTPChannel
from twisted.internet.task import deferLater
class RedirectResponseTestCase(unittest.TestCase):
    """Tests for http.RedirectResponse."""

    def testTemporary(self):
        """
        Verify the "temporary" parameter sets the appropriate response code
        """
        cases = (
            (False, responsecode.MOVED_PERMANENTLY),
            (True, responsecode.TEMPORARY_REDIRECT),
        )
        for temporary, expectedCode in cases:
            resp = http.RedirectResponse("http://example.com/",
                                         temporary=temporary)
            self.assertEquals(resp.code, expectedCode)
class PreconditionTestCase(unittest.TestCase):
    def checkPreconditions(self, request, response, expectedResult, expectedCode,
                           **kw):
        """
        Helper: run http.checkPreconditions and assert the outcome.

        expectedResult is True when no HTTPError should be raised; if one is
        raised, its response code must equal expectedCode.  Extra keyword
        arguments are forwarded to http.checkPreconditions.
        """
        preconditionsPass = True
        try:
            http.checkPreconditions(request, response, **kw)
        except http.HTTPError, e:  # Python 2 'except X, e' syntax
            preconditionsPass = False
            self.assertEquals(e.response.code, expectedCode)
        self.assertEquals(preconditionsPass, expectedResult)
    def testWithoutHeaders(self):
        """
        With no conditional request headers, preconditions always pass,
        no matter which validator headers the response carries.
        """
        request = http.Request(None, "GET", "/", "HTTP/1.1", 0, http_headers.Headers())
        out_headers = http_headers.Headers()
        response = http.Response(responsecode.OK, out_headers, None)
        # No validators on the response at all.
        self.checkPreconditions(request, response, True, responsecode.OK)

        # ETag only.
        out_headers.setHeader("ETag", http_headers.ETag('foo'))
        self.checkPreconditions(request, response, True, responsecode.OK)

        # Last-Modified only.
        out_headers.removeHeader("ETag")
        out_headers.setHeader("Last-Modified", 946771200) # Sun, 02 Jan 2000 00:00:00 GMT
        self.checkPreconditions(request, response, True, responsecode.OK)

        # Both validators present.
        out_headers.setHeader("ETag", http_headers.ETag('foo'))
        self.checkPreconditions(request, response, True, responsecode.OK)
def testIfMatch(self):
request = http.Request(None, "GET", "/", "HTTP/1.1", 0, http_headers.Headers())
out_headers = http_headers.Headers()
response = http.Response(responsecode.OK, out_headers, None)
# Behavior with no ETag set, should be same as with an ETag
request.headers.set | RawHeaders("If-Match", ('*',))
self.checkPreconditions(request, response, True, responsecode.OK)
self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED, entityExists=False)
# Ask for tag, but no | etag set.
request.headers.setRawHeaders("If-Match", ('"frob"',))
self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED)
## Actually set the ETag header
out_headers.setHeader("ETag", http_headers.ETag('foo'))
out_headers.setHeader("Last-Modified", 946771200) # Sun, 02 Jan 2000 00:00:00 GMT
# behavior of entityExists
request.headers.setRawHeaders("If-Match", ('*',))
self.checkPreconditions(request, response, True, responsecode.OK)
self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED, entityExists=False)
# tag matches
request.headers.setRawHeaders("If-Match", ('"frob", "foo"',))
self.checkPreconditions(request, response, True, responsecode.OK)
# none match
request.headers.setRawHeaders("If-Match", ('"baz", "bob"',))
self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED)
# But if we have an error code already, ignore this header
response.code = responsecode.INTERNAL_SERVER_ERROR
self.checkPreconditions(request, response, True, responsecode.INTERNAL_SERVER_ERROR)
response.code = responsecode.OK
# Must only compare strong tags
out_headers.setHeader("ETag", http_headers.ETag('foo', weak=True))
request.headers.setRawHeaders("If-Match", ('W/"foo"',))
self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED)
    def testIfUnmodifiedSince(self):
        """
        If-Unmodified-Since fails when the response has no Last-Modified
        validator, passes when the resource is not newer than the given
        date, and is ignored for unparseable dates or when the response
        already holds an error code.
        """
        request = http.Request(None, "GET", "/", "HTTP/1.1", 0, http_headers.Headers())
        out_headers = http_headers.Headers()
        response = http.Response(responsecode.OK, out_headers, None)

        # No Last-Modified => always fail.
        request.headers.setRawHeaders("If-Unmodified-Since", ('Mon, 03 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED)

        # Set output headers
        out_headers.setHeader("ETag", http_headers.ETag('foo'))
        out_headers.setHeader("Last-Modified", 946771200) # Sun, 02 Jan 2000 00:00:00 GMT

        # Date after Last-Modified => unmodified, precondition passes.
        request.headers.setRawHeaders("If-Unmodified-Since", ('Mon, 03 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, True, responsecode.OK)

        # Date before Last-Modified => resource changed since, fail.
        request.headers.setRawHeaders("If-Unmodified-Since", ('Sat, 01 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, False, responsecode.PRECONDITION_FAILED)

        # But if we have an error code already, ignore this header
        response.code = responsecode.INTERNAL_SERVER_ERROR
        self.checkPreconditions(request, response, True, responsecode.INTERNAL_SERVER_ERROR)
        response.code = responsecode.OK

        # invalid date => header ignored
        request.headers.setRawHeaders("If-Unmodified-Since", ('alalalalalalalalalala',))
        self.checkPreconditions(request, response, True, responsecode.OK)
    def testIfModifiedSince(self):
        """
        If-Modified-Since yields NOT_MODIFIED when the resource is not
        newer than the supplied date (for any request method), and is
        ignored for invalid dates, dates in the future, or responses that
        already carry an error code.
        """
        # Sanity-check the wall clock: the test's reference date is
        # 2000-01-02, so a clock before that makes every comparison bogus.
        if time.time() < 946771200:
            self.fail(RuntimeError("Your computer's clock is way wrong, "
                                   "this test will be invalid."))

        request = http.Request(None, "GET", "/", "HTTP/1.1", 0, http_headers.Headers())
        out_headers = http_headers.Headers()
        response = http.Response(responsecode.OK, out_headers, None)

        # No Last-Modified => always succeed
        request.headers.setRawHeaders("If-Modified-Since", ('Mon, 03 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, True, responsecode.OK)

        # Set output headers
        out_headers.setHeader("ETag", http_headers.ETag('foo'))
        out_headers.setHeader("Last-Modified", 946771200) # Sun, 02 Jan 2000 00:00:00 GMT

        # Date after Last-Modified => not modified since.
        request.headers.setRawHeaders("If-Modified-Since", ('Mon, 03 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, False, responsecode.NOT_MODIFIED)

        # With a non-GET method
        request.method="PUT"
        self.checkPreconditions(request, response, False, responsecode.NOT_MODIFIED)
        request.method="GET"

        # Date before Last-Modified => modified since, passes.
        request.headers.setRawHeaders("If-Modified-Since", ('Sat, 01 Jan 2000 00:00:00 GMT',))
        self.checkPreconditions(request, response, True, responsecode.OK)

        # But if we have an error code already, ignore this header
        response.code = responsecode.INTERNAL_SERVER_ERROR
        self.checkPreconditions(request, response, True, responsecode.INTERNAL_SERVER_ERROR)
        response.code = responsecode.OK

        # invalid date => header ignored
        request.headers.setRawHeaders("If-Modified-Since", ('alalalalalalalalalala',))
        self.checkPreconditions(request, response, True, responsecode.OK)

        # date in the future => assume modified
        request.headers.setHeader("If-Modified-Since", time.time() + 500)
        self.checkPreconditions(request, response, True, responsecode.OK)
def testIfNoneMatch(self):
request = http.Request(None, "GET", "/", "HTTP/1.1", 0, http_headers.Headers())
out_headers = http_headers.Headers()
response = http.Response(responsecode.OK, out_headers, None)
request.hea |
jalonsob/Informes | vizgrimoire/metrics/__init__.py | Python | gpl-3.0 | 766 | 0.001305 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) an | y later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Plac | e - Suite 330, Boston, MA 02111-1307, USA.
#
|
probml/pyprobml | scripts/cifar_viz_tf.py | Python | mit | 1,065 | 0 | # Based on
# https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb
# (MIT License)
import superimport
from __future__ import absolute_import, division, print_function
from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"


def save_fig(fname):
    """Save the current matplotlib figure under ``figdir``."""
    plt.savefig(os.path.join(figdir, fname))


np.random.seed(0)

# Load CIFAR-10: 50000 train and 10000 test images of shape (32, 32, 3).
# (Fixed stray ' | ' extraction artifacts in this line and in the
# save_fig call below; logic unchanged.)
data = keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = data.load_data()

class_names = ['plane', 'car', 'bird', 'cat', 'deer', 'dog',
               'frog', 'horse', 'ship', 'truck']

# Show the first 25 training images in a 5x5 grid with their class names.
plt.figure(figsize=(10, 10))
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    # CIFAR labels arrive with shape (1,), so unwrap the scalar.
    y = train_labels[i][0]
    plt.xlabel(class_names[y])
save_fig("cifar10-data.pdf")
plt.show()
KNNSpeed/hockeylauncher | Adafruit-Motor-HAT-Python-Library/examples/StackingTest.py | Python | mit | 2,575 | 0.017864 | #!/usr/bin/python
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
import threading
import random
# bottom hat is default address 0x60
bottomhat = Adafruit_MotorHAT(addr=0x60)
# top hat has A0 jumper closed, so its address 0x61
tophat = Adafruit_MotorHAT(addr=0x61)
# create empty threads (these will hold the stepper 1, 2 & 3 threads)
stepperThreads = [threading.Thread(), threading.Thread(), threading.Thread()]
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
tophat.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
tophat.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
bottomhat.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
b | ottomhat.getMotor(4).run(Adafruit_MotorHAT | .RELEASE)
atexit.register(turnOffMotors)
myStepper1 = bottomhat.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper2 = bottomhat.getStepper(200, 2) # 200 steps/rev, motor port #2
myStepper3 = tophat.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper1.setSpeed(60) # 60 RPM
myStepper2.setSpeed(30) # 30 RPM
myStepper3.setSpeed(15) # 15 RPM
# get a DC motor!
myMotor = tophat.getMotor(3)
# set the speed to start, from 0 (off) to 255 (max speed)
myMotor.setSpeed(150)
# turn on motor
myMotor.run(Adafruit_MotorHAT.FORWARD);
stepstyles = [Adafruit_MotorHAT.SINGLE, Adafruit_MotorHAT.DOUBLE, Adafruit_MotorHAT.INTERLEAVE]
steppers = [myStepper1, myStepper2, myStepper3]
def stepper_worker(stepper, numsteps, direction, style):
#print("Steppin!")
stepper.step(numsteps, direction, style)
#print("Done")
while (True):
for i in range(3):
if not stepperThreads[i].isAlive():
randomdir = random.randint(0, 1)
print("Stepper %d" % i),
if (randomdir == 0):
dir = Adafruit_MotorHAT.FORWARD
print("forward"),
else:
dir = Adafruit_MotorHAT.BACKWARD
print("backward"),
randomsteps = random.randint(10,50)
print("%d steps" % randomsteps)
stepperThreads[i] = threading.Thread(target=stepper_worker, args=(steppers[i], randomsteps, dir, stepstyles[random.randint(0,len(stepstyles)-1)],))
stepperThreads[i].start()
# also, lets switch around the DC motor!
myMotor.setSpeed(random.randint(0,255)) # random speed
#myMotor.run(random.randint(0,1)) # random forward/back
|
bcoca/ansible-modules-extras | cloud/centurylink/clc_group.py | Python | gpl-3.0 | 17,070 | 0.001172 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
#
DOCUMENTATION = '''
module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
- Create or delete Server Groups at Centurylink Centurylink Cloud
version_added: "2.0"
options:
name:
description:
- The name of the Server Group
required: True
description:
description:
- A description of the Server Group
required: False
parent:
description:
- The parent group of the server group. If parent is not provided, it creates the group at top level.
required: False
location:
description:
- Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
associated with the account
required: False
state:
description:
- Whether to create or delete the group
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
choices: [ True, False ]
default: True
required: False
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Create a Server Group
--- |
- name: Create Server Group
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Server Group at CenturyLink Cloud
clc_group:
name: 'My Cool Server Group'
parent: 'Default Group'
state: present
register: clc
- name: debug
debug: var=clc
# Delete a Server Group
---
- name: Delete Server Group
hosts: localhost
gather_facts: False
connection: l | ocal
tasks:
- name: Delete / Verify Absent a Server Group at CenturyLink Cloud
clc_group:
name: 'My Cool Server Group'
parent: 'Default Group'
state: absent
register: clc
- name: debug
debug: var=clc
'''
RETURN = '''
group:
description: The group information
returned: success
type: dict
sample:
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":"2015-07-29T18:52:47Z",
"modifiedBy":"service.wfad",
"modifiedDate":"2015-07-29T18:52:47Z"
},
"customFields":[
],
"description":"test group",
"groups":[
],
"id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
"links":[
{
"href":"/v2/groups/wfad",
"rel":"createGroup",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad",
"rel":"createServer",
"verbs":[
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"parentGroup"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
"rel":"defaults",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
"rel":"billing"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
"rel":"archiveGroupAction"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
"rel":"statistics"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
"rel":"horizontalAutoscalePolicyMapping",
"verbs":[
"GET",
"PUT",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
}
],
"locationId":"UC1",
"name":"test group",
"status":"active",
"type":"default"
}
'''
__version__ = '${version}'
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcGroup(object):
clc = None
root_group = None
    def __init__(self, module):
        """
        Construct module

        :param module: the AnsibleModule driving this run; used to read
            parameters and to abort via fail_json when requirements are
            unmet.
        """
        self.clc = clc_sdk
        self.module = module
        self.group_dict = {}

        # Fail fast if the optional SDK dependencies were not importable
        # (flags set at module import time) or requests is too old.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        # Tag outbound CLC API calls with this module's version string.
        self._set_user_agent(self.clc)
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
location = self.module.params.get('location')
group_name = self.module.params.get('name')
parent_name = self.module.params.get('parent')
group_description = self.module.params.get('description')
state = self.module.params.get('state')
self._set_clc_credentials_from_env()
self.group_dict = self._get_group_tree_for_datacenter(
|
CentralLabFacilities/pepper_behavior_sandbox | pepper_behavior/skills/turn_base_without.py | Python | gpl-3.0 | 394 | 0.025381 | import smach
import rospy
import actionlib
im | port tf
import math
class TurnWithoutMovebase(smach.State):
def __init__(self, controller, angle):
self.controller = controller
self.angle = angle
smach.State.__init__(self, outcomes=['success'])
def execute(self, userdata):
msg = '0:0:' + str(math.radians(self.angle))
self.contro | ller.publish(msg)
return 'success'
|
rnixx/kivy | kivy/tests/test_filechooser_unicode.py | Python | mit | 3,716 | 0.000552 | # -*- coding: utf-8 -*-
# XXX: please be careful to only save this file with an utf-8 editor
import unittest
import pytest
from kivy import platform
unicode_char = chr
class FileChooserUnicodeTestCase(unittest.TestCase):
    """Exercises FileChooserListView against filenames containing non-ASCII
    characters and edge-of-plane unicode code points, both created on the
    fly and shipped in unicode_files.zip.

    (Fixed stray ' | ' extraction artifacts in tearDown; logic unchanged.)
    """

    def setUp(self):
        self.skip_test = platform == 'macosx' or platform == 'ios'
        # on mac, files ending in \uffff etc. simply are changed so don't
        # do any tests because we cannot predict the real filenames that will
        # be created. If it works on win and linux it also works on mac.
        # note filechooser should still work, it's only the test that fail
        # because we have to create file ourselves.
        if self.skip_test:
            return
        import os
        from os.path import join
        from zipfile import ZipFile
        basepath = os.path.dirname(__file__) + u''
        basepathu = join(basepath, u'filechooser_files')
        self.basepathu = basepathu
        # Bytes flavour of the same directory path.
        basepathb = os.path.dirname(__file__.encode())
        basepathb = join(basepathb, b'filechooser_files')
        self.assertIsInstance(basepathb, bytes)
        self.basepathb = basepathb
        # this will test creating unicode and bytes filesnames
        ufiles = [u'कीवीtestu',
                  u'कीवीtestu' + unicode_char(0xEEEE),
                  u'कीवीtestu' + unicode_char(0xEEEE - 1),
                  u'कीवीtestu' + unicode_char(0xEE)]
        # don't use non-ascii directly because that will test source file
        # text conversion, not path issues :)
        bfiles = [b'\xc3\xa0\xc2\xa4\xe2\x80\xa2\xc3\xa0\xc2\xa5\xe2\x82\xac\
\xc3\xa0\xc2\xa4\xc2\xb5\xc3\xa0\xc2\xa5\xe2\x82\xactestb',
                  b'oor\xff\xff\xff\xff\xee\xfe\xef\x81\x8D\x99testb']
        self.ufiles = [join(basepathu, f) for f in ufiles]
        # NOTE(review): bfiles above is never used — self.bfiles stays empty,
        # so the bytes-filename loops below are effectively no-ops.
        self.bfiles = []
        if not os.path.isdir(basepathu):
            os.mkdir(basepathu)
        for f in self.ufiles:
            open(f, 'wb').close()
        for f in self.bfiles:
            open(f, 'wb').close()
        # existing files
        existfiles = [u'कीवीtest', u'कीऒµà¥€test',
                      u'à ¤•à ¥€à ¤µà ¥€test', u'testl\ufffe',
                      u'testl\uffff']
        self.exitsfiles = [join(basepathu, f) for f in existfiles]
        with ZipFile(join(basepath, u'unicode_files.zip'), 'r') as myzip:
            myzip.extractall(path=basepathu)
        # Sanity check: every expected file really exists on disk.
        for f in self.exitsfiles:
            open(f, 'rb').close()

    @pytest.fixture(autouse=True)
    def set_clock(self, kivy_clock):
        # Injected by kivy's pytest plugin; provides a fresh Clock per test.
        self.kivy_clock = kivy_clock

    def test_filechooserlistview_unicode(self):
        """Every created/extracted file must appear in the widget listing."""
        if self.skip_test:
            return
        from kivy.uix.filechooser import FileChooserListView
        from kivy.clock import Clock
        from os.path import join
        wid = FileChooserListView(path=self.basepathu)
        for i in range(1):
            Clock.tick()
        files = [join(self.basepathu, f) for f in wid.files]
        for f in self.ufiles:
            self.assertIn(f, files)
        # we cannot test the bfiles because we'd have to know the system
        # unicode encoding to be able to compare to returned unicode
        for f in self.exitsfiles:
            self.assertIn(f, files)

    def tearDown(self):
        if self.skip_test:
            return
        from os import remove, rmdir
        try:
            for f in self.ufiles:
                remove(f)
            for f in self.exitsfiles:
                remove(f)
            for f in self.bfiles:
                remove(f)
            rmdir(self.basepathu)
        except:
            # Best-effort cleanup: leftover files must not fail the run.
            pass
xsm110/Apache-Beam | sdks/python/apache_beam/utils/counters.py | Python | apache-2.0 | 6,154 | 0.005362 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: profile=False
# cython: overflowcheck=True
"""Counters collect the progress of the Worker for reporting to the service."""
import threading
from apache_beam.transforms import cy_combiners
class Counter(object):
  """A counter aggregates a series of values.

  The aggregation kind of the Counter is specified when the Counter
  is created.  The values aggregated must be appropriate for the
  aggregation used, and the aggregated value is what gets reported to
  the Dataflow service.

  Do not create directly; call CounterFactory.get_counter instead.

  Attributes:
    name: the name of the counter, a string
    combine_fn: the CombineFn to use for aggregation
    accumulator: the accumulator created for the combine_fn
  """

  # Handy references to common counters.
  SUM = cy_combiners.SumInt64Fn()
  MEAN = cy_combiners.MeanInt64Fn()

  def __init__(self, name, combine_fn):
    """Creates a Counter object.

    Args:
      name: the name of this counter.  Typically has three parts:
        "step-output-counter".
      combine_fn: the CombineFn to use for aggregation
    """
    self.name = name
    self.combine_fn = combine_fn
    self.accumulator = combine_fn.create_accumulator()
    # Cache the bound add_input to skip an attribute lookup per update.
    self._add_input = combine_fn.add_input

  def update(self, value):
    """Folds one more value into the running accumulator."""
    self.accumulator = self._add_input(self.accumulator, value)

  def value(self):
    """Returns the aggregated value observed so far."""
    return self.combine_fn.extract_output(self.accumulator)

  def _str_internal(self):
    return '%s %s %s' % (
        self.name, self.combine_fn.__class__.__name__, self.value())

  def __str__(self):
    return '<%s>' % self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))
class AccumulatorCombineFnCounter(Counter):
  """Counter optimized for a mutating accumulator that holds all the logic."""

  def __init__(self, name, combine_fn):
    # Only AccumulatorCombineFn instances expose an in-place accumulator.
    assert isinstance(combine_fn, cy_combiners.AccumulatorCombineFn)
    super(AccumulatorCombineFnCounter, self).__init__(name, combine_fn)
    # Bind the accumulator's own add_input so update() is a single call.
    self._fast_add_input = self.accumulator.add_input

  def update(self, value):
    # The accumulator mutates itself; no reassignment of self.accumulator.
    self._fast_add_input(value)
# Counters that represent Accumulators have names starting with this
USER_COUNTER_PREFIX = 'user-'
class CounterFactory(object):
  """Keeps track of unique counters.

  (Fixed stray ' | ' extraction artifacts in the get_counters docstring
  and after the class body; logic unchanged.)
  """

  def __init__(self):
    self.counters = {}
    # Lock to be acquired when accessing the counters map.
    self._lock = threading.Lock()

  def get_counter(self, name, combine_fn):
    """Returns a counter with the requested name.

    Passing in the same name will return the same counter; the
    combine_fn must agree.

    Args:
      name: the name of this counter.  Typically has three parts:
        "step-output-counter".
      combine_fn: the CombineFn to use for aggregation
    Returns:
      A new or existing counter with the requested name.
    """
    with self._lock:
      counter = self.counters.get(name, None)
      if counter:
        # An existing counter may only be reused with the same CombineFn.
        assert counter.combine_fn == combine_fn
        return counter
      if isinstance(combine_fn, cy_combiners.AccumulatorCombineFn):
        counter = AccumulatorCombineFnCounter(name, combine_fn)
      else:
        counter = Counter(name, combine_fn)
      self.counters[name] = counter
      return counter

  def get_aggregator_counter(self, step_name, aggregator):
    """Returns an AggregationCounter for this step's aggregator.

    Passing in the same values will return the same counter.

    Args:
      step_name: the name of this step.
      aggregator: an Aggregator object.
    Returns:
      A new or existing counter.
    """
    counter_name = '%s%s-%s' % (USER_COUNTER_PREFIX, step_name, aggregator.name)
    return self.get_counter(counter_name, aggregator.combine_fn)

  def get_counters(self):
    """Returns the current set of counters.

    Returns:
      An iterable over the current set of counters.  A new iterable is
      returned so that multiple threads can iterate safely; note that the
      underlying map may keep changing after this method returns, so the
      snapshot may be stale.
    """
    with self._lock:
      return self.counters.values()

  def get_aggregator_values(self, aggregator_or_name):
    """Returns dict of step names to values of the aggregator."""
    with self._lock:
      return get_aggregator_values(
          aggregator_or_name, self.counters, lambda counter: counter.value())
def get_aggregator_values(aggregator_or_name, counter_dict,
                          value_extractor=None):
  """Extracts the named aggregator value from a set of counters.

  (Removed a stray '|' extraction-residue line trailing this function;
  Python-2 idioms — basestring, iteritems — are deliberately preserved
  to match the rest of this module.)

  Args:
    aggregator_or_name: an Aggregator object or the name of one.
    counter_dict: a dict object of {name: value_wrapper}
    value_extractor: a function to convert the value_wrapper into a value.
      If None, no extraction is done and the value is return unchanged.

  Returns:
    dict of step names to values of the aggregator.
  """
  if value_extractor is None:
    value_extractor = lambda x: x
  # Aggregator objects carry their name; plain strings are used as-is.
  if isinstance(aggregator_or_name, basestring):
    name = aggregator_or_name
  else:
    name = aggregator_or_name.name
  suffix = '-%s' % name
  return dict((n, value_extractor(c))
              for n, c in counter_dict.iteritems()
              if n.startswith(USER_COUNTER_PREFIX) and n.endswith(suffix))
pmarti/pyclutter | clutter/keysyms.py | Python | lgpl-2.1 | 30,005 | 0.0001 | # -*- Mode: Python; py-indent-offset: 4 -*-
# pyclutter - Python bindings for Clutter
# Copyright (C) 2006 Emmanuele Bassi
#
# clutter/keysyms.py: list of keysyms.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
# This file based on GDK's gdkkeysyms.h which in turn
# I think is from xlibs keysymdef.h
VoidSymbol = 0xFFFFFF
BackSpace = 0xFF08
Tab = 0xFF09
Linefeed = 0xFF0A
Clear = 0xFF0B
Return = 0xFF0D
Pause = 0xFF13
Scroll_Lock = 0xFF14
Sys_Req = 0xFF15
Escape = 0xFF1B
Delete = 0xFFFF
Multi_key = 0xFF20
Codeinput = 0xFF37
SingleCandidate = 0xFF3C
MultipleCandidate = 0xFF3D
PreviousCandidate = 0xFF3E
Kanji = 0xFF21
Muhenkan = 0xFF22
Henkan_Mode = 0xFF23
Henkan = 0xFF23
Romaji = 0xFF24
Hiragana = 0xFF25
Katakana = 0xFF26
Hiragana_Katakana = 0xFF27
Zenkaku = 0xFF28
Hankaku = 0xFF29
Zenkaku_Hankaku = 0xFF2A
Touroku = 0xFF2B
Massyo = 0xFF2C
Kana_Lock = 0xFF2D
Kana_Shift = 0xFF2E
Eisu_Shift = 0xFF2F
Eisu_toggle = 0xFF30
Kanji_Bangou = 0xFF37
Zen_Koho = 0xFF3D
Mae_Koho = 0xFF3E
Home = 0xFF50
Left = 0xFF51
Up = 0xFF52
Right = 0xFF53
Down = 0xFF54
Prior = 0xFF55
Page_Up = 0xFF55
Next = 0xFF56
Page_Down = 0xFF56
End = 0xFF57
Begin = 0xFF58
Select = 0xFF60
Print = 0xFF61
Execute = 0xFF62
Insert = 0xFF63
Undo = 0xFF65
Redo = 0xFF66
Menu = 0xFF67
Find = 0xFF68
Cancel = 0xFF69
Help = 0xFF6A
Break = 0xFF6B
Mode_switch = 0xFF7E
script_switch = 0xFF7E
Num_Lock = 0xFF7F
KP_Space = 0xFF80
KP_Tab = 0xFF89
KP_Enter = 0xFF8D
KP_F1 = 0xFF91
KP_F2 = 0xFF92
KP_F3 = 0xFF93
KP_F4 = 0xFF94
KP_Home = 0xFF95
KP_Left = 0xFF96
KP_Up = 0xFF97
KP_Right = 0xFF98
KP_Down = 0xFF99
KP_Prior = 0xFF9A
KP_Page_Up = 0xFF9A
KP_Next = 0xFF9B
KP_Page_Down = 0xFF9B
KP_End = 0xFF9C
KP_Begin = 0xFF9D
KP_Insert = 0xFF9E
KP_Delete = 0xFF9F
KP_Equal = 0xFFBD
KP_Multiply = 0xFFAA
KP_Add = 0xFFAB
KP_Separator = 0xFFAC
KP_Subtract = 0xFFAD
KP_Decimal = 0xFFAE
KP_Divide = 0xFFAF
KP_0 = 0xFFB0
KP_1 = 0xFFB1
KP_2 = 0xFFB2
KP_3 = 0xFFB3
KP_4 = 0xFFB4
KP_5 = 0xFFB5
KP_6 = 0xFFB6
KP_7 = 0xFFB7
KP_8 = 0xFFB8
KP_9 = 0xFFB9
F1 = 0xFFBE
F2 = 0xFFBF
F3 = 0xFFC0
F4 = 0xFFC1
F5 = 0xFFC2
F6 = 0xFFC3
F7 = 0xFFC4
F8 = 0xFFC5
F9 = 0xFFC6
F10 = 0xFFC7
F11 = 0xFFC8
L1 = 0xFFC8
F12 = 0xFFC9
L2 = 0xFFC9
F13 = 0xFFCA
L3 = 0xFFCA
F14 = 0xFFCB
L4 = 0xFFCB
F15 = 0xFFCC
L5 = 0xFFCC
F16 = 0xFFCD
L6 = 0xFFCD
F17 = 0xFFCE
L7 = 0xFFCE
F18 = 0xFFCF
L8 = 0xFFCF
F19 = 0xFFD0
L9 = 0xFFD0
F20 = 0xFFD1
L10 = 0xFFD1
F21 = 0xFFD2
R1 = 0xFFD2
F22 = 0xFFD3
R2 = 0xFFD3
F23 = 0xFFD4
R3 = 0xFFD4
F24 = 0xFFD5
R4 = 0xFFD5
F25 = 0xFFD6
R5 = 0xFFD6
F26 = 0xFFD7
R6 = 0xFFD7
F27 = 0xFFD8
R7 = 0xFFD8
F28 = 0xFFD9
R8 = 0xFFD9
F29 = 0xFFDA
R9 = 0xFFDA
F30 = 0xFFDB
R10 = 0xFFDB
F31 = 0xFFDC
R11 = 0xFFDC
F32 = 0xFFDD
R12 = 0xFFDD
F33 = 0xFFDE
R13 = 0xFFDE
F34 = 0xFFDF
R14 = 0xFFDF
F35 = 0xFFE0
R15 = 0xFFE0
Shift_L = 0xFFE1
Shift_R = 0xFFE2
Control_L = 0xFFE3
Control_R = 0xFFE4
Caps_Lock = 0xFFE5
Shift_Lock = 0xFFE6
Meta_L = 0xFFE7
Meta_R = 0xFFE8
Alt_L = 0xFFE9
Alt_R = 0xFFEA
Super_L = 0xFFEB
Super_R = 0xFFEC
Hyper_L = 0xFFED
Hyper_R = 0xFFEE
ISO_Lock = 0xFE01
ISO_Level2_Latch = 0xFE02
ISO_Level3_Shift = 0xFE03
ISO_Level3_Latch = 0xFE04
ISO_Level3_Lock = 0xFE05
ISO_Group_Shift = 0xFF7E
ISO_Group_Latch = 0xFE06
ISO_Group_Lock = 0xFE07
ISO_Next_Group = 0xFE08
ISO_Next_Group_Lock = 0xFE09
ISO_Prev_Group = 0xFE0A
ISO_Prev_Group_Lock = 0xFE0B
ISO_First_Group = 0xFE0C
ISO_First_Group_Lock = 0xFE0D
ISO_Last_Group = 0xFE0E
ISO_Last_Group_Lock = 0xFE0F
ISO_Left_Tab = 0xFE20
ISO_Move_Line_Up = 0xFE21
ISO_Move_Line_Down = 0xFE22
ISO_Partial_Line_Up = 0xFE23
ISO_Partial_Line_Down = 0xFE24
ISO_Partial_Space_Left = 0xFE25
ISO_Partial_Space_Right = 0xFE26
ISO_Set_Margin_Left = 0xFE27
ISO_Set_Margin_Right = 0xFE28
ISO_Release_Margin_Left = 0xFE29
ISO_Release_Margin_Right = 0xFE2A
ISO_Release_Both_Margins = 0xFE2B
ISO_Fast_Cursor_Left = 0xFE2C
ISO_Fast_Cursor_Right = 0xFE2D
ISO_Fast_Cursor_Up = 0xFE2E
ISO_Fast_Cursor_Down = 0xFE2F
ISO_Continuous_Underline = 0xFE30
ISO_Discontinuous_Underline = 0xFE31
ISO_Emphasize = 0xFE32
ISO_Center_Object = 0xFE33
ISO_Enter = 0xFE34
dead_grav | e = 0xFE50
dead_acute = 0xFE51
dead_circumflex = 0xFE52
dead_tilde = 0xFE53
dead_macron = 0xFE54
dead_breve = 0xFE55
dead_abovedot = 0xFE56
| dead_diaeresis = 0xFE57
dead_abovering = 0xFE58
dead_doubleacute = 0xFE59
dead_caron = 0xFE5A
dead_cedilla = 0xFE5B
dead_ogonek = 0xFE5C
dead_iota = 0xFE5D
dead_voiced_sound = 0xFE5E
dead_semivoiced_sound = 0xFE5F
dead_belowdot = 0xFE60
First_Virtual_Screen = 0xFED0
Prev_Virtual_Screen = 0xFED1
Next_Virtual_Screen = 0xFED2
Last_Virtual_Screen = 0xFED4
Terminate_Server = 0xFED5
AccessX_Enable = 0xFE70
AccessX_Feedback_Enable = 0xFE71
RepeatKeys_Enable = 0xFE72
SlowKeys_Enable = 0xFE73
BounceKeys_Enable = 0xFE74
StickyKeys_Enable = 0xFE75
MouseKeys_Enable = 0xFE76
MouseKeys_Accel_Enable = 0xFE77
Overlay1_Enable = 0xFE78
Overlay2_Enable = 0xFE79
AudibleBell_Enable = 0xFE7A
Pointer_Left = 0xFEE0
Pointer_Right = 0xFEE1
Pointer_Up = 0xFEE2
Pointer_Down = 0xFEE3
Pointer_UpLeft = 0xFEE4
Pointer_UpRight = 0xFEE5
Pointer_DownLeft = 0xFEE6
Pointer_DownRight = 0xFEE7
Pointer_Button_Dflt = 0xFEE8
Pointer_Button1 = 0xFEE9
Pointer_Button2 = 0xFEEA
Pointer_Button3 = 0xFEEB
Pointer_Button4 = 0xFEEC
Pointer_Button5 = 0xFEED
Pointer_DblClick_Dflt = 0xFEEE
Pointer_DblClick1 = 0xFEEF
Pointer_DblClick2 = 0xFEF0
Pointer_DblClick3 = 0xFEF1
Pointer_DblClick4 = 0xFEF2
Pointer_DblClick5 = 0xFEF3
Pointer_Drag_Dflt = 0xFEF4
Pointer_Drag1 = 0xFEF5
Pointer_Drag2 = 0xFEF6
Pointer_Drag3 = 0xFEF7
Pointer_Drag4 = 0xFEF8
Pointer_Drag5 = 0xFEFD
Pointer_EnableKeys = 0xFEF9
Pointer_Accelerate = 0xFEFA
Pointer_DfltBtnNext = 0xFEFB
Pointer_DfltBtnPrev = 0xFEFC
_3270_Duplicate = 0xFD01
_3270_FieldMark = 0xFD02
_3270_Right2 = 0xFD03
_3270_Left2 = 0xFD04
_3270_BackTab = 0xFD05
_3270_EraseEOF = 0xFD06
_3270_EraseInput = 0xFD07
_3270_Reset = 0xFD08
_3270_Quit = 0xFD09
_3270_PA1 = 0xFD0A
_3270_PA2 = 0xFD0B
_3270_PA3 = 0xFD0C
_3270_Test = 0xFD0D
_3270_Attn = 0xFD0E
_3270_CursorBlink = 0xFD0F
_3270_AltCursor = 0xFD10
_3270_KeyClick = 0xFD11
_3270_Jump = 0xFD12
_3270_Ident = 0xFD13
_3270_Rule = 0xFD14
_3270_Copy = 0xFD15
_3270_Play = 0xFD16
_3270_Setup = 0xFD17
_3270_Record = 0xFD18
_3270_ChangeScreen = 0xFD19
_3270_DeleteWord = 0xFD1A
_3270_ExSelect = 0xFD1B
_3270_CursorSelect = 0xFD1C
_3270_PrintScreen = 0xFD1D
_3270_Enter = 0xFD1E
space = 0x020
exclam = 0x021
quotedbl = 0x022
numbersign = 0x023
dollar = 0x024
percent = 0x025
ampersand = 0x026
apostrophe = 0x027
quoteright = 0x027
parenleft = 0x028
parenright = 0x029
asterisk = 0x02a
plus = 0x02b
comma = 0x02c
minus = 0x02d
period = 0x02e
slash = 0x02f
_0 = 0x030
_1 = 0x031
_2 = 0x032
_3 = 0x033
_4 = 0x034
_5 = 0x035
_6 = 0x036
_7 = 0x037
_8 = 0x038
_9 = 0x039
colon = 0x03a
semicolon = 0x03b
less = 0x03c
equal = 0x03d
greater = 0x03e
question = 0x03f
at = 0x040
A = 0x041
B = 0x042
C = 0x043
D = 0x044
E = 0x045
F = 0x046
G = 0x047
H = 0x048
I = 0x049
J = 0x04a
K = 0x04b
L = 0x04c
M = 0x04d
N = 0x04e
O = 0x04f
P = 0x050
Q = 0x051
R = 0x052
S = 0x053
T = 0x054
U = 0x055
V = 0x056
W = 0x057
X = 0x058
Y = 0x059
Z = 0x05a
bracketleft = 0x05b
backslash = 0x05c
bracketright = 0x05d
asciicircum = 0x05e
underscore = 0x05f
grave = 0x060
quoteleft = 0x060
a = 0x061
b = 0x062
c = 0x063
d = 0x064
e = 0x065
f = 0x066
g = 0x067
h = 0x068
i = 0x069
j = 0x06a
k = 0x06b
l = 0x06c
m = 0x06d
n = 0x06e
o = 0x06f
p = 0x070
q = 0x071
r = 0x072
s = 0x073
t = 0x074
u = 0x075
v = 0x076
w = 0x077
x = 0x078
y = 0x079
z = 0x07a
braceleft = 0x07b
bar = 0x07c
braceright = 0x07d
asciitilde = 0x07e
|
google/TaglessCRM | src/plugins/pipeline_plugins/utils/__init__.py | Python | apache-2.0 | 601 | 0.001664 | # python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or | agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex | press or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
gpodder/mygpo-feedservice | manage.py | Python | agpl-3.0 | 254 | 0 | #!/us | r/bin/env python
import os
import sys
if __ | name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "feedservice.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
leapcode/leap_pycommon | src/leap/common/events/zmq_components.py | Python | gpl-3.0 | 5,662 | 0.000177 | # -*- coding: utf-8 -*-
# zmq.py
# Copyright (C) 2015, 2016 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The server for the events mechanism.
"""
import os
import logging
import txzmq
import re
from abc import ABCMeta
try:
import zmq.auth
from leap.common.events.auth | import TxAuthenticator
from leap.common.events.auth import TxAuthenticationRequest
except ImportError:
pass
from txzmq.connection import ZmqEndpoint, ZmqEndpointType
from leap.common.config import flags, get_path_prefix
from leap.common.zmq_utils i | mport zmq_has_curve
from leap.common.zmq_utils import maybe_create_and_get_certificates
from leap.common.zmq_utils import PUBLIC_KEYS_PREFIX
logger = logging.getLogger(__name__)
ADDRESS_RE = re.compile("^([a-z]+)://([^:]+):?(\d+)?$")
LOCALHOST_ALLOWED = '127.0.0.1'
class TxZmqComponent(object):
"""
A twisted-powered zmq events component.
"""
_factory = txzmq.ZmqFactory()
_factory.registerForShutdown()
_auth = None
__metaclass__ = ABCMeta
_component_type = None
def __init__(self, path_prefix=None, enable_curve=True, factory=None):
"""
Initialize the txzmq component.
"""
if path_prefix is None:
path_prefix = get_path_prefix(flags.STANDALONE)
if factory is not None:
self._factory = factory
self._config_prefix = os.path.join(path_prefix, "leap", "events")
self._connections = []
if enable_curve:
self.use_curve = zmq_has_curve()
else:
self.use_curve = False
@property
def component_type(self):
if not self._component_type:
raise Exception(
"Make sure implementations of TxZmqComponent"
"define a self._component_type!")
return self._component_type
def _zmq_bind(self, connClass, address):
"""
Bind to an address.
:param connClass: The connection class to be used.
:type connClass: txzmq.ZmqConnection
:param address: The address to bind to.
:type address: str
:return: The binded connection and port.
:rtype: (txzmq.ZmqConnection, int)
"""
proto, addr, port = ADDRESS_RE.search(address).groups()
endpoint = ZmqEndpoint(ZmqEndpointType.bind, address)
connection = connClass(self._factory)
if self.use_curve:
socket = connection.socket
public, secret = maybe_create_and_get_certificates(
self._config_prefix, self.component_type)
socket.curve_publickey = public
socket.curve_secretkey = secret
self._start_authentication(connection.socket)
if proto == 'tcp' and int(port) == 0:
connection.endpoints.extend([endpoint])
port = connection.socket.bind_to_random_port('tcp://%s' % addr)
else:
connection.addEndpoints([endpoint])
return connection, int(port)
def _zmq_connect(self, connClass, address):
"""
Connect to an address.
:param connClass: The connection class to be used.
:type connClass: txzmq.ZmqConnection
:param address: The address to connect to.
:type address: str
:return: The binded connection.
:rtype: txzmq.ZmqConnection
"""
endpoint = ZmqEndpoint(ZmqEndpointType.connect, address)
connection = connClass(self._factory)
if self.use_curve:
socket = connection.socket
public, secret = maybe_create_and_get_certificates(
self._config_prefix, self.component_type)
server_public_file = os.path.join(
self._config_prefix, PUBLIC_KEYS_PREFIX, "server.key")
server_public, _ = zmq.auth.load_certificate(server_public_file)
socket.curve_publickey = public
socket.curve_secretkey = secret
socket.curve_serverkey = server_public
connection.addEndpoints([endpoint])
return connection
def _start_authentication(self, socket):
if not TxZmqComponent._auth:
TxZmqComponent._auth = TxAuthenticator(self._factory)
TxZmqComponent._auth.start()
auth_req = TxAuthenticationRequest(self._factory)
auth_req.start()
auth_req.allow(LOCALHOST_ALLOWED)
# tell authenticator to use the certificate in a directory
public_keys_dir = os.path.join(self._config_prefix, PUBLIC_KEYS_PREFIX)
auth_req.configure_curve(domain="*", location=public_keys_dir)
auth_req.shutdown()
TxZmqComponent._auth.shutdown()
# This has to be set before binding the socket, that's why this method
# has to be called before addEndpoints()
socket.curve_server = True
class TxZmqServerComponent(TxZmqComponent):
"""
A txZMQ server component.
"""
_component_type = "server"
class TxZmqClientComponent(TxZmqComponent):
"""
A txZMQ client component.
"""
_component_type = "client"
|
leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/lru_cache_unittest.py | Python | bsd-3-clause | 3,651 | 0.001917 | #!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and | the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DI | SCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common import lru_cache
class LRUCacheTest(unittest.TestCase):
def setUp(self):
self.lru = lru_cache.LRUCache(3)
self.lru['key_1'] = 'item_1'
self.lru['key_2'] = 'item_2'
self.lru['key_3'] = 'item_3'
self.lru2 = lru_cache.LRUCache(1)
self.lru2['key_1'] = 'item_1'
def test_items(self):
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_put(self):
self.lru['key_4'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_4', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
def test_update(self):
self.lru['key_1']
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_1'), ('key_3', 'item_3'), ('key_5', 'item_5')]))
def test_keys(self):
self.assertEqual(set(self.lru.keys()), set(['key_1', 'key_2', 'key_3']))
def test_delete(self):
del self.lru['key_1']
self.assertFalse('key_1' in self.lru)
def test_contain(self):
self.assertTrue('key_1' in self.lru)
self.assertFalse('key_4' in self.lru)
def test_values(self):
self.assertEqual(set(self.lru.values()), set(['item_1', 'item_2', 'item_3']))
def test_len(self):
self.assertEqual(len(self.lru), 3)
def test_size_one_pop(self):
self.lru2['key_2'] = 'item_2'
self.assertEqual(self.lru2.keys(), ['key_2'])
def test_size_one_delete(self):
del self.lru2['key_1']
self.assertFalse('key_1' in self.lru2)
def test_pop_error(self):
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
del self.lru2['key_1']
self.assertRaises(KeyError, self.lru2.__getitem__, 'key_2')
def test_get_middle_item(self):
self.lru['key_2']
self.lru['key_4'] = 'item_4'
self.lru['key_5'] = 'item_5'
self.assertEqual(set(self.lru.keys()), set(['key_2', 'key_4', 'key_5']))
def test_set_again(self):
self.lru['key_1'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
if __name__ == "__main__":
unittest.main()
|
ufeslabic/parse-facebook | lib_output.py | Python | mit | 6,654 | 0.021491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv, sys, subprocess
from lib_time import *
# returns the given float number with only 2 decimals and a % appended
def float_to_percentage(float_number):
return("%0.2f" % float_number +"%")
# normalize the dictionary with the word count to generate the wordcloud
def normalize_dict(dic):
max_elem = max(dic.values())
for key, value in dic.items():
normalized_val = int((100 * value)/max_elem)
if normalized_val == 0:
normalized_val = 1
dic[key]= normalized_val
return dic
# writes the normalized dict in a txt to be pasted manually in wordle.net
def dict_to_txt_for_wordle(dict_in, filename, sort_key=lambda t:t, value_key=lambda t:t):
if not dict_in:
dict_in = {'No hashtags found':1}
ordered_list = []
dict_in = normalize_dict(dict_in)
for key, value in dict_in.items():
ordered_list.append([key, value_key(value)])
ordered_list = sorted(ordered_list, key=sort_key, reverse=True)
out = open(filename, 'w', encoding= 'utf-8')
for item in ordered_list[:120]:
i = 0
while i < item[1]:
out.write(item[0] + ' ')
i+=1
out.close()
# creates a CSV file of the dictionary data received
def top_something_to_csv(dict_in, filename, column_titles, reverse, sort_key, value_format=lambda t: t):
ordered_list = []
for key, value in dict_in.items():
ordered_list.append([key, value_format(value)])
ordered_list = sorted(ordered_list, key=sort_key, reverse=reverse)
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for item in ordered_list:
file_writer.writerow([item[0], item[1]])
csvfile.close()
# writes a CSV file in the following format:
# post_type | interactions_# | %_of_total
# where interactions can be shares, likes or comments
def int_dictionary_to_csv(int_dict_in, filename, column_titles):
total = sum(int_dict_in.values())
float_dict_post_percent = {}
for key, value in int_dict_in.items():
float_dict_post_percent[key] = (value * 100)/total
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for key, value in float_dict_post_percent.items():
file_writer.writerow([key, int_dict_in[key], float_to_percentage(value)])
# writes a CSV file in the following format:
# date(dd/mm/yyyy) | post_type | post_text| interactions_#
# where interactions can be shares, likes or comments and post_type can be status, photo, video or share
def int_dictionary_interactions_summary_to_csv(int_dict_comments_in, int_dict_shares_in, int_dict_likes_in, filename):
column_titles = ['post_type', 'comments_#', 'comments_%', '', 'likes_#', 'likes_%','', 'shares_#', 'shares_%',]
total_comments = sum(int_dict_comments_in.values())
total_shares = sum(int_dict_shares_in.values())
total_likes = sum(int_dict_likes_in.values())
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for key in int_dict_comments_in.keys():
pct_comments = (int_dict_comments_in[key]*100)/total_comments
pct_likes = (int_dict_likes_in[key]*100)/total_likes
pct_shares = (int_dict_shares_in[key]*100)/total_shares
file_writer.writerow([key, int_dict_comments_in[key], float_to_percentage(pct_comments),' ', int_dict_likes_in[key], float_to_percentage(pct_likes), ' ', int_dict_shares_in[key], float_to_percentage(pct_shares)])
# writes a CSV file in the following format:
# dd/mm/YYYY | post_type | post_text | interactions_#
# where interactions can be shares, likes or comments
def interactions_summary_to_csv(list_summary, filename, column_titles):
list_summary = sorted(list_summary, key = lambda x: x[0])
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(column_titles)
for item in list_summary:
line = [timestamp_to_str_date(item[0])] + item[1:]
file_writer.writerow(line)
# creates a CSV file of the dictionary data received
def top_something_to_csv(dict_in, filename, column_titles, reverse, sort_key, value_format=lambda t: t):
ordered_list = []
for key, value in dict_in.items():
ordered_list.append([key, value_format(value)])
ordered_list = sorted(ordered_list, key=sort_key, reverse=reverse)
with open(filename, 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
fil | e_writer.writerow(column_titles)
for item in ordered_list:
f | ile_writer.writerow([item[0], item[1]])
csvfile.close()
def comments_timeline():
list_datetime_commments = []
with open('comments.tab', 'rt', encoding="utf8") as csvfile:
csv_in = csv.reader(csvfile, delimiter='\t')
next(csv_in)
for line in csv_in:
str_raw_time = line[3]
temp_datetime = datetime.datetime.strptime(str_raw_time, "%Y-%m-%dT%H:%M:%S+0000")
list_datetime_commments.append(temp_datetime)
dict_int_str_date = comments_per_day(list_datetime_commments)
dict_int_str_date_hour = comments_per_hour(list_datetime_commments)
top_something_to_csv(dict_int_str_date, 'comments_per_day.csv', ['date', 'number_of_comments'], reverse=False, sort_key=lambda t: datetime.date(int(t[0][6:]), int(t[0][3:5]), int(t[0][:2])))
top_something_to_csv(dict_int_str_date_hour, 'comments_per_hour.csv', ['date', 'number_of_comments'], reverse=False, sort_key=lambda t: datetime.datetime.strptime(t[0], "%d/%m/%Y %H"))
def write_top_comment_replies(top_comments_list):
with open('top_comments_replies.csv', 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(['post_text', 'comment_text', 'likes_#'])
for item in top_comments_list:
if item[3] == '1':
file_writer.writerow([item[0], item[1], item[2]])
def write_top_comments(top_comments_list):
with open('top_comments.csv', 'w', newline='', encoding="utf8") as csvfile:
file_writer = csv.writer(csvfile, delimiter='|', quotechar='"', quoting=csv.QUOTE_MINIMAL)
file_writer.writerow(['post_text', 'comment_text', 'likes_#', 'is_reply'])
for item in top_comments_list:
file_writer.writerow(item)
def cleanup_posts():
subprocess.call(["sh", "cleanup_posts.sh"])
def cleanup_comments():
subprocess.call(["sh", "cleanup_comments.sh"]) |
ligo-cbc/pycbc | pycbc/tmpltbank/option_utils.py | Python | gpl-3.0 | 57,143 | 0.004777 | # Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import logging
import textwrap
import numpy
import os
from pycbc.tmpltbank.lambda_mapping import get_ethinca_orders, pycbcValidOrdersHelpDescriptions
from pycbc import pnutils
from pycbc.tmpltbank.em_progenitors import load_ns_sequence
from pycbc.types import positive_float, nonnegative_float
class IndentedHelpFormatterWithNL(argparse.ArgumentDefaultsHelpFormatter):
"""
This class taken from
https://groups.google.com/forum/#!topic/comp.lang.python/bfbmtUGhW8I
and is used to format the argparse help messages to deal with line breaking
nicer. Specfically the pn-order help is large and looks crappy without this.
This function is (C) Tim Chase
"""
def format_description(self, description):
"""
No documentation
"""
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
# the above is still the same
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option(self, option):
"""
No documentation
"""
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
# Everything is the same up through here
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
# Everything is the same after here
result.append("%*s%s\n" % (
indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def get_options_from_group(option_group):
"""
Take an option group and return all the options | that are defined in that
group.
"""
option_list = option_group._group_actions
command_lines = []
for option in option_list:
option_strings = option.option_strings
for string in option_strings:
if string.startswith('--'):
command_lines.append(string)
return command_lines
def insert_base_bank_options(parser):
"""
Adds essential common options for template bank generation to an
ArgumentPars | er instance.
"""
def match_type(s):
err_msg = "must be a number between 0 and 1 excluded, not %r" % s
try:
value = float(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value <= 0 or value >= 1:
raise argparse.ArgumentTypeError(err_msg)
return value
parser.add_argument(
'-m', '--min-match', type=match_type, required=True,
help="Generate bank with specified minimum match. Required.")
parser.add_argument(
'-O', '--output-file', required=True,
help="Output file name. Required.")
parser.add_argument('--f-low-column', type=str, metavar='NAME',
help='If given, store the lower frequency cutoff into '
'column NAME of the single-inspiral table.')
def insert_metric_calculation_options(parser):
"""
Adds the options used to obtain a metric in the bank generation codes to an
argparser as an OptionGroup. This should be used if you want to use these
options in your code.
"""
metricOpts = parser.add_argument_group(
"Options related to calculating the parameter space metric")
metricOpts.add_argument("--pn-order", action="store", type=str,
required=True,
help="Determines the PN order to use. For a bank of "
"non-spinning templates, spin-related terms in the "
"metric will be zero. REQUIRED. "
"Choices: %s" %(pycbcValidOrdersHelpDescriptions))
metricOpts.add_argument("--f0", action="store", type=positive_float,
default=70.,\
help="f0 is used as a dynamic scaling factor when "
"calculating integrals used in metric construction. "
"I.e. instead of integrating F(f) we integrate F(f/f0) "
"then rescale by powers of f0. The default value 70Hz "
"should be fine for most applications. OPTIONAL. "
"UNITS=Hz. **WARNING: If the ethinca metric is to be "
"calculated, f0 must be set equal to f-low**")
metricOpts.add_argument("--f-low", action="store", type=positive_float,
required=True,
help="Lower frequency cutoff used in computing the "
"parameter space metric. REQUIRED. UNITS=Hz")
metricOpts.add_argument("--f-upper", action="store", type=positive_float,
required=True,
help="Upper frequency cutoff used in computing the "
"parameter space metric. REQUIRED. UNITS=Hz")
metricOpts.add_argument("--delta-f", action="store", type=positive_float,
required=True,
help="Frequency spacing used in computing the parameter "
"space metric: integrals of the form \int F(f) df "
"are approximated as \sum F(f) delta_f. REQUIRED. "
"UNITS=Hz")
metricOpts.add_argument("--write-metric", action="store_true",
default=False, help="If given write the metric components "
"to disk as they are calculated.")
return metricOpts
def verify_metric_calculation_options(opts, parser):
"""
Parses the metric calculation options given and verifies that they are
correct.
Parameters
----------
opts : argparse.Values instance
Result of parsing the input options with OptionParser
parser : object
The OptionParser instance.
"""
if not opts.pn_order:
parser.error("Must supply --pn-order")
class metricParameters(object):
"""
This class h |
mdsitton/pract2d | pract2d/core/context.py | Python | bsd-2-clause | 1,065 | 0.002817 | import sdl2 as sdl
class Context(object):
def __init__(self, major, minor, msaa=2):
self.major = major
self.minor = minor
self.msaa = msaa
self.context = None
self._window = None
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_DOUBLEBUFFER, 1)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_MAJOR_VERSI | ON, major)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_MINOR_VERSION, minor)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_CONTEXT_PROFILE_MASK, sdl.SDL_GL_CONTEXT_PROFILE_CORE)
if msaa < 0:
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_MULTISAMPLEBUFFERS, 1)
sdl.SDL_GL_SetAttribute(sdl.SDL_GL_MULTISAMPLESAMPLES, msaa)
def destroy(self):
| sdl.SDL_GL_DeleteContext(self.context)
@property
def window(self):
return self._window
@window.setter
def window(self, win):
self._window = win
if self.context == None:
# Create context if not already created
self.context = sdl.SDL_GL_CreateContext(self._window.window)
|
HEP-DL/root2hdf5 | root2hdf5/plugins/larcv/segment.py | Python | gpl-3.0 | 1,556 | 0.025064 | from root2hdf5.data_types.base import BaseData
import numpy as np
import logging
class SegmentData(BaseData):
logger = logging.getLogger('root2hdf5.data_types.segment')
tree_name = 'image2d_segment_hires_crop_tree'
def __init__(self, _file, output_file):
super(SegmentData, self).__init__(_file)
from larcv import larcv
self.array_converter = larcv.as_ndarray
self.logger.info("Setting Up Segment Data Stream")
self.dataset = output_file.create_dataset("image2d/segment",
(10,3,576,576),
maxshape=(None,3,576,576),
chunks=(10,3,576,576),
dtype='f',compression="gzip")
self.dataset.attrs['name'] = 'image2d_segment_hires_crop_tree'
self.dataset.attrs['index0_name'] = 'eventN'
self.dataset.attrs['index1_name'] = 'layerN'
self.dataset.attrs['index | 3_name'] = 'pixelX'
self.dataset.attrs['index4_name'] = 'pixelY'
self.buffer = np.ndarray((10,3,576,576), dtype='H')
self.buffer_index=0
def process_branch(self, branch):
for layer in range(3):
layerimage = self.array_converter(branch.at(la | yer))
layerimage.resize(576,576)
self.buffer[self.buffer_index, layer] = layerimage
self.buffer_index+=1
if self.event_index %10==0:
self.buffer_index=0
self.dataset.resize( (self.event_index+10,3,576,576) )
self.dataset[self.event_index:self.event_index+10,:,:,:] = self.buffer |
heinrichgh/fintrack | app/forms.py | Python | gpl-2.0 | 1,523 | 0.004596 | __author__ = 'heinrich'
from flask.ext.wtf import Form
from wtforms import StringField, PasswordField, FloatField, DateField, SelectField
from wtforms.validators import DataRequired, EqualTo, Email
import datetime
class RegisterForm(Form):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password | ', validators=[DataRequired()])
passwordConfirm = PasswordField('Confirm Password', validators=[DataRequired(), EqualTo('password', 'Passwords did not match')])
name = StringField('Name', validators=[DataRequired()])
surname = StringField('Surname')
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
class IncomeForm(Form):
amount = FloatField('Amount', validators=[Da | taRequired()])
date = DateField('Date', validators=[DataRequired()], default=datetime.date.today())
incomeType = SelectField('Income Type', validators=[DataRequired()], coerce=int)
class IncomeTypeForm(Form):
type = StringField('Income Type', validators=[DataRequired()])
class ExpenditureForm(Form):
amount = FloatField('Amount', validators=[DataRequired()])
date = DateField('Date', validators=[DataRequired()], default=datetime.date.today())
expenditureType = SelectField('Expenditure Type', validators=[DataRequired()], coerce=int)
class ExpenditureTypeForm(Form):
type = StringField('Expenditure Type', validators=[DataRequired()]) |
hsr-ba-fs15-dat/opendatahub | src/main/python/hub/serializers.py | Python | mit | 6,460 | 0.002786 | # -*- coding: utf-8 -*-
# Get rid of "FormatSerializer:Method 'create' is abstract in class 'BaseSerializer' but is not overridden"
# FormatSerializer is read only anyway
# pylint: disable=abstract-method
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.reverse import reverse
from django.db.models import Q
from authentication.serializers import UserDisplaySerializer
from hub.models import PackageModel, DocumentModel, FileGroupModel, FileModel, TransformationModel, UrlModel
"""
Django serializers.
"""
class PackageSerializer(serializers.HyperlinkedModelSerializer):
"""
Packages are either documents or transformations. Do some magic to differentiate between them (django/rest_framework
is really bad at this).
"""
owner = UserDisplaySerializer(read_only=True)
type = serializers.SerializerMethodField()
preview = serializers.SerializerMethodField()
template = serializers.SerializerMethodField()
class Meta(object):
""" Meta class for PackageSerializer. """
model = PackageModel
fields = ('id', 'url', 'name', 'description', 'private', 'owner', 'created_at', 'type', 'preview', 'template')
def get_template(self, obj):
if isinstance(obj, TransformationModel):
return obj.is_template
return False
def get_type(self, obj):
if isinstance(obj, DocumentModel):
return 'document'
elif isinstance(obj, TransformationModel):
return 'transformation'
return 'unknown'
def get_preview(self, obj):
request = self.context.get('request', None)
format = self.context.get('format', None)
return reverse('{}model-preview'.format(self.get_type(obj)), kwargs={'pk': obj.id}, request=request,
format=format)
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
file_groups = serializers.HyperlinkedIdentityField('documentmodel-filegroup')
owner = UserDisplaySerializer(read_only=True)
preview = serializers.HyperlinkedIdentityField('documentmodel-preview')
class Meta(object):
""" Meta class for DocumentSerializer. """
model = DocumentModel
fields = ('id', 'url', 'name', 'description', 'file_groups', 'private', 'owner', 'created_at', 'preview')
def to_representation(self, instance):
ret = super(DocumentSerializer, self).to_representation(instance)
ret['type'] = 'document'
return ret
class FileSerializer(serializers.HyperlinkedModelSerializer):
file_format = serializers.CharField(source='format')
class Meta(object):
""" Meta class for FileSerializer. """
model = FileModel
fields = ('id', 'url', 'file_name', 'file_format', 'file_group')
class UrlSerializer(serializers.HyperlinkedModelSerializer):
source_url = serializers.URLField()
url_format = serializers.CharField(source='format')
class Meta(object):
""" Meta class for UrlSerializer. """
model = UrlModel
fields = ('id', 'url', 'source_url', 'url_format', 'refresh_after', 'type', 'file_group')
class TransformationIdSerializer(serializers.Serializer):
id = serializers.IntegerField(read_only=True)
url = serializers.HyperlinkedIdentityField(view_name='transformationmodel-detail')
name = serializers.CharField(read_only=True)
class Meta(object):
fields = ('id', 'url', 'name')
class RelatedTransformationMixin(object):
def _get_related_transformations(self, obj, request):
filter = Q(private=False)
if request.user:
filter |= Q(owner=request.user.id)
related_transformations = obj.related_transformations.filter(filter)
serializer = TransformationIdSerializer(related_transformations, many=T | rue, context={'request': request})
return serializer.data
class FileGroupSerializer(serializers.HyperlinkedModelSerializer, RelatedTransformationMixin):
files = FileSerializer(many=True, read_only=True)
urls = UrlSerializer(many=True, read_only=True)
document = DocumentSerializer(read_only=True)
related_trans | formations = serializers.SerializerMethodField()
data = serializers.HyperlinkedIdentityField('filegroupmodel-data')
token = serializers.HyperlinkedIdentityField('filegroupmodel-token')
preview = serializers.HyperlinkedIdentityField('filegroupmodel-preview')
class Meta(object):
""" Meta class for FileGroupSerializer. """
model = FileGroupModel
fields = ('id', 'url', 'document', 'files', 'urls', 'data', 'preview', 'related_transformations', 'token')
depth = 1
def get_related_transformations(self, obj):
return self._get_related_transformations(obj, self.context['request'])
class FormatSerializer(serializers.Serializer):
name = serializers.CharField(read_only=True)
label = serializers.CharField(read_only=True)
description = serializers.CharField(read_only=True)
example = serializers.CharField(read_only=True)
extension = serializers.CharField(read_only=True)
class TransformationSerializer(serializers.HyperlinkedModelSerializer, RelatedTransformationMixin):
referenced_file_groups = serializers.HyperlinkedIdentityField('transformationmodel-filegroups')
referenced_transformations = serializers.HyperlinkedIdentityField('transformationmodel-transformations')
token = serializers.HyperlinkedIdentityField('transformationmodel-token')
related_transformations = serializers.SerializerMethodField()
owner = UserDisplaySerializer(read_only=True)
data = serializers.HyperlinkedIdentityField('transformationmodel-data')
preview = serializers.HyperlinkedIdentityField('transformationmodel-preview')
class Meta(object):
""" Meta class for TransformationSerializer. """
model = TransformationModel
fields = ('id', 'url', 'name', 'description', 'transformation', 'private', 'owner', 'data', 'is_template',
'preview', 'referenced_file_groups', 'referenced_transformations', 'related_transformations', 'token')
def to_representation(self, instance):
ret = super(TransformationSerializer, self).to_representation(instance)
ret['type'] = 'transformation'
return ret
def get_related_transformations(self, obj):
return self._get_related_transformations(obj, self.context['request'])
|
WCCCEDU/twitter-commons | src/python/twitter/common/rpc/finagle/__init__.py | Python | apache-2.0 | 1,073 | 0 | # ==================================================================================================
# Copyright 2013 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by appl | icable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITH | OUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from twitter.common.rpc.finagle.protocol import (
TFinagleProtocol,
TFinagleProtocolWithClientId)
__all__ = [
'TFinagleProtocol',
'TFinagleProtocolWithClientId'
]
|
MMShreyas/AVL-Trees | avl.py | Python | gpl-3.0 | 4,346 | 0.015416 | import bst
def height(node):
if node is None:
return -1
else:
return node.height
def update_height(node):
node.height = max(height(node.left), height(node.right)) + 1
class AVL(bst.BST):
"""
AVL binary search tree implementation.
Supports insert, find, and delete-min operations in O(lg n) time.
"""
def left_rotate(self, x):
y = x.right
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.right = y.left
if x.right is not None:
x.right.parent = x
y.left = x
x.parent = y
update_height(x)
update_height(y)
def right_rotate(self, x):
y = x.left
y.parent = x.parent
if y.parent is None:
self.root = y
else:
if y.parent.left is x:
y.parent.left = y
elif y.parent.right is x:
y.parent.right = y
x.left = y.right
if x.left is not None:
x.left.parent = x
y.right = x
x.parent = y
update_height(x)
update_height(y)
def insert(self, t):
"""Insert key t into this tree, modifying it in-place."""
print "Inserting",t,"\n"
node = bst.BST.insert(self, t)
self.rebalance(node)
print self
print("\n\n")
def rebalance(self, node):
while node is not None:
update_height(node)
if height(node.left) >= 2 + height(node.right):
| print "Rebalancing Left Subtree of",node.key
| if height(node.left.left) >= height(node.left.right):
print "Height of left Subtree",height(node.left.left),">=","Height of Right Subtree",height(node.left.right)
print "Applying Right Rotate at",node.key
self.right_rotate(node)
else:
print "Applying Left Rotate at",node.left.key
self.left_rotate(node.left)
print "Applying Right Rotate at",node.key
self.right_rotate(node)
elif height(node.right) >= 2 + height(node.left):
print "Rebalancing Right Subtree of",node.key
if height(node.right.right) >= height(node.right.left):
print "Height of left Subtree",height(node.right.left),"<=","Height of Right Subtree",height(node.right.right)
print "Applying left Rotate at",node.key
self.left_rotate(node)
else:
print "Applying Right Rotate at",node.right.key
self.right_rotate(node.right)
print "Applying Left Rotate at",node.key
self.left_rotate(node)
node = node.parent
def delete_min(self):
node, parent = bst.BST.delete_min(self)
self.rebalance(parent)
#raise NotImplemented('AVL.delete_min')
def delete(self, t):
node, parent = bst.BST.delete(self,t)
#self.rebalance(parent)
#node = self.root
#while node is not None:
#if t == node.key:
#print "Search Success"
#if(node.left == None and node.right == None):
#self.left = None
#self.right = None
#self.parent = None
#node.parent.left = None
#return
#if(node.parent.left):
#node.parent.left = node.right
#if(node.parent.right):
#node.parent.right = node.left
#return node
#elif t < node.key:
# node = node.left
#else:
# node = node.right
# node, parent = bst.BST.delete_min(self)
#self.rebalance(parent)
#def test(args=None):
# bst.test(args, BSTtype=AVL)
#if __name__ == '__main__': test()
t = AVL()
t.insert(1)
t.insert(6)
t.insert(9)
t.insert(30)
t.insert(156)
t.insert(20)
t.insert(5)
t.insert(19)
t.insert(18)
t.insert(154)
t.insert(158)
t.insert(157)
t.insert(125)
#t.delete(158)
t.insert(155)
t.insert(180)
t.delete(30)
print t
|
bittssystem/version3 | dashboard.py | Python | mit | 4,121 | 0.000485 | """
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = 'version3.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
# self.children.app | end(modules.Group(
# _('Group: Administration & Applications'),
# column=1,
# collapsible=True,
# children = [
# modules.AppList(
# _('Administration'),
# column=1,
| # collapsible=False,
# models=('django.contrib.*',),
# ),
# modules.AppList(
# _('Applications'),
# column=1,
# css_classes=('collapse closed',),
# exclude=('django.contrib.*',),
# )
# ]
# ))
# append an app list module for "Applications"
# self.children.append(modules.AppList(
# _('AppList: Applications'),
# collapsible=True,
# column=1,
# css_classes=('collapse closed',),
# exclude=('django.contrib.*',),
# ))
# append an app list module for "Administration"
# self.children.append(modules.ModelList(
# _('ModelList: Administration'),
# column=1,
# collapsible=False,
# models=('django.contrib.*',),
# ))
# append another link list module for "support".
# self.children.append(modules.LinkList(
# _('Media Management'),
# column=2,
# children=[
# {
# 'title': _('FileBrowser'),
# 'url': '/admin/filebrowser/browse/',
# 'external': False,
# },
# ]
# ))
# append another link list module for "support".
# self.children.append(modules.LinkList(
# _('Support'),
# column=2,
# children=[
# {
# 'title': _('Django Documentation'),
# 'url': 'http://docs.djangoproject.com/',
# 'external': True,
# },
# {
# 'title': _('Grappelli Documentation'),
# 'url': 'http://packages.python.org/django-grappelli/',
# 'external': True,
# },
# {
# 'title': _('Grappelli Google-Code'),
# 'url': 'http://code.google.com/p/django-grappelli/',
# 'external': True,
# },
# ]
# ))
# append a feed module
# self.children.append(modules.Feed(
# _('Latest Django News'),
# column=2,
# feed_url='http://www.djangoproject.com/rss/weblog/',
# limit=5
# ))
# append a recent actions module
# self.children.append(modules.RecentActions(
# _('Recent Actions'),
# limit=5,
# collapsible=False,
# column=3,
# ))
self.children.append(modules.ModelList(
title='Office Files / Parties',
column=1,
models=('bittscmsapp.models.CoreInstruction',
'bittscmsapp.models.CoreParty',)
))
self.children.append(modules.ModelList(
title='Lookup Values',
collapsible=True,
column=2,
models=()
))
|
rtfd/readthedocs.org | readthedocs/oauth/migrations/0008_add-project-relation.py | Python | mit | 565 | 0.00177 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-22 20:10
import | django.db.models.deletion
from django.db import migra | tions, models
class Migration(migrations.Migration):
dependencies = [
('oauth', '0007_org_slug_nonunique'),
]
operations = [
migrations.AddField(
model_name='remoterepository',
name='project',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='remote_repository', to='projects.Project'),
),
]
|
oadam/proprio | bank_import/views.py | Python | mit | 9,434 | 0.000318 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from forms import GenerateForm, SubmitForm
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db import transaction
from main.models import Tenant, Payment
from from_settings import get_element
from whoosh.filedb.filestore import RamStorage
from whoosh.fields import TEXT, NUMERIC, Schema
from whoosh.query import Term, Or
from openpyxl import Workbook, load_workbook
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.writer.excel import save_virtual_workbook
import json
import re
import datetime
import itertools
from collections import defaultdict
from models import ImportedLine
IMPORTER_SETTINGS = 'PROPRIO_IMPORT_PARSERS'
MIN_SCORE = 3.0
XLSX_CONTENT_TYPE = (
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
@login_required
def forms(request):
upload_form = GenerateForm()
submit_form = SubmitForm()
context = {'generate_form': upload_form, 'submit_form': submit_form}
return render(request, 'bank_import/upload.html', context)
@login_required
def generate(request):
if request.method != 'POST':
raise Http404("use POST")
form = GenerateForm(request.POST, request.FILES)
if not form.is_valid():
return HttpResponseBadRequest(form.errors.as_json())
data = form.cleaned_data
importer = get_element(IMPORTER_SETTINGS, data['type'])
parsed_file = importer.parse(data['file'])
parsed_file = remove_saved_lines(parsed_file)
mapping_worbook = generate_mapping_file(parsed_file)
today = datetime.datetime.today()
filename = today.strftime("mapping-%m-%d_%H:%M:%S.xlsx")
response = HttpResponse(
mapping_worbook,
content_type=XLSX_CONTENT_TYPE)
cd = 'attachment; filename="{}"'.format(filename)
response['Content-Disposition'] = cd
return response
@login_required
def submit(request):
if request.method != 'POST':
raise Http404("use POST")
form = SubmitForm(request.POST, request.FILES)
if not form.is_valid():
return HttpResponseBadRequest(form.errors.as_json())
| data = form.c | leaned_data
submit_mapping(data['file'])
return HttpResponse(_("import is successful").capitalize())
class TenantPaymentMapper:
type = 'tenant_payment'
caption = ugettext_lazy('Tenant payment')
def __init__(self):
tenants = Tenant.objects.all().order_by('name')
self.tenants = {t.id: t for t in tenants}
def get_all_values(self):
return self.tenants.keys()
def get_caption(self, value):
return self.tenants[value].name
def get_long_caption(self, value):
return u'{}: {}'.format(self.caption, self.get_caption(value))
def save(self, value, line):
tenant = self.tenants[value]
payment = Payment(
tenant=tenant,
date=line.date,
amount=line.amount,
description=line.caption)
payment.save()
class TenantNameGuesser:
def __init__(self):
tenants = Tenant.objects.all().order_by('name')
tenant_schema = Schema(name=TEXT(stored=True), id=NUMERIC(stored=True))
tenant_storage = RamStorage()
tenant_ix = tenant_storage.create_index(tenant_schema)
tenant_writer = tenant_ix.writer()
for t in tenants:
tenant_writer.add_document(id=t.id, name=t.name.lower())
tenant_writer.commit()
self.index = tenant_ix
def guess(self, line):
with self.index.searcher() as searcher:
words = re.split('\W+', line.caption)
query = Or([Term("name", t.lower()) for t in words])
result = searcher.search(query)
matches = [
(('tenant_payment', r['id'],), r.score,)
for r in result]
return matches
mapper_factories = (lambda: TenantPaymentMapper(),)
guesser_factories = (lambda: TenantNameGuesser(),)
def value_to_combo_entry(mapper, value):
id = json.dumps((mapper.type, value,))
caption = mapper.get_long_caption(value)
return (id, caption,)
def guess(guessers, mappers, line):
guesses_map = defaultdict(int)
for g in guessers:
guess = g.guess(line)
for value, score in guess:
guesses_map[value] += score
guesses = sorted(guesses_map.items(), key=lambda g: -g[1])
if not guesses:
return None
else:
(mapper_type, value), score = guesses[0]
if score < MIN_SCORE:
return None
else:
mapper = mappers[mapper_type]
return value_to_combo_entry(mapper, value)
def remove_saved_lines(lines):
saved = ImportedLine.objects.all()
all_ids = set([(l.date, l.amount, l.caption,) for l in saved])
return [l for l in lines
if (l.date, l.amount, l.caption,) not in all_ids]
def fill_all_mappings(worksheet, mappers):
hardcoded_choices = [
('', _('Decide later')),
('HIDE', _('Hide line definitively')),
]
mapper_choices = [
value_to_combo_entry(m, v)
for m in mappers
for v in m.get_all_values()]
current_row = 1
caption_occurences = defaultdict(int)
for id, caption in itertools.chain(hardcoded_choices, mapper_choices):
caption_occurences[caption] += 1
occurences = caption_occurences[caption]
if occurences > 1:
caption += '_{}'.format(occurences)
worksheet.cell(column=2, row=current_row).value = id
worksheet.cell(column=1, row=current_row).value = caption
current_row += 1
def parse_caption_to_id(all_mapping_worksheet):
result = {}
for row in range(1, all_mapping_worksheet.max_row + 1):
caption = all_mapping_worksheet.cell(row=row, column=1).value
if caption is None:
# skip empty rows (max_row is not very reliable)
continue
id = all_mapping_worksheet.cell(row=row, column=2).value
# for Decide later mapping
if id is None:
id = ''
result[caption] = id
return result
def get_mappers_and_guessers():
mappers = [m() for m in mapper_factories]
mappers_map = {m.type: m for m in mappers}
guessers = [g() for g in guesser_factories]
return (mappers_map, guessers,)
def generate_mapping_file(lines):
wb = Workbook()
main_sheet = wb.active
main_sheet.title = _('mapping')
mapping_sheet = wb.create_sheet()
mapping_sheet.title = _('possible_mappings')
mappers, guessers = get_mappers_and_guessers()
fill_all_mappings(mapping_sheet, mappers.values())
caption_to_id = parse_caption_to_id(mapping_sheet)
id_to_caption = dict(reversed(item) for item in caption_to_id.items())
wb.create_named_range('all_captions', mapping_sheet, 'A1:A1048576')
dv = DataValidation(
type="list",
formula1='all_captions',
allow_blank=True,
showDropDown=True)
dv.ranges.append('D1:D1048576')
main_sheet.add_data_validation(dv)
main_sheet['A1'] = _('date').capitalize()
main_sheet['B1'] = _('amount').capitalize()
main_sheet['C1'] = _('caption').capitalize()
main_sheet['D1'] = _('mapping').capitalize()
for i in range(len(lines)):
line = lines[i]
best_guess = guess(guessers, mappers, line)
if best_guess is None:
best_id = '' # decide later
else:
best_id, useless_caption = best_guess
rownum = i+2
main_sheet.cell(column=1, row=rownum).value = line.date
main_sheet.cell(column=2, row=rownum).value = line.amount
main_sheet.cell(column=3, row=rownum).value = line.caption
main_sheet.cell(column=4, row=rownum).value = id_to_caption[best_id]
return save_virtual_workbook(wb)
@transaction.atomic
def submit_lines(lines):
mapper_map, _guessers = get_mappers_and_guessers()
for line in lines:
mapping = line.mapping
# skip non-mapped lines
if mapping == '':
continue
# save the mapping |
rogerscristo/BotFWD | env/lib/python3.6/site-packages/telegram/inline/inlinequeryresultdocument.py | Python | mit | 4,538 | 0.002644 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This | program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the | Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram InlineQueryResultDocument"""
from telegram import InlineQueryResult
class InlineQueryResultDocument(InlineQueryResult):
"""
Represents a link to a file. By default, this file will be sent by the user with an optional
caption. Alternatively, you can use :attr:`input_message_content` to send a message with the
specified content instead of the file. Currently, only .PDF and .ZIP files can be sent
using this method.
Attributes:
type (:obj:`str`): 'document'.
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
title (:obj:`str`): Title for the result.
caption (:obj:`str`): Optional. Caption, 0-200 characters
document_url (:obj:`str`): A valid URL for the file.
mime_type (:obj:`str`): Mime type of the content of the file, either "application/pdf"
or "application/zip".
description (:obj:`str`): Optional. Short description of the result.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the file.
thumb_url (:obj:`str`): Optional. URL of the thumbnail (jpeg only) for the file.
thumb_width (:obj:`int`): Optional. Thumbnail width.
thumb_height (:obj:`int`): Optional. Thumbnail height.
Args:
id (:obj:`str`): Unique identifier for this result, 1-64 bytes.
title (:obj:`str`): Title for the result.
caption (:obj:`str`, optional): Caption, 0-200 characters
document_url (:obj:`str`): A valid URL for the file.
mime_type (:obj:`str`): Mime type of the content of the file, either "application/pdf"
or "application/zip".
description (:obj:`str`, optional): Short description of the result.
reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached
to the message.
input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the
message to be sent instead of the file.
thumb_url (:obj:`str`, optional): URL of the thumbnail (jpeg only) for the file.
thumb_width (:obj:`int`, optional): Thumbnail width.
thumb_height (:obj:`int`, optional): Thumbnail height.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self,
id,
document_url,
title,
mime_type,
caption=None,
description=None,
reply_markup=None,
input_message_content=None,
thumb_url=None,
thumb_width=None,
thumb_height=None,
**kwargs):
# Required
super(InlineQueryResultDocument, self).__init__('document', id)
self.document_url = document_url
self.title = title
self.mime_type = mime_type
# Optionals
if caption:
self.caption = caption
if description:
self.description = description
if reply_markup:
self.reply_markup = reply_markup
if input_message_content:
self.input_message_content = input_message_content
if thumb_url:
self.thumb_url = thumb_url
if thumb_width:
self.thumb_width = thumb_width
if thumb_height:
self.thumb_height = thumb_height
|
slyfocks/pigs | shortcodes.py | Python | apache-2.0 | 1,664 | 0.001803 | from itertools import product, islice
from joblib import Parallel, delayed
import json
import requests
from requests.exceptions import ConnectionError, Timeout
from simplejson import JSONDecodeError
import string
CLIENT_ID = 'a48b4d0f6e0745e2ba68902a1f68f8d5'
URL = 'https://api.instagram.com/oembed/media/?url=http://instagram.com/p/{sc}'
def generate(length=4, start=3500000, end=4000000):
return islice((''.join(entry) for entry in product(string.ascii_letters + string.digits, repeat=length)),
start, end)
def _validate(shortcode, verbose=True):
url = URL.format(sc=shortcode)
try:
data = requests.get(url, timeout=60).json()
except (IOError, TypeError, JSONDecodeError, Timeout, ConnectionError):
return
try:
url, user, title = data['url'], data['author_name'], data['title']
except KeyError:
return
print(user)
return shortcode, dict(url=url, user=user, title=title)
def validate(threads=500, verbose=True):
if verbose:
v_int = 100
else:
v_int = 0
try:
with open('codes.json') as file:
prev_data = json.load(file)
except FileNotFoundError:
prev_data = dict()
data = Parallel(n_jobs=threads, verbose=v_int)(delayed(_validate)(shortcode=shor | tcode, verbose=verbose)
for shortco | de in generate())
data = [entry for entry in data if entry] + list(prev_data.items())
with open('codes.json', 'w') as writefile:
json.dump(dict(data), writefile, indent=4, sort_keys=True, separators=(',', ': '))
if __name__ == "__main__":
validate()
|
chengjunjian/kpush | backend/kpush/share/extensions.py | Python | mit | 176 | 0 | # -*- coding: utf-8 -*-
"""
所有的插 | 件,只是生成,初始化统一放到applicat | ion里
"""
from flask_admin import Admin
admin = Admin(template_mode='bootstrap3')
|
phenoxim/nova | nova/tests/unit/cmd/test_baseproxy.py | Python | apache-2.0 | 3,676 | 0.000272 | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from six.moves import StringIO
from nova.cmd import baseproxy
from nova import config
from nova.console import websocketproxy
from nova import test
from nova import version
@mock.patch.object(config, 'parse_args', new=lambda *args, **kwargs: None)
class BaseProxyTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseProxyTestCase, self).setUp()
self.stderr = StringIO()
self.useFixture(fixtures.MonkeyPatch('sys.stderr', self.stderr))
@mock.patch('os.path.exists', return_value=False)
# NOTE(mriedem): sys.exit raises TestingException so we can actually exit
# the test normally.
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_ssl_without_cert(self, mock_exit, mock_exists):
self.flags(ssl_only=True)
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
self.assertEqual(self.stderr.getvalue(),
"SSL only and self.pem not found\n")
@mock.patch('os.path.exists', return_value=False)
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_web_dir_does_not_exist(self, mock_exit, mock_exists):
self.flags(web='/my/fake/webserver/')
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
mock_exit.assert_called_once_with(-1)
@mock.patch('os.path.exists', return_value=True)
@mock.patch.object(logging, 'setup')
@mock.patch.object(gmr.TextGuruMeditation, 'setup_autorun')
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.__init__',
r | eturn_value=None)
@mock.patch('nova.console.websocketproxy.NovaWebSocketProxy.start_server')
def test_proxy(self, mock_start, mock_init, mock_gmr, mock_log,
m | ock_exists):
baseproxy.proxy('0.0.0.0', '6080')
mock_log.assert_called_once_with(baseproxy.CONF, 'nova')
mock_gmr.mock_assert_called_once_with(version)
mock_init.assert_called_once_with(
listen_host='0.0.0.0', listen_port='6080', source_is_ipv6=False,
cert='self.pem', key=None, ssl_only=False,
daemon=False, record=None, security_proxy=None, traffic=True,
web='/usr/share/spice-html5', file_only=True,
RequestHandlerClass=websocketproxy.NovaProxyRequestHandler)
mock_start.assert_called_once_with()
@mock.patch('os.path.exists', return_value=False)
@mock.patch('sys.exit', side_effect=test.TestingException)
def test_proxy_exit_with_error(self, mock_exit, mock_exists):
self.flags(ssl_only=True)
self.assertRaises(test.TestingException, baseproxy.proxy,
'0.0.0.0', '6080')
self.assertEqual(self.stderr.getvalue(),
"SSL only and self.pem not found\n")
mock_exit.assert_called_once_with(-1)
|
timothyparez/PyBitmessage | src/bitmessageqt/retranslateui.py | Python | mit | 900 | 0.005556 | from os import path
from PyQt4 import QtGui
from debug import logger
import widgets
class RetranslateMixin(object):
def retranslateUi(self):
defaults = QtGui.QWidget()
widgets.load(self.__class__.__name__.lower() + '.ui', defaults)
for attr, value in defaults.__dict__.iteritems():
setTextMethod = getattr(value, "setText", None)
if c | allable(setTextMethod):
getattr(self, attr).setText(getattr(defaults, | attr).text())
elif isinstance(value, QtGui.QTableWidget):
for i in range (value.columnCount()):
getattr(self, attr).horizontalHeaderItem(i).setText(getattr(defaults, attr).horizontalHeaderItem(i).text())
for i in range (value.rowCount()):
getattr(self, attr).verticalHeaderItem(i).setText(getattr(defaults, attr).verticalHeaderItem(i).text())
|
ati-ozgur/KDD99ReviewArticle | HelperCodes/create_table_metaDatasetsUsed.py | Python | mit | 1,724 | 0.011021 |
import ReviewHelper
import pandas as pd
df = ReviewHelper.get_pandas_data_frame_created_from_bibtex_file()
# find problematic ones
df[df.metaDatasetsUsed.isnull()]
list1 = df.metaDatasetsUsed.str.split(",").tolist()
df1 = pd.DataFrame(list1)
for i in range(df1.columns.size):
df1[i] = df1[i].str.strip()
stacked = df1.stack()
stacked_value_counts = stacked.value_counts()
greater_than = stacked_value_counts[stacked_value_count | s > 3]
table_content_inside=""
list_ids_dataset_names = ["KDD99","NSL-KDD","DARPA","Kyoto","ISCX"]
table_content_inside=""
for dataset_name in greater_than.index:
dataset_count = greater_than[dataset_name]
dataset_name_in_table = dataset_name
dataset_name_in_table = dataset_name
if(dataset_name in list_ids_dataset_names):
dataset_name_in_table = "\\rowcolor{Gray}\n" + dataset_name + "* "
line = "{dataset_name} | & {dataset_count} \\\\ \n".format(
dataset_name = dataset_name_in_table
,dataset_count = dataset_count
)
table_content_inside = table_content_inside + line
table_content_start = """
\\begin{table}[!ht]
\\centering
\\caption{ \\textbf{Most used Datasets}. * denotes IDS datasets. Datasets that are used less than three is not included.}
\\label{table-metaDatasetsUsed}
\\begin{tabular}{ll}
\\toprule
\\textbf{Dataset Name } & \\textbf{Article Count} \\\\
\\midrule
"""
table_content_end = """
\\bottomrule
\\end{tabular}
\\end{table}
"""
table_content_full = table_content_start + table_content_inside + table_content_end
#print table_content_full
filename = "../latex/table-metaDatasetsUsed.tex"
target = open(filename, 'w')
target.write(table_content_full)
target.close()
|
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/keras/api/keras/applications/xception/__init__.py | Python | bsd-2-clause | 1,148 | 0.002613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwa | re
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| # ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.applications.xception import decode_predictions
from tensorflow.contrib.keras.python.keras.applications.xception import preprocess_input
from tensorflow.contrib.keras.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
|
srkiyengar/NewGripper | src/shutter.py | Python | mit | 509 | 0.011788 | impo | rt serial
import struct
import time
# j = 2 means open, j = 1 means close shutter
def command_shutter(port, j):
# first, start the serial port to communicate with the arduino
if port.isOpen():
print "port open"
port.write(struct.pack('>B', j))
| return 1
else:
return 0
#while(1 == 1):
#cover_or_not = int(input('Enter a number. 1 will cover the Lenses of the NDI, while 2 will open the blinds.'))
#data.write(struct.pack('>B',cover_or_not)) |
ArcherSys/ArcherSys | skulpt/test/run/t54.py | Python | mit | 21 | 0.047619 | a,b = "OK"
print a+ | b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.