| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| RobertoPrevato/Humbular | tools/knight/core/literature/scribe.py | Python | mit | 1,844 | 0.003254 |
import io
import sys
isPython3 = sys.version_info >= (3, 0)
class Scribe:
@staticmethod
def read(path):
with io.open(path, mode="rt", encoding="utf-8") as f:
s = f.read()
# go to beginning
f.seek(0)
return s
@staticmethod
def read_beginning(path, lines):
with io.open(path, mode="rt", encoding="utf-8") as f:
s = f.read(lines)
# go to beginning
f.seek(0)
return s
@staticmethod
def read_lines(path):
with io.open(path, mode="rt", encoding="utf-8") as f:
content = f.readlines()
return content
@staticmethod
def write(contents, path):
if isPython3:
with open(path, mode="wt", encoding="utf-8") as f:
# truncate previous contents
f.truncate()
f.write(contents)
else:
with io.open(path, mode="wt", encoding="utf-8") as f:
# truncate previous contents
f.truncate()
f.write(contents.decode("utf8"))
@staticmethod
def write_lines(lines, path):
if isPython3:
with open(path, mode="wt", encoding="utf-8") as f:
f.writelines([l + "\n" for l in lines])
else:
with io.open(path, mode="wt") as f:
for line in lines:
f.writelines(line.decode("utf8") + "\n")
@staticmethod
def add_content(contents, path):
if isPython3:
with open(path, mode="a", encoding="utf-8") as f:
f.writelines(contents)
else:
with io.open(path, mode="a") as f:
f.writelines(contents.decode("utf8"))
| batxes/4Cin | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres8869.py | Python | gpl-3.0 | 88,246 | 0.024511 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((21.3344, 1612.28, 1073.33), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-370.128, 1424.7, 1039.57), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((-9.86769, 1430.49, 1197.54), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-335.643, 1219.94, 1339.47), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-659.866, 1028.66, 1601.7), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((-224.883, 1361.96, 1780.46), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((159.739, 1600.77, 2012.27), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((-67.3813, 1146.81, 2144.53), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((-295.063, 708.196, 2372.36), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((-63.4141, 1048.45, 2427.44), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((107.516, 1486.57, 2316.03), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((345.072, 1994.19, 2101.19), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((173.245, 2366.66, 2036.51), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((-132.675, 2484.43, 2064.09), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((-372.068, 2864.75, 2099.2), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((-625.119, 3336.83, 1863.01), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((-801.778, 3816.38, 1491.49), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((-698.587, 4126.53, 1126.53), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((-753.257, 4285.81, 653.058), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geomet
|
ry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((-1168.05, 4431.5, 116.214), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((-630.978, 4254.47, 410.959), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((-459.569, 4367.7, 797.015), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((-412.194, 4684.06, 1073.17), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((-90.1353, 4740.74, 1036.61), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((25.4796, 4730.36, 747.398), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((-61.715, 4453.87, 593.142), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7.71647, 4193.41, 925.277), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((350.69, 3728.63, 1043.25), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((711.286, 3347.48, 1184.17), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((735.997, 3030.5, 1495.42), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((823.424, 2951.07, 1777.37), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((710.071, 3219.23, 1628.56), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_s
| dios-game/dios-cocos | src/oslibs/cocos/cocos-src/tools/jenkins-scripts/configs/cocos-2dx-pull-request-build-comment-trigger.py | Python | mit | 201 | 0.014925 |
import os
#os.system('git checkout develop')
#os.system('git pull origin develop')
ret = os.system('python -u tools/jenkins-scripts/job-comment-trigger.py')
if ret == 0:
exit(0)
else:
exit(1)
| kalebhartje/schoolboost | cms/djangoapps/course_creators/tests/test_admin.py | Python | agpl-3.0 | 2,658 | 0.002634 |
"""
Tests course_creators.admin.py.
"""
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.admin.sites import AdminSite
from django.http import HttpRequest
import mock
from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from auth.authz import is_user_in_creator_group
class CourseCreatorAdminTest(TestCase):
"""
Tests for course creator admin.
"""
def setUp(self):
""" Test case setup """
self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
self.table_entry = CourseCreator(user=self.user)
self.table_entry.save()
self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
self.admin.is_staff = True
self.request = HttpRequest()
self.request.user = self.admin
self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())
def test_change_status(self):
"""
Tests that updates to state impact the creator group maintained in authz.py.
"""
def change_state(state, is_creator):
""" Helper method for changing state """
self.table_entry.state = state
self.creator_admin.save_model(self.request, self.table_entry, None, True)
self.assertEqual(is_creator, is_user_in_creator_group(self.user))
with mock.patch.dict('django.conf.settings.MITX_FEATURES', {"ENABLE_CREATOR_GROUP": True}):
# User is initially unrequested.
self.assertFalse(is_user_in_creator_group(self.user))
change_state(CourseCreator.GRANTED, True)
change_state(CourseCreator.DENIED, False)
change_state(CourseCreator.GRANTED, True)
change_state(CourseCreator.PENDING, False)
change_state(CourseCreator.GRANTED, True)
change_state(CourseCreator.UNREQUESTED, False)
def test_add_permission(self):
"""
Tests that staff cannot add entries
"""
self.assertFalse(self.creator_admin.has_add_permission(self.request))
def test_delete_permission(self):
"""
Tests that staff cannot delete entries
"""
self.assertFalse(self.creator_admin.has_delete_permission(self.request))
def test_change_permission(self):
"""
Tests that only staff can change entries
"""
self.assertTrue(self.creator_admin.has_change_permission(self.request))
self.request.user = self.user
self.assertFalse(self.creator_admin.has_change_permission(self.request))
| go1dshtein/pgv | setup.py | Python | gpl-2.0 | 968 | 0 |
#!/usr/bin/env python
from setuptools import setup
setup(name='pgv',
version='0.0.2',
description="PostgreSQL schema versioning tool",
long_description=open("README.rst").read(),
author='Kirill Goldshtein',
author_email='goldshtein.kirill@gmail.com',
packages=['pgv', 'pgv.utils', 'pgv.vcs_provider'],
package_dir={'pgv': 'pgv'},
package_data={'pgv': ['data/init.sql']},
install_requires=['GitPython >= 0.3.1', 'psycopg2', "PyYAML"],
test_suite='tests',
scripts=['bin/pgv'],
license='GPLv2',
url='https://github.com/go1dshtein/pgv',
classifiers=['Intended Audience :: Developers',
'Environment :: Console',
'Programming Language :: Python :: 2.7',
'Natural Language :: English',
'Development Status :: 1 - Planning',
'Operating System :: Unix',
'Topic :: Utilities'])
| kdunn926/eunomia-django | Financers/views.py | Python | apache-2.0 | 856 | 0.025701 |
# Create your views here.
from django.shortcuts import render, redirect
from django.views import generic
from Financers.models import Financers
import re
import random
def financer_detail(request, name):
template_name = 'financer_detail.html'
financer = ''
if name != "Unknown":
name = name.replace(',', '')
financer = Financers().getFinancersContributions(name)
profile = {}
print financer
profile['name'] = name
context = {'profile': profile}
return render(request, template_name, context)
def finaner_contributions(request, name):
template_name = 'financer_contributions.html'
print name
candidates = Financers().getFinancersContributions(name)
print candidates
profile = {}
profile['name'] = name
#profile['financers_list'] = financers_list
context = {'profile': profile}
return render(request, template_name, context)
| rafamanzo/colab | colab/plugins/management/commands/import_proxy_data.py | Python | gpl-2.0 | 1,015 | 0 |
#!/usr/bin/env python
import importlib
import inspect
from django.core.management.base import BaseCommand
from django.conf import settings
from colab.plugins.utils.proxy_data_api import ProxyDataAPI
class Command(BaseCommand):
help = "Import proxy data into colab database"
def handle(self, *args, **kwargs):
print "Executing extraction command..."
for module_name in settings.PROXIED_APPS.keys():
module_path = \
'colab.plugins.{}.data_api'.format(module_name.split('.')[-1])
module = importlib.import_module(module_path)
for module_item_name in dir(module):
module_item = getattr(module, module_item_name)
if not inspect.isclass(module_item):
continue
if issubclass(module_item, ProxyDataAPI):
if module_item != ProxyDataAPI:
api = module_item()
api.fetch_data()
break
| bonnieblueag/farm_log | livestock/views.py | Python | gpl-3.0 | 571 | 0.005254 |
from django.shortcuts import render
from core.views import get_add_model_form
from livestock.models import EggCollection, AnimalReport
from livestock.forms import EggCollectionForm, AnimalReportForm
ADD_LIVESTOCK_TEMPLATE = 'livestock/add_livestock_model.html'
def add_egg_collection(request):
return get_add_model_form(request, ADD_LIVESTOCK_TEMPLATE, EggCollection, 'Eggs', 'datetime', EggCollectionForm)
def add_animal_report(request):
return get_add_model_form(request, ADD_LIVESTOCK_TEMPLATE, AnimalReport, 'Animal Report', 'datetime', AnimalReportForm)
| frePPLe/frePPLe | djangosettings.py | Python | agpl-3.0 | 19,163 | 0.001148 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2015 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
r"""
Main Django configuration file.
"""
import os
import sys
from django.utils.translation import gettext_lazy as _
try:
DEBUG = "runserver" in sys.argv
except Exception:
DEBUG = False
DEBUG_JS = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
# ================= START UPDATED BLOCK BY WINDOWS INSTALLER =================
# Make this unique, and don't share it with anybody.
SECRET_KEY = "%@mzit!i8b*$zc&6oev96=RANDOMSTRING"
# FrePPLe only supports the postgresql database.
# Create additional entries in this dictionary to define scenario schemas.
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
# Database name
"NAME": "frepple",
# Role name when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"USER": "frepple",
# Role password when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"PASSWORD": "frepple",
# When using TCP sockets specify the hostname,
# the ip4 address or the ip6 address here.
# Leave as an empty string to use Unix domain
# socket ("local" lines in pg_hba.conf).
"HOST": "",
# Specify the port number when using a TCP socket.
"PORT": "",
"OPTIONS": {},
"CONN_MAX_AGE": 60,
"TEST": {
"NAME": "test_frepple" # Database name used when running the test suite.
},
"FILEUPLOADFOLDER": os.path.normpath(
os.path.join(FREPPLE_LOGDIR, "data", "default")
),
# Role name for executing custom reports and processing sql data files.
# Make sure this role has properly restricted permissions!
# When left unspecified, SQL statements run with the full read-write
# permissions of the user specified above. Which can be handy, but is not secure.
"SQL_ROLE": "report_role",
"SECRET_WEBTOKEN_KEY": SECRET_KEY,
},
"scenario1": {
"ENGINE": "django.db.backends.postgresql",
# Database name
"NAME": "scenario1",
# Role name when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"USER": "frepple",
# Role password when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"PASSWORD": "frepple",
# When using TCP sockets specify the hostname,
# the ip4 address or the ip6 address here.
# Leave as an empty string to use Unix domain
# socket ("local" lines in pg_hba.conf).
"HOST": "",
# Specify the port number when using a TCP socket.
"PORT": "",
"OPTIONS": {},
"CONN_MAX_AGE": 60,
"TEST": {
"NAME": "test_scenario1" # Database name used when running the test suite.
},
"FILEUPLOADFOLDER": os.path.normpath(
os.path.join(FREPPLE_LOGDIR, "data", "scenario1")
),
# Role name for executing custom reports and processing sql data files.
# Make sure this role has properly restricted permissions!
# When left unspecified, SQL statements run with the full read-write
# permissions of the user specified above. Which can be handy, but is not secure.
"SQL_ROLE": "report_role",
"SECRET_WEBTOKEN_KEY": SECRET_KEY,
},
"scenario2": {
"ENGINE": "django.db.backends.postgresql",
# Database name
"NAME": "scenario2",
# Role name when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"USER": "frepple",
# Role password when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"PASSWORD": "frepple",
# When using TCP sockets specify the hostname,
# the ip4 address or the ip6 address here.
# Leave as an empty string to use Unix domain
# socket ("local" lines in pg_hba.conf).
"HOST": "",
# Specify the port number when using a TCP socket.
"PORT": "",
"OPTIONS": {},
"CONN_MAX_AGE": 60,
"TEST": {
"NAME": "test_scenario2" # Database name used when running the test suite.
},
"FILEUPLOADFOLDER": os.path.normpath(
os.path.join(FREPPLE_LOGDIR, "data", "scenario2")
),
# Role name for executing custom reports and processing sql data files.
# Make sure this role has properly restricted permissions!
# When left unspecified, SQL statements run with the full read-write
# permissions of the user specified above. Which can be handy, but is not secure.
"SQL_ROLE": "report_role",
"SECRET_WEBTOKEN_KEY": SECRET_KEY,
},
"scenario3": {
"ENGINE": "django.db.backends.postgresql",
# Database name
"NAME": "scenario3",
# Role name when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"USER": "frepple",
# Role password when using md5 authentication.
# Leave as an empty string when using peer or
# ident authentication.
"PASSWORD": "frepple",
# When using TCP sockets specify the hostname,
# the ip4 address or the ip6 address here.
# Leave as an empty string to use Unix domain
# socket ("local" lines in pg_hba.conf).
"HOST": "",
# Specify the port number when using a TCP socket.
"PORT": "",
"OPTIONS": {},
"CONN_MAX_AGE": 60,
"TEST": {
"NAME": "test_scenario3" # Database name used when running the test suite.
},
"FILEUPLOADFOLDER": os.path.normpath(
os.path.join(FREPPLE_LOGDIR, "data", "scenario3")
),
# Role name for executing custom reports and processing sql data files.
# Make sure this role has properly restricted permissions!
# When left unspecified, SQL statements run with the full read-write
# permissions of the user specified above. Which can be handy, but is not secure.
"SQL_ROLE": "report_role",
"SECRET_WEBTOKEN_KEY": SECRET_KEY,
},
}
LANGUAGE_CODE = "en"
# Google analytics code to report usage statistics to.
# The value None disables this feature.
GOOGLE_ANALYTICS = None # "UA-1950616-4"
# ================= END UPDATED BLOCK BY WINDOWS INSTALLER =================
# A list of user names that can generate database dumps and download them.
# Since a database dump exposes all data, enabling this functionality should only be done
# for system administrators that know what they are doing.
SUPPORT_USERS = []
# If passwords are set in this file they will be used instead of the ones set in the database parameters table
ODOO_PASSWORDS = {"default": "", "scenario1": "", "scenario2": "", "scenario3": ""}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
| landism/pants | src/python/pants/backend/jvm/tasks/nailgun_task.py | Python | apache-2.0 | 4,969 | 0.00644 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.exceptions import TaskError
from pants.java import util
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.java.nailgun_executor import NailgunExecutor, NailgunProcessGroup
from pants.pantsd.subsystem.subprocess import Subprocess
from pants.task.task import Task, TaskBase
class NailgunTaskBase(JvmToolTaskMixin, TaskBase):
ID_PREFIX = 'ng'
@classmethod
def register_options(cls, register):
super(NailgunTaskBase, cls).register_options(register)
register('--use-nailgun', type=bool, default=True,
help='Use nailgun to make repeated invocations of this task quicker.')
register('--nailgun-timeout-seconds', advanced=True, default=10, type=float,
help='Timeout (secs) for nailgun startup.')
register('--nailgun-connect-attempts', advanced=True, default=5, type=int,
help='Max attempts for nailgun connects.')
cls.register_jvm_tool(register,
'nailgun-server',
classpath=[
JarDependency(org='com.martiansoftware',
name='nailgun-server',
rev='0.9.1'),
])
@classmethod
def subsystem_dependencies(cls):
return super(NailgunTaskBase, cls).subsystem_dependencies() + (Subprocess.Factory,)
def __init__(self, *args, **kwargs):
"""
:API: public
"""
super(NailgunTaskBase, self).__init__(*args, **kwargs)
id_tuple = (self.ID_PREFIX, self.__class__.__name__)
self._identity = '_'.join(id_tuple)
self._executor_workdir = os.path.join(self.context.options.for_global_scope().pants_workdir,
*id_tuple)
def create_java_executor(self):
"""Create java executor that uses this task's ng daemon, if allowed.
Call only in execute() or later. TODO: Enforce this.
"""
if self.get_options().use_nailgun:
classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
return NailgunExecutor(self._identity,
self._executor_workdir,
classpath,
self.dist,
connect_timeout=self.get_options().nailgun_timeout_seconds,
connect_attempts=self.get_options().nailgun_connect_attempts)
else:
return SubprocessExecutor(self.dist)
def runjava(self, classpath, main, jvm_options=None, args=None, workunit_name=None,
workunit_labels=None, workunit_log_config=None):
"""Runs the java main using the given classpath and args.
If --no-use-nailgun is specified then the java main is run in a freshly spawned subprocess,
otherwise a persistent nailgun server dedicated to this Task subclass is used to speed up
amortized run times.
:API: public
"""
executor = self.create_java_executor()
# Creating synthetic jar to work around system arg length limit is not necessary
# when `NailgunExecutor` is used because args are passed through socket, therefore turning off
# creating synthetic jar if nailgun is used.
create_synthetic_jar = not self.get_options().use_nailgun
try:
return util.execute_java(classpath=classpath,
main=main,
jvm_options=jvm_options,
args=args,
executor=executor,
workunit_factory=self.context.new_workunit,
workunit_name=workunit_name,
workunit_labels=workunit_labels,
workunit_log_config=workunit_log_config,
create_synthetic_jar=create_synthetic_jar,
synthetic_jar_dir=self._executor_workdir)
except executor.Error as e:
raise TaskError(e)
# TODO(John Sirois): This just prevents ripple - maybe inline
class NailgunTask(NailgunTaskBase, Task):
"""
:API: public
"""
pass
class NailgunKillall(Task):
"""Kill running nailgun servers."""
@classmethod
def register_options(cls, register):
super(NailgunKillall, cls).register_options(register)
register('--everywhere', type=bool,
help='Kill all nailgun servers launched by pants for all workspaces on the system.')
def execute(self):
NailgunProcessGroup().killall(everywhere=self.get_options().everywhere)
| fossilet/project-euler | pe1.py | Python | mit | 1,110 | 0.023423 |
#! /usr/bin/env python3
# encoding: utf-8
'''
http://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
# Since May 22 2012
from projecteuler import calctime
n, a, b = 100000, 3, 5
# v1
def v1():
summ = 0
for i in range(n):
if i%a == 0 or i%b == 0:
summ += i
return summ
# v2
# Generator expression faster than for loop
def v2():
return sum( i for i in range(n) if i%a == 0 or i%b == 0 )
# v3
# Almost as fast as v2
def v3():
return sum( i for i in range(n) if not i%a or not i%b )
# v4
# Almost as fast as v2
def v4():
return sum( i for i in range(n) if not (i%a and i%b) )
# v5
# Time is O(1), the fastest
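# (Closed form: the multiples of k up to n sum to k*m*(m+1)/2 with m = n//k,
# which the expression below writes as (n//k*k + k)*(n//k)/2; adding the sums
# for a and b and subtracting the one for a*b (inclusion-exclusion) gives the
# total without any loop.)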
def v5():
n = 999
return sum((n//k*k+k)*(n//k)/2*v for k,v in {a:1, b:1, a*b:-1}.items())
if __name__ == '__main__':
for i in range(1, 6):
fname = 'v%d' % i
print(locals()[fname]())
calctime('%s()'% fname, 'from __main__ import %s' % fname, 50)
| swoodford/twitter | status-tweet.py | Python | apache-2.0 | 663 | 0.001508 |
#!/usr/bin/env python
# This script will tweet the text that is passed as an argument
# Requires Twython, API credentials set as env vars
# Usage: python status-tweet.py "Hello Everyone, this is my Raspberry Pi tweeting you more nonsense"
import sys
import os
from twython import Twython
import twitter_api_creds
# Set Twitter Credentials from environment variables
CONSUMER_KEY = os.getenv("CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET")
ACCESS_KEY = os.getenv("ACCESS_KEY")
ACCESS_SECRET = os.getenv("ACCESS_SECRET")
api = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)
# Tweet
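# (sys.argv[1][:140] trims the message to Twitter's historical 140-character status limit)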
api.update_status(status=sys.argv[1][:140])
| mohamed-ali-affes/oojuba | src/blog/models.py | Python | mit | 476 | 0 |
from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
| ali/mopidy | mopidy/mpd/protocol/playback.py | Python | apache-2.0 | 13,957 | 0 |
from __future__ import absolute_import, unicode_literals
from mopidy.core import PlaybackState
from mopidy.internal import deprecation
from mopidy.mpd import exceptions, protocol
@protocol.commands.add('consume', state=protocol.BOOL)
def consume(context, state):
"""
*musicpd.org, playback section:*
``consume {STATE}``
Sets consume state to ``STATE``, ``STATE`` should be 0 or
1. When consume is activated, each song played is removed from
playlist.
"""
context.core.tracklist.set_consume(state)
@protocol.commands.add('crossfade', seconds=protocol.UINT)
def crossfade(context, seconds):
"""
*musicpd.org, playback section:*
``crossfade {SECONDS}``
Sets crossfading between songs.
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('mixrampdb')
def mixrampdb(context, decibels):
"""
*musicpd.org, playback section:*
``mixrampdb {deciBels}``
Sets the threshold at which songs will be overlapped. Like crossfading but
doesn't fade the track volume, just overlaps. The songs need to have
MixRamp tags added by an external tool. 0dB is the normalized maximum
volume so use negative values, I prefer -17dB. In the absence of mixramp
tags crossfading will be used. See http://sourceforge.net/projects/mixramp
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('mixrampdelay', seconds=protocol.UINT)
def mixrampdelay(context, seconds):
"""
*musicpd.org, playback section:*
``mixrampdelay {SECONDS}``
Additional time subtracted from the overlap calculated by mixrampdb. A
value of "nan" disables MixRamp overlapping and falls back to
crossfading.
"""
raise exceptions.MpdNotImplemented # TODO
@protocol.commands.add('next')
def next_(context):
"""
*musicpd.org, playback section:*
``next``
Plays next song in the playlist.
*MPD's behaviour when affected by repeat/random/single/consume:*
Given a playlist of three tracks numbered 1, 2, 3, and a currently
playing track ``c``. ``next_track`` is defined at the track that
will be played upon calls to ``next``.
Tests performed on MPD 0.15.4-1ubuntu3.
====== ====== ====== ======= ===== ===== ===== =====
Inputs next_track
------------------------------- ------------------- -----
repeat random single consume c = 1 c = 2 c = 3 Notes
====== ====== ====== ======= ===== ===== ===== =====
T T T T 2 3 EOPL
T T T . Rand Rand Rand [1]
T T . T Rand Rand Rand [4]
T T . . Rand Rand Rand [4]
T . T T 2 3 EOPL
T . T . 2 3 1
T . . T 3 3 EOPL
T . . . 2 3 1
. T T T Rand Rand Rand [3]
. T T . Rand Rand Rand [3]
. T . T Rand Rand Rand [2]
. T . . Rand Rand Rand [2]
. . T T 2 3 EOPL
. . T . 2 3 EOPL
. . . T 2 3 EOPL
. . . . 2 3 EOPL
====== ====== ====== ======= ===== ===== ===== =====
- When end of playlist (EOPL) is reached, the current track is
unset.
- [1] When *random* and *single* is combined, ``next`` selects
a track randomly at each invocation, and not just the next track
in an internal prerandomized playlist.
- [2] When *random* is active, ``next`` will skip through
all tracks in the playlist in random order, and finally EOPL is
reached.
- [3] *single* has no effect in combination with *random*
alone, or *random* and *consume*.
- [4] When *random* and *repeat* is active, EOPL is never
reached, but the playlist is played again, in the same random
order as the first time.
"""
return context.core.playback.next().get()
@protocol.commands.add('pause', state=protocol.BOOL)
def pause(context, state=None):
"""
*musicpd.org, playback section:*
``pause {PAUSE}``
Toggles pause/resumes playing, ``PAUSE`` is 0 or 1.
*MPDroid:*
- Calls ``pause`` without any arguments to toggle pause.
"""
if state is None:
deprecation.warn('mpd.protocol.playback.pause:state_arg')
playback_state = context.core.playback.get_state().get()
if (playback_state == PlaybackState.PLAYING):
context.core.playback.pause().get()
elif (playback_state == PlaybackState.PAUSED):
context.core.playback.resume().get()
elif state:
context.core.playback.pause().get()
else:
context.core.playback.resume().get()
@protocol.commands.add('play', songpos=protocol.INT)
def play(context, songpos=None):
"""
*musicpd.org, playback section:*
``play [SONGPOS]``
Begins playing the playlist at song number ``SONGPOS``.
The original MPD server resumes from the paused state on ``play``
without arguments.
*Clarifications:*
- ``play "-1"`` when playing is ignored.
- ``play "-1"`` when paused resumes playback.
- ``play "-1"`` when stopped with a current track starts playback at the
current track.
- ``play "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
*BitMPC:*
- issues ``play 6`` without quotes around the argument.
"""
if songpos is None:
return context.core.playback.play().get()
elif songpos == -1:
return _play_minus_one(context)
try:
tl_track = context.core.tracklist.slice(songpos, songpos + 1).get()[0]
return context.core.playback.play(tl_track).get()
except IndexError:
raise exceptions.MpdArgError('Bad song index')
def _play_minus_one(context):
playback_state = context.core.playback.get_state().get()
if playback_state == PlaybackState.PLAYING:
return # Nothing to do
elif playback_state == PlaybackState.PAUSED:
return context.core.playback.resume().get()
current_tl_track = context.core.playback.get_current_tl_track().get()
if current_tl_track is not None:
return context.core.playback.play(current_tl_track).get()
tl_tracks = context.core.tracklist.slice(0, 1).get()
if tl_tracks:
return context.core.playback.play(tl_tracks[0]).get()
return # Fail silently
@protocol.commands.add('playid', tlid=protocol.INT)
def playid(context, tlid):
"""
*musicpd.org, playback section:*
``playid [SONGID]``
Begins playing the playlist at song ``SONGID``.
*Clarifications:*
- ``playid "-1"`` when playing is ignored.
- ``playid "-1"`` when paused resumes playback.
- ``playid "-1"`` when stopped with a current track starts playback at the
current track.
- ``playid "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
"""
if tlid == -1:
return _play_minus_one(context)
tl_tracks = context.core.tracklist.filter({'tlid': [tlid]}).get()
if not tl_tracks:
raise exceptions.MpdNoExistError('No such song')
return context.core.playback.play(tl_tracks[0]).get()
@protocol.commands.add('previous')
def previous(context):
"""
*musicpd.org, playback section:*
``previous``
Plays previous song in the playlist.
*MPD's behaviour when affected by repeat/random/single/consume:*
Given a playlist of three tracks numbered 1, 2, 3, and a currently
playing track ``c``. ``previou
| tdimiduk/groupeng | src/__init__.py | Python | agpl-3.0 | 724 | 0 |
#!/usr/bin/python
# Copyright 2011, Thomas G. Dimiduk
#
# This file is part of GroupEng.
#
# Holopy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Holopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with GroupEng. If not, see <http://www.gnu.org/licenses/>.
| luotao1/Paddle | python/paddle/fluid/tests/unittests/test_imperative_signal_handler.py | Python | apache-2.0 | 5,136 | 0 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import signal
import unittest
import multiprocessing
import time
import paddle.compat as cpt
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard
def set_child_signal_handler(self, child_pid):
core._set_process_pids(id(self), tuple([child_pid]))
current_handler = signal.getsignal(signal.SIGCHLD)
if not callable(current_handler):
current_handler = None
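# (__handler__ below wraps any previously installed SIGCHLD handler: it first lets
# paddle raise an error if a registered child process failed, then forwards the
# signal to the original handler when one was set)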
def __handler__(signum, frame):
core._throw_error_if_process_failed()
if current_handler is not None:
current_handler(signum, frame)
signal.signal(signal.SIGCHLD, __handler__)
class DygraphDataLoaderSingalHandler(unittest.TestCase):
def func_child_process_exit_with_error(self):
def __test_process__():
core._set_process_signal_handler()
sys.exit(1)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Fatal", cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_exit_with_error(self):
with _test_eager_guard():
self.func_child_process_exit_with_error()
self.func_child_process_exit_with_error()
def func_child_process_killed_by_sigsegv(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGSEGV)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Segmentation fault",
cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigsegv(self):
with _test_eager_guard():
self.func_child_process_killed_by_sigsegv()
self.func_child_process_killed_by_sigsegv()
def func_child_process_killed_by_sigbus(self):
def __test_process__():
core._set_process_signal_handler()
os.kill(os.getpid(), signal.SIGBUS)
def try_except_exit():
exception = None
try:
test_process = multiprocessing.Process(target=__test_process__)
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(5)
except SystemError as ex:
self.assertIn("Bus error", cpt.get_exception_message(ex))
exception = ex
return exception
try_time = 10
exception = None
for i in range(try_time):
exception = try_except_exit()
if exception is not None:
break
self.assertIsNotNone(exception)
def test_child_process_killed_by_sigbus(self):
with _test_eager_guard():
self.func_child_process_killed_by_sigbus()
self.func_child_process_killed_by_sigbus()
def func_child_process_killed_by_sigterm(self):
def __test_process__():
core._set_process_signal_handler()
time.sleep(10)
test_process = multiprocessing.Process(target=__test_process__)
test_process.daemon = True
test_process.start()
set_child_signal_handler(id(self), test_process.pid)
time.sleep(1)
def test_child_process_killed_by_sigterm(self):
with _test_eager_guard():
self.func_child_process_killed_by_sigterm()
self.func_child_process_killed_by_sigterm()
if __name__ == '__main__':
unittest.main()
| reneisrael/coursera | Rock-paper-scissors-lizard-Spock.py | Python | gpl-2.0 | 1,947 | 0.007191 |
# Rock-paper-scissors-lizard-Spock
import random
# helper functions
def name_to_number(name):
if name == "rock":
name = 0
elif name == "Spock":
name = 1
elif name == "paper":
name = 2
elif name == "lizard":
name = 3
elif name == "scissors":
name = 4
else:
print "No correct name given:"
return name
def number_to_name(number):
if number == 0:
number = "rock"
elif number == 1:
number = "Spock"
elif number == 2:
number = "paper"
elif number == 3:
number = "lizard"
elif number == 4:
number = "scissors"
else:
print "No correct number given:"
return number
def rpsls(player_choice):
# print a blank line to separate consecutive games
print
# print out the message for the player's choice
print "Player chooses", player_choice
# convert the player's choice to player_number using the function name_to_number()
player_number = name_to_number(player_choice)
# compute random guess for comp_number using random.randrange()
comp_number = random.randrange(0,5)
# convert comp_number to comp_choice using the function number_to_name()
comp_choice = number_to_name(comp_number)
# print out the message for computer's choice
print "Computer chooses", comp_choice
# compute difference of comp_number and player_number modulo five
a = (player_number - comp_number) % 5
# use if/elif/else to determine winner, print winner message
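# (with the rock=0, Spock=1, paper=2, lizard=3, scissors=4 numbering, each choice
# beats the two choices numbered just below it modulo five, so a difference of
# 1 or 2 means the player wins)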
if player_number == comp_number :
print "Player and computer tie!"
elif a <= 2:
print "Player wins!"
else :
print "Computer wins!"
# test your code - THESE CALLS MUST BE PRESENT IN YOUR SUBMITTED CODE
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
# always remember to check your completed program against the grading rubric
| denys-duchier/Scolar | safehtml.py | Python | gpl-2.0 | 361 | 0.030471 |
from stripogram import html2text, html2safehtml
# allows keeping a few html tags
def HTML2SafeHTML( text, convert_br=True ):
text = html2safehtml( text, valid_tags=('b', 'a', 'i', 'br', 'p'))
if convert_br:
return newline_to_br(text)
else:
return text
def newline_to_br( text ):
return text.replace( '\n', '<br/>' )
| Doctor-love/kkross | playground/kkross/modules/module_loader.py | Python | gpl-2.0 | 919 | 0.003264 |
from kkross.exceptions import ModuleError, LoaderError
from kkross.module import Module
from glob import glob
from yaml import safe_load
import os
def module_loader(module_dirs):
'''Loads module YAML from ...'''
module_paths = []
for module_dir in module_dirs:
module_paths.extend(glob(os.path.join(module_dir, '*.y*ml')))
if not module_paths:
return []
modules = []
for module_path in module_paths:
try:
module_raw = safe_load(open(module_path, 'r'))
except Exception as error_msg:
raise LoaderError(
'Failed to load module YAML data from "%s": "%s"' % (module_path, error_msg))
try:
modules.append(Module(module_raw))
except Exception as error_msg:
raise LoaderError('Failed to load module from "%s": %s' % (module_path, error_msg))
return modules
| gawel/irc3 | examples/wsgiapp.py | Python | mit | 726 | 0 |
# -*- coding: utf-8 -*-
import asyncio
from aiohttp import wsgi
from irc3 import plugin
import json
@plugin
class Webapp:
requires = ['irc3.plugins.userlist']
def __init__(self, bot):
def server():
return wsgi.WSGIServerHttpProtocol(self.wsgi)
self.bot = bot
loop = asyncio.get_event_loop()
self.bot.log.info('Starting webapp')
asyncio.Task(loop.create_server(
server, '127.0.0.1', 5000))
def wsgi(self, environ, start_response):
start_response('200 OK', [('Content-Type', 'application/json')])
plugin = self.bot.get_plugin('userlist')
data = json.dumps(list(plugin.channels.keys()))
return [data.encode('utf8')]
| cournape/numscons | numscons/scons-local/scons-local-1.2.0/SCons/Node/Python.py | Python | bsd-3-clause | 4,216 | 0.002135 |
"""scons.Node.Python
Python nodes.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/Python.py 2009/09/04 16:33:07 david"
import SCons.Node
class ValueNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig']
def str_to_node(self, s):
return Value(s)
class ValueBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
class Value(SCons.Node.Node):
"""A class for Python variables, typically passed on the command line
or generated by a script, but not from a file or some other source.
"""
NodeInfo = ValueNodeInfo
BuildInfo = ValueBuildInfo
def __init__(self, value, built_value=None):
SCons.Node.Node.__init__(self)
self.value = value
if built_value is not None:
self.built_value = built_value
def str_for_display(self):
return repr(self.value)
def __str__(self):
return str(self.value)
def make_ready(self):
self.get_csig()
def build(self, **kw):
if not hasattr(self, 'built_value'):
apply (SCons.Node.Node.build, (self,), kw)
is_up_to_date = SCons.Node.Node.children_are_up_to_date
def is_under(self, dir):
# Make Value nodes get built regardless of
# what directory scons was run from. Value nodes
# are outside the filesystem:
return 1
def write(self, built_value):
"""Set the value of the
|
node."""
self.built_value = built_value
def read(self):
"""Return the value. If necessary, the value is built."""
self.build()
if not hasattr(self, 'built_value'):
self.built_value = self.value
return self.built_value
def get_text_contents(self):
"""By the assumption that the node.built_value is a
deterministic product of the sources, the contents of a Value
are the concatenation of all the contents of its sources. As
the value need not be built when get_contents() is called, we
cannot use the actual node.built_value."""
###TODO: something reasonable about universal newlines
contents = str(self.value)
for kid in self.children(None):
contents = contents + kid.get_contents()
return contents
get_contents = get_text_contents ###TODO should return 'bytes' value
def changed_since_last_build(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def get_csig(self, calc=None):
"""Because we're a Python value node and don't have a real
timestamp, we get to ignore the calculator and just use the
value contents."""
try:
return self.ninfo.csig
except AttributeError:
pass
contents = self.get_contents()
self.get_ninfo().csig = contents
return contents
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Ye-Yong-Chi/xortool | xortool/libcolors.py | Python | mit | 2,323 | 0.001291 |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
BASH_ATTRIBUTES = {"regular": "0",
"bold": "1", "underline": "4", "strike": "9",
"light": "1", "dark": "2",
"invert": "7"} # invert bg and fg
BASH_COLORS = {"black": "30", "red": "31", "green": "32", "yellow": "33",
"blue": "34", "purple": "35", "cyan": "36", "white": "37"}
BASH_BGCOLORS = {"black": "40", "red": "41", "green": "42", "y
|
ellow": "43",
"blue": "44", "purple": "45", "cyan": "46", "white": "47"}
def _main():
header = color("white", "black", "dark")
print
print header + " " + "Colors and backgrounds: " + color()
for c in _keys_sorted_by_values(BASH_COLORS):
c1 = color(c)
c2 = color("white" if c != "white" else "black", bgcolor=c)
print (c.ljust(10) +
c1 + "colored text" + color() + " " +
c2 + "background" + color())
print
print header + " " + "Attributes: " + color()
for c in _keys_sorted_by_values(BASH_ATTRIBUTES):
c1 = color("red", attrs=c)
c2 = color("white", attrs=c)
print (c.ljust(13) +
c1 + "red text" + color() + " " +
c2 + "white text" + color())
print
return
def color(color=None, bgcolor=None, attrs=None):
if not is_bash():
return ""
ret = "\x1b[0"
if attrs:
for attr in attrs.lower().split():
attr = attr.strip(",+|")
if attr not in BASH_ATTRIBUTES:
raise ValueError("Unknown color attribute: " + attr)
ret += ";" + BASH_ATTRIBUTES[attr]
if color:
if color in BASH_COLORS:
ret += ";" + BASH_COLORS[color]
else:
raise ValueError("Unknown color: " + color)
if bgcolor:
if bgcolor in BASH_BGCOLORS:
ret += ";" + BASH_BGCOLORS[bgcolor]
else:
raise ValueError("Unknown background color: " + bgcolor)
return ret + "m"
def is_bash():
return os.environ.get("SHELL", "unknown").endswith("bash")
def _keys_sorted_by_values(adict):
"""Return list of the keys of @adict sorted by values."""
return sorted(adict, key=adict.get)
if __name__ == "__main__":
_main()
| AriZuu/micropython | tests/float/complex1.py | Python | mit | 2,231 | 0.003586 |
# test basic complex number functionality
# constructor
print(complex(1))
print(complex(1.2))
print(complex(1.2j))
print(complex("1"))
print(complex("1.2"))
print(complex("1.2j"))
print(complex(1, 2))
print(complex(1j, 2j))
# unary ops
print(bool(1j))
print(+(1j))
print(-(1 + 2j))
# binary ops
print(1j + False)
print(1j + True)
print(1j + 2)
print(1j + 2j)
print(1j - 2)
print(1j - 2j)
print(1j * 2)
print(1j * 2j)
print(1j / 2)
print((1j / 2j).real)
print(1j / (1 + 2j))
ans = 0j ** 0; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 0j ** 1; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 0j ** 0j; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 1j ** 2.5; print("%.5g %.5g" % (ans.real, ans.imag))
ans = 1j ** 2.5j; print("%.5g %.5g" % (ans.real, ans.imag))
# comparison
print(1j == 1)
print(1j == 1j)
# comparison of nan is special
nan = float('nan') * 1j
print(nan == 1j)
print(nan == nan)
# builtin abs
print(abs(1j))
print("%.5g" % abs(1j + 2))
# builtin hash
print(hash(1 + 0j))
print(type(hash(1j)))
# float on lhs should delegate to complex
print(1.2 + 3j)
# negative base and fractional power should create a complex
ans = (-1) ** 2.3; print("%.5g %.5g" % (ans.real, ans.imag))
ans = (-1.2) ** -3.4; print("%.5g %.5g" % (ans.real, ans.imag))
# check printing of inf/nan
print(float('nan') * 1j)
print(float('-nan') * 1j)
print(float('inf') * (1 + 1j))
print(float('-inf') * (1 + 1j))
# can't assign to attributes
try:
(1j).imag = 0
except AttributeError:
print('AttributeError')
# can't convert rhs to complex
try:
1j + []
except TypeError:
print("TypeError")
# unsupported unary op
try:
~(1j)
except TypeError:
print("TypeError")
# unsupported binary op
try:
1j // 2
except TypeError:
print("TypeError")
# unsupported binary op
try:
1j < 2j
except TypeError:
print("TypeError")
#small int on LHS, complex on RHS, unsupported op
try:
print(1 | 1j)
except TypeError:
print('TypeError')
# zero division
try:
1j / 0
except ZeroDivisionError:
print("ZeroDivisionError")
# zero division via power
try:
0j ** -1
except ZeroDivisionError:
print("ZeroDivisionError")
try:
0j ** 1j
except ZeroDivisionError:
print("ZeroDivisionError")
| shaise/FreeCAD_FastenersWB | Init.py | Python | gpl-2.0 | 1,063 | 0.007526 |
# -*- coding: utf-8 -*-
###################################################################################
#
# Init.py
#
# Copyright 2015 Shai Seger <shaise at gmail dot com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
###################################################################################
# print "Fasteners workbench Loaded"
| mdeff/ntds_2017 | projects/reports/arab_springs/lib/models.py | Python | mit | 40,998 | 0.002829 |
from . import graph
import tensorflow as tf
import sklearn
import scipy.sparse
import numpy as np
import os, time, collections, shutil
#NFEATURES = 28**2
#NCLASSES = 10
# Common methods for all models
class base_model(object):
def __init__(self):
self.regularizers = []
# High-level interface which runs the constructed computational graph.
def predict(self, data, labels=None, sess=None):
loss = 0
size = data.shape[0]
predictions = np.empty(size)
sess = self._get_session(sess)
for begin in range(0, size, self.batch_size):
end = begin + self.batch_size
end = min([end, size])
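# (the last batch is zero-padded up to batch_size so it matches the graph's fixed
# placeholder shape; the padded predictions are dropped again via [:end-begin])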
batch_data = np.zeros((self.batch_size, data.shape[1]))
tmp_data = data[begin:end,:]
if type(tmp_data) is not np.ndarray:
tmp_data = tmp_data.toarray() # convert sparse matrices
batch_data[:end-begin] = tmp_data
feed_dict = {self.ph_data: batch_data, self.ph_dropout: 1}
# Compute loss if labels are given.
if labels is not None:
batch_labels = np.zeros(self.batch_size)
batch_labels[:end-begin] = labels[begin:end]
feed_dict[self.ph_labels] = batch_labels
batch_pred, batch_loss = sess.run([self.op_prediction, self.op_loss], feed_dict)
loss += batch_loss
else:
batch_pred = sess.run(self.op_prediction, feed_dict)
predictions[begin:end] = batch_pred[:end-begin]
if labels is not None:
return predictions, loss * self.batch_size / size
else:
return predictions
def evaluate(self, data, labels, sess=None):
"""
Runs one evaluation against the full epoch of data.
Return the precision and the number of correct predictions.
Batch evaluation saves memory and enables this to run on smaller GPUs.
sess: the session in which the model has been trained.
op: the Tensor that returns the number of correct predictions.
data: size N x M
N: number of signals (samples)
M: number of vertices (features)
labels: size N
N: number of signals (samples)
"""
t_process, t_wall = time.process_time(), time.time()
predictions, loss = self.predict(data, labels, sess)
#print(predictions)
ncorrects = sum(predictions == labels)
accuracy = 100 * sklearn.metrics.accuracy_score(labels, predictions)
f1 = 100 * sklearn.metrics.f1_score(labels, predictions, average='weighted')
string = 'accuracy: {:.2f} ({:d} / {:d}), f1 (weighted): {:.2f}, loss: {:.2e}'.format(
accuracy, ncorrects, len(labels), f1, loss)
if sess is None:
string += '\ntime: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall)
return string, accuracy, f1, loss
def fit(self, train_data, train_labels, val_data, val_labels):
t_process, t_wall = time.process_time(), time.time()
sess = tf.Session(graph=self.graph)
shutil.rmtree(self._get_path('summaries'), ignore_errors=True)
writer = tf.summary.FileWriter(self._get_path('summaries'), self.graph)
shutil.rmtree(self._get_path('checkpoints'), ignore_errors=True)
os.makedirs(self._get_path('checkpoints'))
path = os.path.join(self._get_path('checkpoints'), 'model')
sess.run(self.op_init)
# Training.
accuracies = []
losses = []
indices = collections.deque()
num_steps = int(self.num_epochs * train_data.shape[0] / self.batch_size)
for step in range(1, num_steps+1):
# Be sure to have used all the samples before using one a second time.
if len(indices) < self.batch_size:
indices.extend(np.random.permutation(train_data.shape[0]))
idx = [indices.popleft() for i in range(self.batch_size)]
batch_data, batch_labels = train_data[idx,:], train_labels[idx]
if type(batch_data) is not np.ndarray:
batch_data = batch_data.toarray() # convert sparse matrices
feed_dict = {self.ph_data: batch_data, self.ph_labels: batch_labels, self.ph_dropout: self.dropout}
learning_rate, loss_average = sess.run([self.op_train, self.op_loss_average], feed_dict)
# Periodical evaluation of the model.
if step % self.eval_frequency == 0 or step == num_steps:
epoch = step * self.batch_size / train_data.shape[0]
print('step {} / {} (epoch {:.2f} / {}):'.format(step, num_steps, epoch, self.num_epochs))
print(' learning_rate = {:.2e}, loss_average = {:.2e}'.format(learning_rate, loss_average))
string, accuracy, f1, loss = self.evaluate(val_data, val_labels, sess)
accuracies.append(accuracy)
losses.append(loss)
print(' validation {}'.format(string))
print(' time: {:.0f}s (wall {:.0f}s)'.format(time.process_time()-t_process, time.time()-t_wall))
# Summaries for TensorBoard.
summary = tf.Summary()
summary.ParseFromString(sess.run(self.op_summary, feed_dict))
summary.value.add(tag='validation/accuracy', simple_value=accuracy)
summary.value.add(tag='validation/f1', simple_value=f1)
summary.value.add(tag='validation/loss', simple_value=loss)
writer.add_summary(summary, step)
# Save model parameters (for evaluation).
self.op_saver.save(sess, path, global_step=step)
print('validation accuracy: peak = {:.2f}, mean = {:.2f}'.format(max(accuracies), np.mean(accuracies[-10:])))
writer.close()
sess.close()
t_step = (time.time() - t_wall) / num_steps
return accuracies, losses, t_step
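    # Hedged usage sketch (assumption: a concrete subclass provides _inference
    # and the usual hyperparameters such as batch_size and num_epochs); after
    # training, predictions on held-out data would be obtained roughly as:
    #   model = SomeConcreteModel(...)                  # hypothetical subclass
    #   accuracies, losses, t_step = model.fit(train_X, train_y, val_X, val_y)
    #   preds = model.predict(test_X)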
def get_var(self, name):
sess = self._get_session()
var = self.graph.get_tensor_by_name(name + ':0')
val = sess.run(var)
sess.close()
return val
# Methods to construct the computational graph.
def build_graph(self, M_0):
"""Build the computational graph of the model."""
self.graph = tf.Graph()
with self.graph.as_default():
# Inputs.
with tf.name_scope('inputs'):
self.ph_data = tf.placeholder(tf.float32, (self.batch_size, M_0), 'data')
self.ph_labels = tf.placeholder(tf.int32, (self.batch_size), 'labels')
self.ph_dropout = tf.placeholder(tf.float32, (), 'dropout')
# Model.
op_logits = self.inference(self.ph_data, self.ph_dropout)
self.op_loss, self.op_loss_average = self.loss(op_logits, self.ph_labels, self.regularization)
self.op_train = self.training(self.op_loss, self.learning_rate,
self.decay_steps, self.decay_rate, self.momentum)
self.op_prediction = self.prediction(op_logits)
# Initialize variables, i.e. weights and biases.
self.op_init = tf.global_variables_initializer()
# Summaries for TensorBoard and Save for model parameters.
self.op_summary = tf.summary.merge_all()
self.op_saver = tf.train.Saver(max_to_keep=5)
self.graph.finalize()
def inference(self, data, dropout):
"""
It builds the model, i.e. the computational graph, as far as
is required for running the network forward to make predictions,
i.e. return logits given raw data.
data: size N x M
N: number of signals (samples)
M: number of vertices (features)
training: we may want to discriminate the two, e.g. for dropout.
True: the model is built for training.
False: the model is built for evaluation.
"""
# TODO: optimizations for sparse data
logits = self._inference(data, dropout)
return logits
def probabilities(self, log
|
lunixbochs/SublimeXiki
|
edit.py
|
Python
|
mit
| 2,725
| 0.000367
|
# edit.py
# buffer editing for both ST2 and ST3 that "just works"
import inspect
import sublime
import sublime_plugin
try:
sublime.sublimexiki_edit_storage
except AttributeError:
sublime.sublimexiki_edit_storage = {}
def run_callback(func, *args, **kwargs):
spec = inspect.getfullargspec(func)
if spec.args or spec.varargs:
func(*args, **kwargs)
else:
func()
class EditFuture:
def __init__(self, func):
self.func = func
def resolve(self, view, edit):
return self.func(view, edit)
class EditStep:
def __init__(self, cmd, *args):
self.cmd = cmd
self.args = args
def run(self, view, edit):
if self.cmd == 'callback':
return run_callback(self.args[0], view, edit)
funcs = {
'insert': view.insert,
'erase': view.erase,
'replace': view.replace,
}
func = funcs.get(self.cmd)
if func:
args = self.resolve_args(view, edit)
func(edit, *args)
def resolve_args(self, view, edit):
args = []
for arg in self.args:
if isinstance(arg, EditFuture):
                arg = arg.resolve(view, edit)
            args.append(arg)
return args
class Edit:
def __init__(self, view):
self.view = view
self.steps = []
def __nonzero__(self):
return bool(self.steps)
@classmethod
def future(self, func):
return EditFuture(func)
def step(self, cmd, *args):
step = EditStep(cmd, *args)
self.steps.append(step)
def insert(self, point, string):
self.step('insert', point, string)
def erase(self, region):
self.step('erase', region)
def replace(self, region, string):
self.step('replace', region, string)
def sel(self, start, end=None):
if end is None:
end = start
self.step('sel', start, end)
def callback(self, func):
self.step('callback', func)
def run(self, view, edit):
for step in self.steps:
step.run(view, edit)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
view = self.view
if sublime.version().startswith('2'):
edit = view.begin_edit()
            self.run(view, edit)
view.end_edit(edit)
else:
key = str(hash(tuple(self.steps)))
sublime.sublimexiki_edit_storage[key] = self.run
view.run_command('apply_sublimexiki_edit', {'key': key})
class apply_sublimexiki_edit(sublime_plugin.TextCommand):
def run(self, edit, key):
sublime.sublimexiki_edit_storage.pop(key)(self.view, edit)
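# Hedged usage sketch (not part of the original plugin; view and point are
# assumptions): inside a TextCommand the Edit wrapper batches edits so the
# same code works on both ST2 and ST3, e.g.
#   with Edit(view) as edit:
#       edit.insert(point, "hello")
#       edit.replace(sublime.Region(0, 5), "world")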
|
der-michik/c3bottles
|
c3bottles/views/forms.py
|
Python
|
mit
| 1,168
| 0.000856
|
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField, HiddenField, IntegerField, BooleanField
from wtforms.validators import DataRequired
from wtforms.widgets import HiddenInput
class LoginForm(FlaskForm):
username = StringField("username", validators=[DataRequired()])
password = PasswordField("password", validators=[DataRequired()])
back = HiddenField("back")
args = HiddenField("args")
class UserIdForm(FlaskForm):
user_id = IntegerField("user_id", widget=HiddenInput())
class PermissionsForm(UserIdForm):
can_visit = BooleanField("can_visit")
can_edit = BooleanField("can_edit")
is_admin = BooleanField("is_admin")
class PasswordForm(UserIdForm):
password_1 = PasswordField("password_1", validators=[DataRequired()])
password_2 = PasswordField("password_2", validators=[DataRequired()])
class UserCreateForm(FlaskForm):
username = StringField("username", validators=[DataRequired()])
password = PasswordField("password", validators=[DataRequired()])
can_visit = BooleanField("can_visit")
can_edit = BooleanField("can_edit")
is_admin = BooleanField("is_admin")
|
marthall/accounting
|
moneymaker/admin.py
|
Python
|
mit
| 496
| 0
|
from django.contrib import admin
from moneymaker.models import Product
from moneymaker.models import Income
from moneymaker.models import Expense
from moneymaker.models import ExpenseCategory
from moneymaker.models import IncomeCategory
class ExpenseAdmin(admin.ModelAdmin):
list_display = ('date', '__unicode__')
admin.site.register(Product)
admin.site.register(Income)
admin.site.register(Expense, ExpenseAdmin)
admin.site.register(ExpenseCategory)
admin.site.register(IncomeCategory)
|
mdcic/ssp
|
ssp/netconfig.py
|
Python
|
gpl-3.0
| 5,352
| 0.037937
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2013 Yury Konovalov <YKonovalov@gmail.com>
#
# This file is part of SSP.
#
# SSP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SSP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SSP. If not, see <http://www.gnu.org/licenses/>.
"""Gives a facility to identify local node configuration"""
import logging
import subprocess
import time
import sys,os,re
import array
from IPy import IP
__all__ = [ "NETCONFIG" ]
LOG = logging.getLogger("ssp.netconfig")
__NETWORKS={'cluster' :{'ipv4':'192.168.50.0/24'},
'management':{'ipv4':'172.24.8.0/23'},
'storage' :{'ipv4':'192.168.50.0/24'},
'vnetwork' :{'ipv4':'172.24.0.0/24'},
'croc-main' :{'ipv4':'172.25.6.0/24'},
'croc-test' :{'ipv4':'172.24.8.0/23'}}
__SPECIAL_IPV4={'slp-multicast': {'ipv4':'239.255.255.253', 'type': 'multicast'},
'ipmi-broadcast': {'ipv4':'255.255.255.255', 'type': 'broadcast'},
'upnp-multicast': {'ipv4':'239.255.255.250', 'type': 'multicast'},
'ganglia-multicast': {'ipv4':'239.2.11.71', 'type': 'multicast'}}
def Request_decorator(func):
"""Logs all requests."""
def decorator(self, *args, **kwargs):
if kwargs:
LOG.info("%s(%s)%s called.", func.func_name, args, kwargs)
else:
LOG.info("%s(%s) called.", func.func_name, args)
return func(self, *args, **kwargs)
return decorator
def get_attrset_of_networks(networks={}, attr='iface_name'):
"""Return a set() of specific attribute composed of the specified networks."""
attrs=set()
for net in networks.keys():
for nettype in networks[net].keys():
if attr in networks[net][nettype].keys():
attrs.add(networks[net][nettype][attr])
return attrs
def get_ifaces():
"""Return a list of all local configured interfaces."""
ifaces=set()
try:
s = subprocess.Popen(["ip","r","s","scope","link"], stderr=open('/dev/null', 'w'), stdout=subprocess.PIPE).communicate()[0]
for r in re.finditer(r"(?m)^.*\s+dev\s+(?P<iface>[^\s]+).*$",s):
ifaces.add(r.groupdict()['iface'])
return ifaces
    except:
        LOG.error("Cannot run 'ip r s' command.")
        raise
def get_iface_by_route(ip):
"""Return the local interface name for spe
|
cific IP address. None for indirectly routed addresses"""
try:
s = subprocess.Popen(["ip","r","g",ip], stderr=open('/dev/null', 'w'), stdout=subprocess.PIPE).communicate()[0]
for r in re.finditer(r"(?m)^"+ip+"\s+dev\s+(?P<iface>[^\s]+).*$",s):
return r.groupdict()['iface']
except:
LOG.error("Cannot run 'ip r g' command.")
raise
def check_actual_iface(network, iface):
"""Return the local interface IP/prefix and broadcast address for specific network."""
net=network.net()
brd=str(network.broadcast())
try:
s = subprocess.Popen(["ip","a","s","dev",iface], stderr=open('/dev/null', 'w'), stdout=subprocess.PIPE).communicate()[0]
for r in re.finditer(r"(?m)^\s+inet\s+(?P<net>[^\s]+)\s+brd\s+(?P<brd>[^\s]+).*$",s):
n=r.groupdict()
localif=IP(n['net'],make_net=True)
if localif.net() == net:
if n['brd'] != brd:
LOG.error("Interface (%s) should have broadcast (%s), but set to (%s).",iface, brd, n['brd'])
return n['net'],n['brd']
elif network[1] in localif:
LOG.error("Interface (%s) configured incorrectly. Preffix on net (%s) is set to (%s).",iface, net, localif)
return n['net'],n['brd']
LOG.error("Broadcast is not set on interface (%s) or this is a bug. Direct route for (%s) exists, but I can't find this network with broadcast (%s).",iface, str(net), brd)
return None,None
except:
LOG.error("Cannot run 'ip a s dev' command.")
raise
def get_all_global_to_local_networks_projection():
"""Returns dict of all defined global networks as they seen on local host (if any). Return an empty dict if none is actually configured"""
return get_global_to_local_networks_projection(set(__NETWORKS.keys()))
def get_global_to_local_networks_projection(networkset=set()):
"""Returns dict of specified networks as they seen on local host (if any). Return an empty dict if none of specified networks is actually configured"""
networks={}
for name in networkset.intersection(__NETWORKS.keys()):
networks[name]={}
for nettype in __NETWORKS[name].keys():
net=__NETWORKS[name][nettype]
try:
IPnet=IP(net)
first=str(IPnet[1])
broadcast=str(IPnet.broadcast())
version=str(IPnet.version())
except:
LOG.error("Wrong configuration. Bad network: %s", str(net))
continue
iface=get_iface_by_route(first)
if iface:
v='IPv'+version
networks[name][v]={}
# Should be part of the dict
networks[name][v]['net']=net
networks[name][v]['broadcast']=broadcast
networks[name][v]['firstIP']=first
# Actual configuration part of the dict
networks[name][v]['iface_name']=iface
networks[name][v]['iface_ip_and_preffix'],networks[name][v]['iface_broadcast']=check_actual_iface(IPnet,iface)
else:
del networks[name]
return networks
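# Hedged usage sketch (the interface name and addresses below are illustrative
# assumptions, shown only to indicate the shape of the returned dict):
#   nets = get_global_to_local_networks_projection({'cluster'})
#   # -> {'cluster': {'IPv4': {'net': '192.168.50.0/24',
#   #                          'broadcast': '192.168.50.255',
#   #                          'firstIP': '192.168.50.1',
#   #                          'iface_name': 'eth0',
#   #                          'iface_ip_and_preffix': '192.168.50.12/24',
#   #                          'iface_broadcast': '192.168.50.255'}}}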
|
abdollatiif/NetCoding
|
wifi/bindings/callbacks_list.py
|
Python
|
gpl-2.0
| 1,855
| 0.00593
|
callback_classes = [
['void', 'ns3::Ptr<ns3::Packet const>', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'double', 'ns3::WifiMode', 'ns3::WifiPreamble', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Packet>', 'ns3::WifiMacHeader const*', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::WifiMacHeader const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Ptr<ns3::Packet>', 'ns3::Mac48Address', 'ns3::Mac48Address', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
    ['void', 'ns3::Mac48Address', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Mac48Address', 'unsigned char', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
acq4/acq4
|
acq4/util/igorpro.py
|
Python
|
mit
| 10,366
| 0.002315
|
from __future__ import print_function
import sys
import win32com.client
import pywintypes
import pythoncom
import numpy as np
import subprocess as sp
import concurrent.futures
import atexit
import json
import zmq
import os
from six.moves import range
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from acq4.util import Qt
from pyqtgraph.util.mutex import Mutex
"""
Thanks to: Jason Yamada-Hanff https://github.com/yamad/igor-mode
Main documentation: Igor Pro Folder\Miscellaneous\Windows Automation\Automation Server.ihf
* Use fprintf to return data:
igor('fprintf 0, "%d", 1+3')
* Access waves:
df = i.app.DataFolder("root:MIES:ITCDevices:ITC1600:Device0")
wave = df.Wave('OscilloscopeData')
# get data type and array shape
typ, rows, cols, layers, chunks = wave.GetDimensions()
dtype = dtypes[typ]
shape = [rows, cols, layers, chunks]
ndim = shape.index(0)
shape = shape[:ndim]
# get [(slope, intercept), ...] scale factors for each axis
scaling = [wave.GetScaling(ax) for ax in range(len(shape))]
np.array(wave.GetNumericWaveData(typ))
* Access global variables:
df = i.app.DataFolder("root")
var = df.Variable("myvar")
var.GetNumericValue()
var.GetStringValue()
"""
dtypes = {
0x02: 'float32',
0x04: 'float64',
0x08: 'byte',
0x10: 'short',
0x20: 'long',
0x48: 'ubyte',
0x50: 'ushort',
0x60: 'ulong',
0x01: 'complex',
0x00: 'str',
}
class IgorCallError(Exception):
FAILED = 1
TIMEDOUT = 2
def __init__(self, message, errno=1):
self.errno = errno
super(IgorCallError, self).__init__(message)
class IgorThread(Qt.QThread):
_newRequest = Qt.Signal(object)
def __init__(self, useZMQ=False):
Qt.QThread.__init__(self)
self.moveToThread(self)
if useZMQ:
self.igor = ZMQIgorBridge()
else:
self.igor = IgorBridge()
self._newRequest.connect(self._processRequest)
self.start()
atexit.register(self.quit)
def __call__(self, *args, **kwds):
return self._sendRequest('__call__', args, kwds)
def getWave(self, *args, **kwds):
return self._sendRequest('getWave', args, kwds)
def getVariable(self, *args, **kwds):
return self._sendRequest('getVariable', args, kwds)
def _sendRequest(self, req, args, kwds):
if isinstance(self.igor, ZMQIgorBridge):
return getattr(self.igor, req)(*args)
else:
fut = concurrent.futures.Future()
self._newRequest.emit((fut, req, args, kwds))
return fut
def _processRequest(self, req):
fut, method, args, kwds = req
try:
result = getattr(self.igor, method)(*args, **kwds)
fut.set_result(result)
except Exception as exc:
fut.set_exception(exc)
def run(self):
pythoncom.CoInitialize()
Qt.QThread.run(self)
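    # Hedged usage sketch (assumes a running Igor Pro instance on Windows; the
    # command string is taken from the module docstring above):
    #   igor = IgorThread()
    #   fut = igor('fprintf 0, "%d", 1+3')
    #   result = fut.result()   # blocks until the bridge call completes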
class IgorBridge(object):
def __init__(self):
self.app = None
def tryReconnect(func):
def _tryReconnect(self, *args, **kwds):
if self.app is None:
self.connect()
try:
return func(self, *args, **kwds)
except pywintypes.com_error as exc:
if exc.args[0] == -2147023174:
# server unavailable; try reconnecting
self.connect()
return func(self, *args, **kwds)
else:
raise
return _tryReconnect
@staticmethod
def igorProcessExists():
"""Return True if an Igor process is currently running.
"""
return 'Igor.exe' in sp.check_output(['wmic', 'process', 'get', 'description,executablepath'])
def connect(self):
self.app = None
# Need to check for running process to avoid starting a new one.
if self.igorProcessExists():
self.app = win32com.client.gencache.EnsureDispatch("IgorPro.Application")
else:
raise Exception("No Igor process found.")
@tryReconnect
def __call__(self, cmd, *args, **kwds):
"""Make an Igor function call.
Any keyword arguments are optional parameters.
"""
cmd = self.formatCall(cmd, *args, **kwds)
err, errmsg, hist, res = self.app.Execute2(1, 0, cmd, 0, "", "", "")
if err != 0:
raise RuntimeError("Igor call returned error code %d: %s" % (err, errmsg))
return res
    def formatCall(self, cmd, *args, **kwds):
        args = list(args)  # *args arrives as a tuple; convert so keyword args can be appended below
        for kwd, val in kwds.items():
if isinstance(val, int):
args.append("{}={:d}".format(kwd, val))
elif isinstance(val, float):
args.append("{}={:f}".format(kwd, val))
else:
raise TypeError("Invalid value: {}".format(val))
return "{}({})".format(cmd, ", ".join(["{}"]*len(args)).format(*args))
@tryReconnect
def getWave(self, folder, waveName):
df = self.app.DataFolder(folder)
wave = df.Wave(waveName)
# get data type and array shape
typ, rows, cols, layers, chunks = wave.GetDimensions()
dtype = dtypes[typ]
shape = [rows, cols, layers, chunks]
ndim = shape.index(0)
shape = shape[:ndim]
# get [(slope, intercept), ...] scale factors for each axis
# could use this to return a metaarray..
scaling = [wave.GetScaling(ax) for ax in range(len(shape))]
data = np.array(wave.GetNumericWaveData(typ))
return data, scaling
@tryReconnect
def getVariable(self, folder, varName):
df = self.app.DataFolder(folder)
var = df.Variable(varName)
typ = var.get_DataType()
if dtypes[typ] == 'str':
return var.GetStringValue()
else:
r,i = var.getNumericValue()
if dtypes[typ] == 'complex':
return complex(r, i)
else:
return r
class ZMQIgorBridge(object):
"""Bridge to Igor via ZMQ DEALER/ROUTER."""
_context = zmq.Context()
_types = {"NT_FP32": np.float32,
"NT_FP64": np.float64}
def __init__(self, host="tcp://localhost", port=5670):
super(ZMQIgorBridge, self).__init__()
self._unresolvedFutures = {}
self._currentMessageID = 0
self.address = "{}:{}".format(host, port)
self._socket = self._context.socket(zmq.DEALER)
self._socket.setsockopt(zmq.IDENTITY, "igorbridge")
self._socket.setsockopt(zmq.SNDTIMEO, 1000)
self._socket.setsockopt(zmq.RCVTIMEO, 0)
self._socket.connect(self.address)
self._pollTimer = Qt.QTimer()
self._pollTimer.timeout.connect(self._checkRecv)
self._pollTimer.start(100)
def __call__(self, cmd, *args):
# TODO: Handle optional values whenever they become supported in Igor
messageID = self._getMessageID()
future = concurrent.futures.Future()
call = self.formatCall(cmd, params=args, messageID=messageID)
try:
self._socket.send_multipart(call)
self._unresolvedFutures[messageID] = future
except zmq.error.Again:
            self._unresolvedFutures.pop(messageID, None)
            future.set_exception(IgorCallError("Send timed out",
IgorCallError.TIMEDOUT))
return future
def _checkRecv(self):
try:
reply = json.loads(self._socket.recv_multipart()[-1])
messageID = reply.get("messageID", None)
future = self._unresolvedFutures.get(messageID, None)
if future is None:
raise RuntimeError("No future found for messageID {}".format(messageID))
try:
                reply = self.parseReply(reply)
future.set_result(reply)
except IgorCallError as e:
future.set_exception(e)
except zmq.error.Again:
pass
def _getMessageID(self):
mid = self._currentMessageID
self._currentMessageID += 1
return str(mid)
def formatCall(self, cmd, params, messa
|
unlessbamboo/grocery-shop
|
language/python/src/flask/flaskr/flaskr.py
|
Python
|
gpl-3.0
| 3,444
| 0
|
# -*- coding: utf-8 -*-
"""
Flaskr
~~~~~~
A microblog example application written as Flask tutorial with
Flask and sqlite3.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from sqlite3 import dbapi2 as sqlite3
from flask import (Flask, request, session,
redirect, url_for, abort,
render_template, flash, _app_ctx_stack)
# configuration
DATABASE = '/tmp/flaskr.db'
# debug mode
DEBUG = True
# session signing key
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
# Set the FLASKR_SETTINGS environment variable to point to an extra config file to load
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def init_db():
"""Creates the database tables."""
    # Manually push an application context; inside the with block, g is bound to the app and torn down automatically afterwards
with app.app_context():
db = get_db()
        # Open the resource bundled with the application and read it
        # Use a cursor to execute the SQL script
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def get_db():
"""Opens a new database connection if there is none yet for the
current application context.
    1. config: the application configuration object
"""
top = _app_ctx_stack.top
if not hasattr(top, 'sqlite_db'):
sqlite_db = sqlite3.connect(app.config['DATABASE'])
sqlite_db.row_factory = sqlite3.Row
top.sqlite_db = sqlite_db
return top.sqlite_db
@app.teardown_appcontext
def close_db_connection(exception):
"""Closes the database again at the end of the request."""
top = _app_ctx_stack.top
if hasattr(top, 'sqlite_db'):
top.sqlite_db.close()
@app.route('/')
def show_entries():
"""show_entries:显示所有
|
的db条目"""
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
    # Flash a message to be shown on the next request
    flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
    # If login failed, show the error message
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    # Remove the logged_in flag from the session
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
    # Initialize the database
    init_db()
    # Run this file as a standalone application
app.run(host='0.0.0.0')
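# Hedged usage sketch (not part of the tutorial; assumes the templates render
# flashed messages): the login flow above can be exercised with Flask's test
# client using the credentials configured above, e.g.
#   with app.test_client() as c:
#       rv = c.post('/login', data=dict(username='admin', password='default'),
#                   follow_redirects=True)
#       assert 'You were logged in' in rv.data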
|
NullHypothesis/active-probing-tools
|
source_port_analysis.py
|
Python
|
gpl-3.0
| 1,711
| 0.00526
|
#!/usr/bin/env python
import sys
import time
import scapy.all as scapy
SEQ_MAX = 2**32 - 1
previous_packets = set()
def is_retransmission(packet):
packet_key = "%d%d%d" % (packet[scapy.TCP].sport,
packet[scapy.TCP].dport,
packet[scapy.TCP].seq)
if packet_key in previous_packets:
return True
else:
previous_packets.add(packet_key)
return False
def analyse_pcap(pcap_file):
    old_port = 0
for packet in scapy.PcapReader(pcap_file):
# Weed out SYN retransmissions.
if (not scapy.TCP in packet) or (not packet[scapy.TCP].flags == 2):
continue
if packet[scapy.IP].src == "211.155.86.135":
continue
print packet[scapy.IP].src
continue
if is_retransmission(packet):
continue
t = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(packet.time))
        #diff = packet[scapy.TCP].sport - old_port
#if diff < 0:
# diff = 65535 - abs(diff)
#print diff
#old_port = packet[scapy.TCP].sport
#for opt_name, opt_val in packet[scapy.TCP].options:
# if opt_name == "Timestamp":
#print packet.time, opt_val[0]
if packet[scapy.IP].src == "211.155.86.135":
print "%s, %d, 1" % (t, packet[scapy.TCP].dport)
else:
print "%s, %d, 0" % (t, packet[scapy.TCP].dport)
def main():
if len(sys.argv) != 2:
print >> sys.stderr, "\nUsage: %s PCAP_FILE\n" % sys.argv[0]
return 1
pcap_file = sys.argv[1]
analyse_pcap(pcap_file)
return 0
if __name__ == "__main__":
sys.exit(main())
|
ShashkovS/plus_reader
|
setup.py
|
Python
|
mit
| 3,445
| 0.000872
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'plus_reader'
DESCRIPTION = 'Recognition of tables with pluses or marks'
URL = 'https://github.com/ShashkovS/plus_reader'
EMAIL = 'sh57@yandex.ru'
AUTHOR = 'Sergey Shashkov'
# What packages are required for this module to be executed?
REQUIRED = [
'numpy', 'PyPDF2',
'opencv-python', 'pillow',
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
class PublishCommand(Command):
"""Support setup.py publish."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={
'console_scripts': ['read_pluses=read_pluses.__main__:main'],
},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
# 'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'publish': PublishCommand,
},
)
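# Hedged usage note: with the PublishCommand registered in cmdclass above, a
# release could be built and uploaded (assuming twine is installed) with
#   python setup.py publish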
|
pbarton666/buzz_bot
|
djangoproj/djangoapp/crawler/findDate.py
|
Python
|
mit
| 12,201
| 0.016638
|
'''Tools to guess a blog's posting date without knowing the blog site's layout, using BeautifulSoup.
FindDateInSoup::findDate_main(<BS object>, <ancestorDepth = integer> returns a datetime object or None
It begins from the BS object passed in and works its way up the parse tree until it finds a date or it reaches an imposed
limit on the "ancestorDepth", or how many levels up it can search.
This approach has issues - particularly speed. Each BS object encapsulates all subordinate objects so as we move up the tree
the string to search grows geometrically. Also, the same (previously unfruitful) text is searched again at each level.
I've played with the idea of extracting a previously-searched element to prevent revisiting, but this has the problem of 'fratricide' -
if two relevant content chunks appear in the same post, the process of searching for the date can extract the others. Revisit this
because it probably makes sense to simply reparse the url every time we look for a date as reparsing can be much faster.
'''
#standard modules
import re
from datetime import datetime
import os
import logging
#parsing modules
from BeautifulSoup import BeautifulSoup
import philparser
parser = philparser.DateParser()
#log settings
LOG_NAME = "master.log"
LOG_LEVEL = logging.DEBUG
class FindDateInSoup():
def __init__(self, ancestorDepth=100):
self.depth = ancestorDepth
self.parser = parser
self._set_logger()
def invoke_date_parser(self, txt):
#invokes our date parser to figure out whether there's a date in this chunk of code.
try:
dateFound = self.parser.grabDate(txt)
return dateFound
except:
msg = "parser had a problem with %s"%str(txt)
return msg
def NOTUSEDfindDate_mainMethod3(self, parseObj, objContents=None):
'''
This strips the content out of the content object then uses string methods to pass chunks of the original html
to the date parser. We'll start from the location of the content in the html and work up until we find a date.
'''
chunkToParse =800 #size of the piece to send to the date parser
mydate = None
try:
#make a string version of the BS objects contents
#objContents =''
#for t in parseObj.findAllPrevious():
# objContents = objContents+unicode(t)
#find this in the original html
last = objContents.find(unicode(parseObj))
done = False
while not done:
#chop the html into chunks; this keeps parser from barfing, also date is probably close to content
first = max(0, last - chunkToParse)
ret = self.invoke_date_parser(objContents[first:last])
#reset 'last' to provide a bit of overlap; this in case we chopped a good date in half
last = first + 20
#we'll return the last date found (most proximate to the content bit)
if len(ret) > 0:
mydate = ret[len(ret)-1]
print "found %s"%(str(mydate))
return mydate
if first == 0:
done = True
        #If we've encountered an issue, we no longer have a parsable object (doesn't matter why), so give up
except:
return mydate
def NOTUSEDfindDate_mainMethod2(self, obj, objContents=None):
'''
        Tries to find a valid date in the parse tree. This method walks up the parse tree, deleting any
branch that does not yield a date. This works fine if we don't care about the parse tree. I can't seem
to be able to deep copy it, so any deletion on Copy A affects Copy B. Re-parsing for every content bit is too
expensive.
'''
chunkToParse =400 #size of the piece to send to the date parser
mydate = None
targetObj = obj
curDepth = 0
#
while curDepth <= self.depth:
try:
oldObj = targetObj
targetObj = targetObj.findPrevious()
#kill the old object and all its kids
try:
kids = oldObj.childGenerator()
for k in kids:
k.extract()
oldObj.extract()
except:
pass
objContents =""
for t in targetObj:
objContents = "%s %s"%(objContents, t)
if len(objContents) > 6: #don't bother parsing really small pieces smaller than '1/1/01'
ret = self.invoke_date_parser(objContents[:chunkToParse])
if len(ret) > 0:
mydate = ret[0]
return mydate
curDepth +=1
            #If we've encountered an issue, we no longer have a parsable object (doesn't matter why), so give up
except:
return mydate
def findDate_main(self, obj, objContents=None):
'''
        Tries to find a valid date in the parse tree. I haven't figured out a fool-proof way to do this given the diversity
of blog architectures out there.
'''
chunkToParse =400 #size of the piece to send to the date parser
mydate = None
targetObj = obj
curDepth = 0
#
while curDepth <= self.depth:
try:
oldObj = targetObj
targetObj = targetObj.findPrevious()
#oldObj.extract()
objContents =""
'''
                The parent object (objContents) is an "instance" object - basically an iterator object that spawns navigable strings.
                We'll concatenate it into a string, then pick off chunks to send to the date parser for analysis (this keeps the
parser from barfing on big strings; also the date is most likely to be at the top so we don't need to analyze the whole.
'''
for t in targetObj:
objContents = "%s %s"%(objContents, t)
#the parser is a bit inefficient, so we'll do a couple first order checks before we pass this along
intInside = re.compile("\d").search(objContents, 1) #searches for one instance of any integer; None type if not found
if len(objContents) > 6 and intInside: #also screen out chunks smaller than '1/1/01'
ret = self.invoke_date_parser(objContents[:chunkToParse])
if len(ret) > 0:
mydate = ret[0]
return mydate
curDepth +=1
            #If we've encountered an issue, we no longer have a parsable object (doesn't matter why), so give up
except:
return mydate
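    # Hedged usage sketch (the soup element below is an assumption): starting
    # from a content tag, the finder walks up the parse tree until a date is
    # found or ancestorDepth levels have been searched, e.g.
    #   finder = FindDateInSoup(ancestorDepth=20)
    #   posted = finder.findDate_main(soup.find('div', 'post-body'))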
def _set_logger(self):
#this sets up the logging parameters. The log will appear at ./logs/master.log (or whatever is in the settings
# at the top of this module).
LOGDIR = os.path.join(os.path.dirname(__file__), 'logs').replace('\\','/')
        log_filename = LOGDIR + '/' + LOG_NAME
        logging.basicConfig(level=LOG_LEVEL,
format='%(module)s %(funcName)s %(lineno)d %(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_filename,
filemode='w')
def buildTestHtml():
#Builds a faux html string for testing. NB, for the links to be found they need to have one or more of the qualifying words
# e.g., 'blog' (these are
|
georgejlee/croptrends
|
browser/admin.py
|
Python
|
mit
| 91
| 0.010989
|
from browser.models import Crop
from django.contrib import admin
admin.site.register(Crop)
|
ChristianKniep/QNIB
|
serverfiles/usr/local/lib/networkx-1.6/networkx/algorithms/bipartite/projection.py
|
Python
|
gpl-2.0
| 15,424
| 0.008169
|
# -*- coding: utf-8 -*-
"""Create one-mode (unipartite) projections from bipartite graphs.
"""
import networkx as nx
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['project',
'projected_graph',
'weighted_projected_graph',
'collaboration_weighted_projected_graph',
'overlap_weighted_projected_graph',
'generic_weighted_projected_graph']
def projected_graph(B, nodes, multigraph=False):
r"""Return the graph that is the projection of the bipartite graph B
onto the specified nodes.
The nodes retain their names and are connected in the resulting
    graph if they have an edge to a common node in the original graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
multigraph: bool (default=False)
If True return a multigraph where the multiple edges represent multiple
       shared neighbors. The edge key in the multigraph is assigned to the
label of the neighbor.
Returns
-------
Graph : NetworkX graph or multigraph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(4)
>>> G = bipartite.projected_graph(B, [1,3])
>>> print(G.nodes())
[1, 3]
>>> print(G.edges())
[(1, 3)]
If nodes `a`, and `b` are connected through both nodes 1 and 2 then
building a multigraph results in two edges in the projection onto
[`a`,`b`]:
>>> B = nx.Graph()
    >>> B.add_edges_from([('a', 1), ('b', 1), ('a', 2), ('b', 2)])
>>> G = bipartite.projected_graph(B, ['a', 'b'], multigraph=True)
>>> print(G.edges(keys=True))
[('a', 'b', 1), ('a', 'b', 2)]
Notes
------
    No attempt is made to verify that the input graph B is bipartite.
Returns a simple graph that is the projection of the bipartite graph B
onto the set of nodes given in list nodes. If multigraph=True then
a multigraph is returned with an edge for every shared neighbor.
Directed graphs are allowed as input. The output will also then
be a directed graph with edges if there is a directed path between
the nodes.
The graph and node properties are (shallow) copied to the projected graph.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
weighted_projected_graph,
collaboration_weighted_projected_graph,
overlap_weighted_projected_graph,
generic_weighted_projected_graph
"""
if B.is_multigraph():
raise nx.NetworkXError("not defined for multigraphs")
if B.is_directed():
directed=True
if multigraph:
G=nx.MultiDiGraph()
else:
G=nx.DiGraph()
else:
directed=False
if multigraph:
G=nx.MultiGraph()
else:
G=nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n,B.node[n]) for n in nodes)
for u in nodes:
nbrs2=set((v for nbr in B[u] for v in B[nbr])) -set([u])
if multigraph:
for n in nbrs2:
if directed:
links=set(B[u]) & set(B.pred[n])
else:
links=set(B[u]) & set(B[n])
for l in links:
if not G.has_edge(u,n,l):
G.add_edge(u,n,key=l)
else:
G.add_edges_from((u,n) for n in nbrs2)
return G
def weighted_projected_graph(B, nodes, ratio=False):
r"""Return a weighted unipartite projection of B onto the nodes of
one bipartite node set.
The weighted projected graph is the projection of the bipartite
network B onto the specified nodes with weights representing the
number of shared neighbors or the ratio between actual shared
neighbors and possible shared neighbors if ratio=True [1]_. The
nodes retain their names and are connected in the resulting graph
if they have an edge to a common node in the original graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
ratio: Bool (default=False)
If True, edge weight is the ratio between actual shared neighbors
        and possible shared neighbors. If False, edge weight is the number
of shared neighbors.
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(4)
>>> G = bipartite.weighted_projected_graph(B, [1,3])
>>> print(G.nodes())
[1, 3]
>>> print(G.edges(data=True))
[(1, 3, {'weight': 1})]
>>> G = bipartite.weighted_projected_graph(B, [1,3], ratio=True)
>>> print(G.edges(data=True))
[(1, 3, {'weight': 0.5})]
Notes
------
No attempt is made to verify that the input graph B is bipartite.
The graph and node properties are (shallow) copied to the projected graph.
See Also
--------
is_bipartite,
is_bipartite_node_set,
sets,
collaboration_weighted_projected_graph,
overlap_weighted_projected_graph,
generic_weighted_projected_graph
projected_graph
References
----------
.. [1] Borgatti, S.P. and Halgin, D. In press. "Analyzing Affiliation
Networks". In Carrington, P. and Scott, J. (eds) The Sage Handbook
of Social Network Analysis. Sage Publications.
"""
if B.is_multigraph():
raise nx.NetworkXError("not defined for multigraphs")
if B.is_directed():
pred=B.pred
G=nx.DiGraph()
else:
pred=B.adj
G=nx.Graph()
G.graph.update(B.graph)
G.add_nodes_from((n,B.node[n]) for n in nodes)
n_top = float(len(B) - len(nodes))
for u in nodes:
unbrs = set(B[u])
nbrs2 = set((n for nbr in unbrs for n in B[nbr])) - set([u])
for v in nbrs2:
vnbrs = set(pred[v])
common = unbrs & vnbrs
if not ratio:
weight = len(common)
else:
weight = len(common) / n_top
G.add_edge(u,v,weight=weight)
return G
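# Hedged worked example (restating the docstring case above): projecting the
# path graph 0-1-2-3 onto [1, 3], nodes 1 and 3 share the single neighbour 2;
# with two "top" nodes (0 and 2), n_top = 2, so the ratio weight is 1/2 = 0.5,
# which matches the {'weight': 0.5} edge shown in the docstring example.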
def collaboration_weighted_projected_graph(B, nodes):
r"""Weighted unipartite projection of B onto the nodes of
one bipartite node set using the collaboration model.
The collaboration weighted projection is the projection of the
bipartite network B onto the specified nodes with weights assigned
using Newman's collaboration model [1]_:
.. math::
w_{v,u} = \sum_k \frac{\delta_{v}^{w} \delta_{w}^{k}}{k_w - 1}
where `v` and `u` are nodes from the same bipartite node set,
and `w` is a node of the opposite node set.
The value `k_w` is the degree of node `w` in the bipartite
network and `\delta_{v}^{w}` is 1 if node `v` is
linked to node `w` in the original bipartite graph or 0 otherwise.
The nodes retain their names and are connected in the resulting
    graph if they have an edge to a common node in the original bipartite
graph.
Parameters
----------
B : NetworkX graph
The input graph should be bipartite.
nodes : list or iterable
Nodes to project onto (the "bottom" nodes).
Returns
-------
Graph : NetworkX graph
A graph that is the projection onto the given nodes.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> B = nx.path_graph(5)
>>> B.add_edge(1,5)
>>> G = bipartite.collaboration_weighted_projected_graph(B, [0, 2, 4, 5])
>>> print(G.nodes())
[0, 2, 4, 5]
>>> for edge in G.edges(data=True): print(edge)
...
(0, 2, {'weight': 0.5})
(0, 5, {'weight': 0.5})
(2, 4, {'weight': 1.0})
(2, 5, {'weight': 0.5
|
mandli/multilayer-examples
|
1d/plot_shelf_contour.py
|
Python
|
mit
| 3,904
| 0.024334
|
#!/usr/bin/env python
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from clawpack.pyclaw.solution import Solution
import clawpack.visclaw.data as data
rho = [1025.0,1045.0]
eta_init = [0.0,-300.0]
def plot_contour(data_dir="./_output",out_dir='./',num_layers=2,num_frames=1000,ref_lines=[-130e3,-30e3],color=True):
"""Plot a contour plot of a shelf based simluation
Note that to get a nice contour you may have to change the number of output
times a solution is written out in `shelf.py`
"""
# Create plot data
plot_data = data.ClawPlotData()
plot_data.outdir = data_dir
# Read in bathymetry
sol = [Solution(0,path=data_dir,read_aux=True)]
b = sol[0].state.aux[0,:]
# Extract x coordinates, this assumes that these do not change through the
    # simulation (they should not)
x = sol[0].state.grid.dimensions[0].centers
# Read in all solutions
print "Reading in solutions..."
for frame in xrange(1,num_frames):
try:
sol.append(Solution(frame,path=data_dir))
except IOError:
# We have reached the last frame before given num_frames reached
num_frames = frame - 1
break
print "Found %s frames to plot." % num_frames
# Create plotting arrays
print "Constructing plotting variables..."
eta = np.ndarray((num_frames,num_layers,len(x)))
t = np.ndarray((num_frames))
for frame in xrange(num_frames):
# Append data to eta and t lists
t[frame] = sol[frame].t / 3600.0
# Calculate from the bottom up
layer_index = 2 * (num_layers-1)
eta[frame,num_layers - 1,:] = sol[frame].q[layer_index,:] / rho[-1] + b
# Calculate the rest of the layers
for layer in xrange(num_layers-2,-1,-1):
layer_index = 2 * layer
eta[frame,layer,:] = sol[frame].q[layer_index,:] / rho[layer] + eta[frame,layer+1,:]
# Create mesh grid for plot
X,T = np.meshgrid(x,t)
# Plot the contours of each layer
clines = np.linspace(.025,.4,8)
title = ['top','internal']
print "Creating plots..."
fig = plt.figure(figsize=[10,8])
for layer in xrange(num_layers):
axes = fig.add_subplot(1,num_layers,layer+1)
# Plot positive and negative contours
eta_plot = eta[:,layer,:] - eta_init[layer]
plot = axes.contour(X,T,eta_plot, clines,colors='r')
plot = axes.contour(X,T,eta_plot,-clines,colors='b')
for ref_line in ref_lines:
axes.plot([ref_line,ref_line],[0,2],'k:')
# X ticks and labels
axes.set_xticks([-300e3,-200e3,-100e3,-30e3])
axes.set_xticklabels([300,200,100,30],fontsize=15)
axes.set_xlabel("Kilometers offshore",fontsize=15)
axes.set_xlim([-200e3,0.0])
# First plot from left to right, write y ticks
if layer == 0:
plt.yticks(fontsize=15)
axes.set_ylabel("Hours",fontsize=20)
else:
# Remove tick labels
axes.set_yticklabels(['' for label in axes.get_yticklabels()])
axes.set_title("Contours of %s surface" % title[layer],fontsize=15)
file_name = os.path.join(out_dir,"contour.png")
print "Writing out to %s" % file_name
plt.savefig(file_name)
if __name__=="__main__":
if len(sys.argv) > 1:
plot_contour(sys.argv[1],ref_lines=[-130e3,-30e3],num_frames=300)
else:
ref_lines = ( [-30e3], [-130e3,-30e3] )
for (i,shelf_type) in enumerate(['jump_shelf','sloped_shelf']):
path = os.path.join(os.environ['DATA_PATH'],shelf_type,'ml_e2_n2000_output')
out_path = os.path.join(os.environ['DATA_PATH'],shelf_type,'ml_e2_n2000_plots')
            plot_contour(path,out_dir=out_path,ref_lines=ref_lines[i])
|
nidhididi/CloudBot
|
plugins/time_plugin.py
|
Python
|
gpl-3.0
| 1,073
| 0.00466
|
import time
from cloudbot import hook
@hook.command(autohelp=False)
def beats(text):
"""bea
|
ts -- Gets the current time in .beats (Swatch Internet Time). """
if text.lower() == "wut":
return "Instead of hours and minutes, the mean solar day is divided " \
"up into 1000 parts called \".beats\". Each .beat lasts 1 minute and" \
" 26.4 seconds. Times are
|
notated as a 3-digit number out of 1000 af" \
"ter midnight. So, @248 would indicate a time 248 .beats after midni" \
"ght representing 248/1000 of a day, just over 5 hours and 57 minute" \
"s. There are no timezones."
elif text.lower() == "guide":
return "1 day = 1000 .beats, 1 hour = 41.666 .beats, 1 min = 0.6944 .beats, 1 second = 0.01157 .beats"
t = time.gmtime()
h, m, s = t.tm_hour, t.tm_min, t.tm_sec
utc = 3600 * h + 60 * m + s
bmt = utc + 3600 # Biel Mean Time (BMT)
beat = bmt / 86.4
if beat > 1000:
beat -= 1000
return "Swatch Internet Time: @%06.2f" % beat
|
nirmeshk/oh-mainline
|
mysite/missions/models.py
|
Python
|
agpl-3.0
| 1,422
| 0.000703
|
# This file is part of OpenHatch.
# Copyright (C) 2010 John Stumpo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
import mysite.search.models
class Step(models.Model):
name = models.CharField(max_length=255, unique=True)
class StepCompletion(mysite.search.models.OpenHatchModel):
person = models.ForeignKey('profile.Person')
step = models.ForeignKey('Step')
# Current mission status (True - user have completed it, False - reseted)
is_currently_completed = models.BooleanField(default=True)
class Meta:
unique_together = ('person', 'step')
class IrcMissionSession(models.Model):
person = models.ForeignKey('profile.Person', null=True)
nick = models.CharField(max_length=255, unique=True)
password = models.CharField(max_length=255)
|
blstream/ut-arena
|
ut_arena_py_api/ut_arena/envs/test/settings.py
|
Python
|
apache-2.0
| 31
| 0.032258
|
from ut_arena.settings import *
|
dselsam/lean-python-bindings
|
lean/lang/env.py
|
Python
|
apache-2.0
| 1,567
| 0
|
import lean
import lang.expr as expr
# =========================================================
# Declaration Views
class DeclView(lean.declaration):
def __init__(self, decl):
self.decl = decl
def destruct(self):
# type: DeclView -> (lean.name, ?, ?, lean.expr, lean.expr)
return (self.decl.get_name(),
self.decl.get_univ_params(),
self.decl.get_num_univ_params(),
self.decl.get_type(),
self.decl.get_value())
def mentions(self, d_thm):
v = self.decl.get_value()
return expr.gather_theorem(d_thm, v)
# =========================================================
# Environment Views
class EnvView(lean.environment):
def __init__(self, env):
# type: lean.environment -> None
self.env = env
def get_decls(self, f=None):
# type: (lean.declaration -> bool) -> [lean.declaration]
decls = []
self.env.for_each_declaration(lambda decl: decls.append(decl))
if f:
decls = filter(lambda decl: f(decl), decls)
return decls
def get_theorems(self):
# type: (lean.declaration -> bool) -> [lean.declaration]
return self.get_decls(lambda decl: decl.is_theorem())
    def thm_dict_of_decls(self, decls):
# type: [lean.declaration] -> dict<lean.name, lean.expr>
d_thm = {}
for decl in decls:
if decl.is_theorem():
                n, up, nup, t, v = DeclView(decl).destruct()
d_thm[n] = v
return d_thm
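    # Hedged usage sketch (how the lean environment object is obtained from the
    # bindings is an assumption):
    #   env = EnvView(lean_environment)
    #   thms = env.get_theorems()
    #   name_to_proof = env.thm_dict_of_decls(thms)   # lean.name -> lean.expr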
|
openaps/dexcom_reader
|
dexcom_reader/packetwriter.py
|
Python
|
mit
| 1,120
| 0
|
import struct
from . import crc16
class PacketWriter:
MAX_PAYLOAD = 1584
MIN_LEN = 6
MAX_LEN = 1590
SOF = 0x01
OFFSET_SOF = 0
OFFSET_LENGTH = 1
OFFSET_CMD = 3
OFFSET_PAYLOAD = 4
def __init__(self):
self._packet = None
def Clear(self):
self._packet = None
def NewSOF(self, v):
self._packet[0] = chr(v)
def PacketString(self):
return "".join(self._packet)
def AppendCrc(self):
self.SetLength()
ps = self.PacketString()
        crc = crc16.crc16(ps, 0, len(ps))
        for x in struct.pack("H", crc):
self._packet.append(x)
def SetLength(self):
self._packet[1] = chr(len(self._packet) + 2)
def _Add(self, x):
try:
len(x)
for y in x:
self._Add(y)
except: # noqa: E722
self._packet.append(x)
def ComposePacket(self, command, payload=None):
assert self._packet is None
self._packet = ["\x01", None, "\x00", chr(command)]
if payload:
self._Add(payload)
self.AppendCrc()
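# Hedged usage sketch (the command byte and payload are illustrative
# assumptions):
#   w = PacketWriter()
#   w.ComposePacket(0x20, payload="\x01\x02")
#   frame = w.PacketString()   # SOF, length, command, payload and CRC16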
|
OpenGenus/cosmos
|
code/filters/src/median_filter/median_filter.py
|
Python
|
gpl-3.0
| 1,398
| 0.003577
|
##Author - Sagar Vakkala (@codezoned)
import numpy
from PIL import Image
def median_filter(data, filter_size):
temp_arr = []
index = filter_size // 2
data_final = []
data_final = numpy.zeros((len(data), len(data[0])))
for i in range(len(data)):
# Iterate over the Image Array
for j in range(len(data[0])):
for z in range(filter_size):
if i + z - index < 0 or i + z - index > len(data) - 1:
for c in range(filter_size):
temp_arr.append(0)
else:
if j + z - index < 0 or j + index > len(data[0]) - 1:
temp_arr.append(0)
else:
for k in range(filter_size):
temp_arr.append(data[i + z - index][j + k - index])
temp_arr.sort()
data_final[i][j] = temp_arr[len(temp_arr) // 2]
temp_arr = []
return data_final
def main():
##Replace someImage with the noise image
img = Image.open("someImage.png").convert("L")
arr = numpy.array(img)
##3 defines the no of layers for the filter
    ##It also defines that the filter will be 3x3 size. Change 3 to 5 for a 5x5 filter
removed_noise = median_filter(arr, 3)
    img = Image.fromarray(removed_noise)
img.show()
if __name__ == "__main__":
main()
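# Hedged worked example (tiny array chosen for illustration): for a 3x3 filter
# applied to
#   [[1,   2, 3],
#    [4, 100, 6],
#    [7,   8, 9]]
# the centre pixel gathers all nine values, sorts them and keeps the median,
# so the salt-and-pepper outlier 100 is replaced by 6.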
|
Andrew-McNab-UK/DIRAC
|
WorkloadManagementSystem/PilotAgent/pilotTools.py
|
Python
|
gpl-3.0
| 18,034
| 0.032051
|
########################################################################
# $Id$
########################################################################
""" A set of common tools to be used in pilot commands
"""
import sys
import time
import os
import pickle
import getopt
import imp
import types
import urllib2
import signal
__RCSID__ = '$Id$'
def printVersion( log ):
log.info( "Running %s" % " ".join( sys.argv ) )
try:
with open( "%s.run" % sys.argv[0], "w" ) as fd:
pickle.dump( sys.argv[1:], fd )
except OSError:
pass
log.info( "Version %s" % __RCSID__ )
def pythonPathCheck():
try:
os.umask( 18 ) # 022
pythonpath = os.getenv( 'PYTHONPATH', '' ).split( ':' )
print 'Directories in PYTHONPATH:', pythonpath
for p in pythonpath:
if p == '':
continue
try:
if os.path.normpath( p ) in sys.path:
# In case a given directory is twice in PYTHONPATH it has to removed only once
sys.path.remove( os.path.normpath( p ) )
except Exception, x:
print x
print "[EXCEPTION-info] Failing path:", p, os.path.normpath( p )
print "[EXCEPTION-info] sys.path:", sys.path
raise x
except Exception, x:
print x
print "[EXCEPTION-info] sys.executable:", sys.executable
print "[EXCEPTION-info] sys.version:", sys.version
print "[EXCEPTION-info] os.uname():", os.uname()
raise x
def alarmTimeoutHandler( *args ):
raise Exception( 'Timeout' )
def retrieveUrlTimeout( url, fileName, log, timeout = 0 ):
"""
Retrieve remote url to local file, with timeout wrapper
"""
urlData = ''
if timeout:
signal.signal( signal.SIGALRM, alarmTimeoutHandler )
# set timeout alarm
signal.alarm( timeout + 5 )
try:
remoteFD = urllib2.urlopen( url )
expectedBytes = 0
# Sometimes repositories do not return Content-Length parameter
try:
expectedBytes = long( remoteFD.info()[ 'Content-Length' ] )
except Exception as x:
expectedBytes = 0
data = remoteFD.read()
if fileName:
with open( fileName + '-local', "wb" ) as localFD:
localFD.write( data )
else:
urlData += data
remoteFD.close()
if len( data ) != expectedBytes and expectedBytes > 0:
log.error( 'URL retrieve: expected size does not match the received one' )
return False
if timeout:
signal.alarm( 0 )
if fileName:
return True
else:
return urlData
except urllib2.HTTPError, x:
if x.code == 404:
log.error( "URL retrieve: %s does not exist" % url )
if timeout:
signal.alarm( 0 )
return False
except urllib2.URLError:
log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) )
return False
except Exception, x:
if x == 'Timeout':
log.error( 'Timeout after %s seconds on transfer request for "%s"' % ( str( timeout ), url ) )
if timeout:
signal.alarm( 0 )
raise x
class ObjectLoader( object ):
""" Simplified class for loading objects from a DIRAC installation.
Example:
ol = ObjectLoader()
object, modulePath = ol.loadObject( 'pilot', 'LaunchAgent' )
"""
def __init__( self, baseModules, log ):
""" init
"""
self.__rootModules = baseModules
self.log = log
def loadModule( self, modName, hideExceptions = False ):
""" Auto search which root module has to be used
"""
for rootModule in self.__rootModules:
impName = modName
if rootModule:
impName = "%s.%s" % ( rootModule, impName )
self.log.debug( "Trying to load %s" % impName )
module, parentPath = self.__recurseImport( impName, hideExceptions = hideExceptions )
#Error. Something cannot be imported. Return error
if module is None:
return None, None
#Huge success!
else:
return module, parentPath
#Nothing found, continue
#Return nothing found
return None, None
def __recurseImport( self, modName, parentModule = None, hideExceptions = False ):
""" Internal function to load modules
"""
if type( modName ) in types.StringTypes:
modName = modName.split( '.' )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
      impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError, excp:
if str( excp ).find( "No module named %s" % modName[0] ) == 0:
return None, None
errMsg = "Can't load %s in %s" % ( ".".join( modName ), parentModule.__path__[0] )
if not hideExceptions:
self.log.exception( errMsg )
      return None, None
if len( modName ) == 1:
return impModule, parentModule.__path__[0]
return self.__recurseImport( modName[1:], impModule,
hideExceptions = hideExceptions )
def loadObject( self, package, moduleName, command ):
""" Load an object from inside a module
"""
loadModuleName = '%s.%s' % ( package, moduleName )
module, parentPath = self.loadModule( loadModuleName )
if module is None:
return None, None
try:
commandObj = getattr( module, command )
return commandObj, os.path.join( parentPath, moduleName )
except AttributeError, e:
self.log.error( 'Exception: %s' % str(e) )
return None, None
def getCommand( params, commandName, log ):
""" Get an instantiated command object for execution.
Commands are looked in the following modules in the order:
1. <CommandExtension>Commands
2. pilotCommands
3. <Extension>.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
4. <Extension>.WorkloadManagementSystem.PilotAgent.pilotCommands
5. DIRAC.WorkloadManagementSystem.PilotAgent.<CommandExtension>Commands
6. DIRAC.WorkloadManagementSystem.PilotAgent.pilotCommands
Note that commands in 3.-6. can only be used of the the DIRAC installation
has been done. DIRAC extensions are taken from -e ( --extraPackages ) option
of the pilot script.
"""
extensions = params.commandExtensions
modules = [ m + 'Commands' for m in extensions + ['pilot'] ]
commandObject = None
# Look for commands in the modules in the current directory first
for module in modules:
try:
impData = imp.find_module( module )
commandModule = imp.load_module( module, *impData )
commandObject = getattr( commandModule, commandName )
except Exception, _e:
pass
if commandObject:
return commandObject( params ), module
if params.diracInstalled:
diracExtensions = []
for ext in params.extensions:
if not ext.endswith( 'DIRAC' ):
diracExtensions.append( ext + 'DIRAC' )
else:
diracExtensions.append( ext )
diracExtensions += ['DIRAC']
ol = ObjectLoader( diracExtensions, log )
for module in modules:
commandObject, modulePath = ol.loadObject( 'WorkloadManagementSystem.PilotAgent',
module,
commandName )
if commandObject:
return commandObject( params ), modulePath
  # No command could be instantiated
return None, None
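# Illustrative sketch, not part of the original pilot script: assuming a
# pilotParams-like object whose 'commands' attribute lists command names, and
# command classes that expose an execute() method (which is what the pilot
# main loop expects), getCommand() would typically be driven like this:
#
#   for commandName in params.commands:
#     commandObject, location = getCommand( params, commandName, log )
#     if commandObject is None:
#       log.error( 'Command %s could not be instantiated' % commandName )
#     else:
#       log.info( 'Executing %s from %s' % ( commandName, location ) )
#       commandObject.execute()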
class Logger( object ):
""" Basic logger object, for use inside the pilot. Just using print.
"""
def __init__( self, name = 'Pilot', debugFlag = False, pilotOutput = 'pilot.out' ):
self.debugFlag = debugFlag
self.name = name
self.out = pilotOutput
def __outputMessage( self, msg, level, header ):
if self.out:
with open( self.out, 'a' ) as outputFile:
for _line in msg.split( "\n" ):
if header:
outLine = "%s UTC %s [%s] %s" % ( time.strftime( '%Y-%m-%d %H:%M:%S', time.gmtime() ),
level,
self.name,
_line )
print outLine
if self.out:
outputFile.write( outLine + '\n' )
else:
|
macobo/python-grader
|
grader/utils.py
|
Python
|
mit
| 1,858
| 0.001615
|
""" An utility module containing utility functions used by the grader module
and some useful pre-test hooks.
"""
import json
import traceback
def import_module(path, name=None):
if name is None:
name = path
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(name, path)
module = loader.load_module(name)
return module
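# Illustrative usage, not part of the original module (the file name below is
# hypothetical): load a submission by path and inspect its attributes.
#
#   solution = import_module("solution.py")
#   assert is_function(getattr(solution, "main", None))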
def is_function(value):
|
try:
return hasattr(value, '__call__')
except:
return False
## Function descriptions
def beautifyDescription(description):
""" Converts docstring of a function to a test description
    by removing excess whitespace and joining the result on one
line """
|
lines = (line.strip() for line in description.split('\n'))
return " ".join(filter(lambda x: x, lines))
def setDescription(function, description):
import grader
old_description = grader.get_test_name(function)
if old_description in grader.testcases:
grader.testcases.remove(old_description)
description = beautifyDescription(description)
function.__doc__ = description
grader.testcases.add(description, function)
## Json managing
def load_json(json_string):
" Loads json_string into an dict "
return json.loads(json_string)
def dump_json(ordered_dict):
" Dumps the dict to a string, indented "
return json.dumps(ordered_dict, indent=4)
def get_error_message(exception):
type_ = type(exception)
return "{}: {}".format(type_.__name__, str(exception))
def get_traceback(exception):
type_, value, tb = type(exception), exception, exception.__traceback__
return "".join(traceback.format_exception(type_, value, tb))
def read_code(path):
import tokenize
# encoding-safe open
with tokenize.open(path) as sourceFile:
contents = sourceFile.read()
return contents
|
beeverycreative/beeconnect
|
Loaders/PrinterInfoLoader.py
|
Python
|
gpl-2.0
| 5,218
| 0.012457
|
#!/usr/bin/env python3
"""
* Copyright (c) 2015 BEEVC - Electronic Systems This file is part of BEESOFT
* software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either
* version 3 of the License, or (at your option) any later version. BEESOFT is
* distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
* PARTICULAR PURPOSE. See the GNU General Public License for more details. You
* should have received a copy of the GNU General Public License along with
* BEESOFT. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Marcos Gomes"
__license__ = "MIT"
import json
import FileFinder
import pygame
class PrinterInfoLoader():
interfaceJson = None
lblJson = None
lblValJson = None
lblFont = None
lblFontColor = None
lblXPos = None
lblYPos = None
lblText = None
lblValFont = None
lblValFontColor = None
lblValXPos = None
lblValFont = None
lblValFontColor = None
displayWidth = 480
displayHeight = 320
"""*************************************************************************
Init Method
Inits current screen components
*************************************************************************"""
def __init__(self, interfaceJson, dispWidth, dispHeight):
self.displayWidth = dispWidth
self.displayHeight = dispHeight
self.interfaceJson = interfaceJson
self.lblJson = json.loads(json.dumps(self.interfaceJson['Labels']))
self.lblValJson = json.loads(json.dumps(self.interfaceJson['ValuesSettings']))
"""
Values Labels Configuration
"X":"220",
"FontType":"Bold",
"FontSize":"12",
"FontColor":"0,0,0"
"""
self.lblValXPos = int(float(self.lblValJson['X'])*self.displayWidth)
lblValFontType = self.lblValJson['FontType']
lblValFontSize = int(float(self.lblValJson['FontSize'])*self.displayHeight)
        self.lblValFont = self.GetFont(lblValFontType,lblValFontSize)
lblValFColor = self.lblValJson['FontColor']
splitColor = lblValFColor.split(",")
self.lblValFontColor = pygame.Color(int(splitColor[0]),int(splitColor[1]),int(splitColor[2]))
"""
Load Labels Configuration
"""
        self.lblText = []
self.lblXPos = []
self.lblYPos = []
self.lblFont = []
self.lblFontColor = []
for lbl in self.lblJson:
lblFontType = lbl['FontType']
lblFontSize = int(float(lbl['FontSize'])*self.displayHeight)
lblFColor = lbl['FontColor']
self.lblXPos.append(int(float(lbl['X'])*self.displayWidth))
self.lblYPos.append(int(float(lbl['Y'])*self.displayHeight))
self.lblText.append(lbl['Text'])
font = self.GetFont(lblFontType,lblFontSize)
self.lblFont.append(font)
splitColor = lblFColor.split(",")
fontColor = pygame.Color(int(splitColor[0]),int(splitColor[1]),int(splitColor[2]))
self.lblFontColor.append(fontColor)
return
"""
GetFont
"""
def GetFont(self,fontType,fontSize):
r"""
GetFont method
Receives as arguments:
fontType - Regular,Bold,Italic,Light
fontSize - font size
Returns:
pygame font object
"""
ff = FileFinder.FileFinder()
font = None
if fontType == "Regular":
font = pygame.font.Font(ff.GetAbsPath("/Fonts/DejaVuSans-Regular.ttf"),fontSize)
elif fontType == "Bold":
font = pygame.font.Font(ff.GetAbsPath("/Fonts/DejaVuSans-Bold.ttf"),fontSize)
elif fontType == "Italic":
font = pygame.font.Font(ff.GetAbsPath("/Fonts/DejaVuSans-Italic.ttf"),fontSize)
elif fontType == "Light":
font = pygame.font.Font(ff.GetAbsPath("/Fonts/DejaVuSans-Light.ttf"),fontSize)
return font
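    # Illustrative note, not part of the original file: GetFont("Bold", 18)
    # returns a pygame Font built from /Fonts/DejaVuSans-Bold.ttf at size 18,
    # and None when an unknown fontType is passed.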
"""
GetlblText(self)
returns the list with the label text
"""
def GetlblText(self):
return self.lblText
"""
GetlblFont
"""
def GetlblFont(self):
return self.lblFont
"""
GetlblFontColor
"""
def GetlblFontColor(self):
return self.lblFontColor
"""
GetlblXPos
"""
def GetlblXPos(self):
return self.lblXPos
"""
GetlblYPos
"""
def GetlblYPos(self):
return self.lblYPos
"""
GetlblValFont
"""
def GetlblValFont(self):
return self.lblValFont
"""
GetlblValFontColor
"""
def GetlblValFontColor(self):
return self.lblValFontColor
"""
GetlblValXPos
"""
def GetlblValXPos(self):
return self.lblValXPos
|
willseward/cattle
|
tests/integration/cattletest/core/test_docker.py
|
Python
|
apache-2.0
| 31,065
| 0.000032
|
import re
import uuid as py_uuid
from common_fixtures import * # NOQA
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
reason='DOCKER_TEST is not set')
@pytest.fixture(scope='session')
def docker_client(super_client):
for host in super_client.list_host(state='active', remove_null=True,
kind='docker'):
keys = super_client.list_api_key(accountId=host.accountId)
if len(keys) == 0:
key = super_client.create_api_key(accountId=host.accountId)
key = super_client.wait_success(key)
keys = [key]
return api_client(keys[0].publicValue, keys[0].secretValue)
raise Exception('Failed to find docker host, please register one')
@if_docker
def test_docker_create_only(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
assert image.name == '{}'.format(image.data.dockerImage.fullName,
image.data.dockerImage.id)
assert image.name == TEST_IMAGE_LATEST
assert image.data.dockerImage.repository == 'helloworld'
assert image.data.dockerImage.namespace == 'ibuildthecloud'
assert image.data.dockerImage.tag == 'latest'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_only_from_sha(docker_client, super_client):
image_name = 'tianon/true@sha256:662fc60808e6d5628a090e39' \
'b4bcae694add28a626031cc889109c2cf2af5d73'
uuid = 'docker:' + image_name
container = docker_client.create_container(name='test-sha256',
imageUuid=uuid,
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
assert image.name == '{}'.format(image.data.dockerImage.fullName,
image.data.dockerImage.id)
assert image.name == image_name
assert image.data.dockerImage.repository == 'true'
assert image.data.dockerImage.namespace == 'tianon'
assert image.data.dockerImage.tag == 'sha256:662fc60808e6d5628a090e' \
'39b4bcae694add28a626031cc8891' \
'09c2cf2af5d73'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_with_start(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == TEST_IMAGE_LATEST
assert len(container.volumes()) == 1
image = container.volumes()[0].image()
image = super_client.reload(image)
image_mapping = filter(
lambda m: not m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 1
assert image_mapping[0].imageId == image.id
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_build(docker_client, super_client):
uuid = 'image-' + random_str()
url = 'https://github.com/rancherio/tiny-build/raw/master/build.tar'
container = docker_client.create_container(name='test',
imageUuid='docker:' + uuid,
build={
'context': url,
})
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
# This builds tianon/true which just dies
assert container.state == 'running' or container.state == 'stopped'
assert container.transitioning == 'no'
assert container.data.dockerContainer.Image == uuid + ':latest'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_create_with_start_using_docker_io(docker_client, super_client):
image = 'docker.io/' + TEST_IMAGE
uuid = 'docker:' + image
container = docker_client.create_container(name='test', imageUuid=uuid)
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == image + ':latest'
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_command(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
command=['sleep', '42'])
try:
container = super_client.wait_success(container)
assert container.data.dockerContainer.Command == 'sleep 42'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_docker_command_args(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test',
imageUuid=uuid,
command=['sleep', '1', '2',
'3'])
try:
container = super_client.wait_success(container)
assert container.data.dockerContainer.Command == 'sleep 1 2 3'
finally:
if container is not None:
docker_client.wait_success(docker_client.delete(container))
@if_docker
def test_short_lived_container(docker_client, super_client):
container = docker_client.create_container(imageUuid="docker:tianon/true")
container = wait_for_condition(
docker_client, container,
lambda x: x.state == 'stopped',
lambda x: 'State is: ' + x.state)
assert container.state == 'stopped'
assert container.transitioning == 'no'
@if_docker
def test_docker_stop(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(name='test', imageUuid=uuid)
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
start = time.time()
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
    delta = time.time() - start
    assert container.state == 'stopped'
assert delta < 10
@if_docker
def test_docker_purge(docke
|
liverbirdkte/searchlight
|
searchlight/tests/utils.py
|
Python
|
apache-2.0
| 18,006
| 0
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import errno
import functools
import os
import shlex
import shutil
import socket
import subprocess
import fixtures
from oslo_config import cfg
from oslotest import moxstubout
import six
from six.moves import BaseHTTPServer
import testtools
import webob
from searchlight.common import config
from searchlight.common import exception
from searchlight.common import property_utils
from searchlight.common import utils
from searchlight.common import wsgi
from searchlight import context
CONF = cfg.CONF
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
# NOTE(bcwaldon): parse_args has to be called to register certain
# command-line options - specifically we need config_dir for
# the following policy tests
config.parse_args(args=[])
self.addCleanup(CONF.reset)
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = mox_fixture.stubs
self.stubs.Set(exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True)
self.test_dir = self.useFixture(fixtures.TempDir()).path
self.conf_dir = os.path.join(self.test_dir, 'etc')
utils.safe_mkdirs(self.conf_dir)
self.set_policy()
utils.register_plugin_opts()
def set_policy(self):
conf_file = "policy.json"
self.policy_file = self._copy_data_file(conf_file, self.conf_dir)
self.config(policy_file=self.policy_file, group='oslo_policy')
def set_property_protections(self, use_policies=False):
self.unset_property_protections()
conf_file = "property-protections.conf"
if use_policies:
conf_file = "property-protections-policies.conf"
self.config(property_protection_rule_format="policies")
self.property_file = self._copy_data_file(conf_file, self.test_dir)
self.config(property_protection_file=self.property_file)
def unset_property_protections(self):
for section in property_utils.CONFIG.sections():
property_utils.CONFIG.remove_section(section)
def _copy_data_file(self, file_name, dst_dir):
src_file_name = os.path.join('searchlight/tests/etc', file_name)
shutil.copy(src_file_name, dst_dir)
dst_file_name = os.path.join(dst_dir, file_name)
return dst_file_name
def set_property_protection_rules(self, rules):
with open(self.property_file, 'w') as f:
for rule_key in rules.keys():
f.write('[%s]\n' % rule_key)
for operation in rules[rule_key].keys():
roles_str = ','.join(rules[rule_key][operation])
f.write('%s = %s\n' % (operation, roles_str))
def config(self, **kw):
"""
Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
CONF.set_override(k, v, group)
class requires(object):
"""Decorator that initiates additional test setup/teardown."""
def __init__(self, setup=None, teardown=None):
self.setup = setup
self.teardown = teardown
def __call__(self, func):
def _runner(*args, **kw):
if self.setup:
self.setup(args[0])
func(*args, **kw)
if self.teardown:
self.teardown(args[0])
_runner.__name__ = func.__name__
_runner.__doc__ = func.__doc__
return _runner
class depends_on_exe(object):
"""Decorator to skip test if an executable is unavailable"""
def __init__(self, exe):
self.exe = exe
def __call__(self, func):
def _runner(*args, **kw):
cmd = 'which %s' % self.exe
exitcode, out, err = execute(cmd, raise_error=False)
if exitcode != 0:
args[0].disabled_message = 'test requires exe: %s' % self.exe
args[0].disabled = True
func(*args, **kw)
_runner.__name__ = func.__name__
_runner.__doc__ = func.__doc__
return _runner
def skip_if_disabled(func):
"""Decorator that skips a test if test case is disabled."""
@functools.wraps(func)
def wrapped(*a, **kwargs):
func.__test__ = False
test_obj = a[0]
message = getattr(test_obj, 'disabled_message',
'Test disabled')
if getattr(test_obj, 'disabled', False):
test_obj.skipTest(message)
func(*a, **kwargs)
return wrapped
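# Illustrative usage sketch, not part of the original module (the test class
# and method names are hypothetical): the two decorators above are typically
# stacked so that a test is skipped when a required executable is missing.
#
#   class TestWithXattr(BaseTestCase):
#       @depends_on_exe('xattr')
#       @skip_if_disabled
#       def test_something_needing_xattr(self):
#           ...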
def fork_exec(cmd,
exec_env=None,
logfile=None,
pass_fds=None):
"""
Execute a command using fork/exec.
    This is needed for program executions that need path
searching but cannot have a shell as their parent process, for
example: searchlight-api. When searchlight-api starts, it sets itself as
the parent process for its own process group. Thus the pid that
a Popen process would have is not the right pid to use for killing
the process group. This patch gives the test env direct access
to the actual pid.
:param cmd: Command to execute as an array of arguments.
:param exec_env: A dictionary representing the environment with
which to run the command.
    :param logfile: A path to a file which will hold the stdout/err of
the child process.
:param pass_fds: Sequence of file descriptors passed to the child.
"""
env = os.environ.copy()
if exec_env is not None:
for env_name, env_val in exec_env.items():
if callable(env_val):
env[env_name] = env_val(env.get(env_name))
else:
env[env_name] = env_val
pid = os.fork()
if pid == 0:
if logfile:
fds = [1, 2]
with open(logfile, 'r+b') as fptr:
for desc in fds: # close fds
try:
os.dup2(fptr.fileno(), desc)
except OSError:
pass
if pass_fds and hasattr(os, 'set_inheritable'):
# os.set_inheritable() is only available and needed
# since Python 3.4. On Python 3.3 and older, file descriptors are
# inheritable by default.
for fd in pass_fds:
os.set_inheritable(fd, True)
args = shlex.split(cmd)
os.execvpe(args[0], args, env)
else:
return pid
def wait_for_fork(pid,
raise_error=True,
expected_exitcode=0):
"""
Wait for a process to complete
This function will wait for the given pid to complete. If the
exit code does not match that of the expected_exitcode an error
is raised.
"""
rc = 0
try:
(pid, rc) = os.waitpid(pid, 0)
rc = os.WEXITSTATUS(rc)
if rc != expected_exitcode:
raise RuntimeError('The exit code %d is not %d'
% (rc, expected_exitcode))
except Exception:
if raise_error:
raise
return rc
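# Illustrative sketch, not part of the original module (the command and log
# path are hypothetical): fork_exec() and wait_for_fork() are meant to be
# used as a pair.
#
#   pid = fork_exec('sleep 1', logfile='/tmp/child.log')
#   rc = wait_for_fork(pid, expected_exitcode=0)  # raises if the exit code differs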
def execute(cmd,
raise_error=True,
no_ven
|
rosatolen/CTFd
|
serve.py
|
Python
|
apache-2.0
| 117
| 0
|
import sys
from CTFd import create_app
app = create_app()
app.run(debug=True, host="0.0.0.0", port=int(sys.argv[1]))
|
google/aiyprojects-raspbian
|
src/examples/vision/dish_classification.py
|
Python
|
apache-2.0
| 1,303
| 0.000767
|
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dish classification library demo."""
import argparse
from PIL import Image
from aiy.vision.inference import ImageInference
from aiy.vision.models import dish_classification
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', dest='input', required=True)
args = parser.parse_args()
with ImageInference(dish_classification.model()) as inference:
image = Image.open(args.input)
classes = dish_classification.get_classes(
inference.run(image), top_k=5, threshold=0.1)
for i, (label, score) in enumerate(classes):
print('Result %d: %s (prob=%f)' % (i, label, score))
if __name__ == '__main__':
main()
|
chispita/epiwork
|
apps/sander/views.py
|
Python
|
agpl-3.0
| 486
| 0.012346
|
from urllib import urlencode
from urllib2 import urlopen
from django.shortcuts import render
from django.http import HttpResponse, Http404
from cms.utils.html import clean_html
def index(request):
url = "http://results.influenzanet.info/results.php?" + urlencode(request.GET)
try:
content = urlopen(url, timeout=5).read()
except:
content = ""
#content = clean_html(content, full=False)
    return render(request, 'sander/sander.html' , locals())
|
avtomato/HackerRank
|
Python/_04_Sets/_04_Set .add()/solution.py
|
Python
|
mit
| 53
| 0
|
print(len({(input()) for _ in range(int(input()))}))
|
JackieLan/djangocms-container
|
app/myapp/urls.py
|
Python
|
mit
| 820
| 0.00122
|
from __future__ import print_function
#from cms.sitemaps import CMSSitemap
from django.conf.urls import * # NOQA
from django.conf.urls.i18n import i18n_patterns
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = i18n_patterns('',
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^', include('cms.urls')),
    # url(r'^taggit_autosuggest/', include('taggit_autosuggest.urls')),
)
# This is only needed when using runserver.
if settings.DEBUG:
urlpatterns = patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', # NOQA
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
) + staticfiles_urlpatterns() + urlpatterns # NOQA
|
ltowarek/budget-supervisor
|
third_party/nordigen/test/test_token_api.py
|
Python
|
mit
| 896
| 0
|
# coding: utf-8
"""
Nordigen Account Information Services API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0 (v2)
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nordigen
from nordigen.api.token_api import TokenApi # noqa: E501
from nordigen.rest import ApiException
|
class TestTokenApi(unittest.TestCase):
"""TokenApi
|
unit test stubs"""
def setUp(self):
self.api = TokenApi() # noqa: E501
def tearDown(self):
pass
def test_j_wt_obtain(self):
"""Test case for j_wt_obtain
"""
pass
def test_j_wt_refresh(self):
"""Test case for j_wt_refresh
"""
pass
if __name__ == '__main__':
unittest.main()
|
jpat82792/EloTab
|
logic.py
|
Python
|
gpl-3.0
| 6,308
| 0.005707
|
from reportlab.pdfgen import canvas as pdf_container
from reportlab.lib.pagesizes import letter, A4
from reportlab.lib.units import inch
'''Contains methods to split lengthy sections so that they are able to fit within the pdf. After splitting the
sections, it appends the newly formed sections to the "to_pdf_holder". '''
class ModelGeneratePDF:
def __init__(self, manager, path, file_name):
self.font = "Courier"
self.font_size = 14
self.name = file_name
self.file_path = path + "/" + self.name + ".pdf"
self.manager = manager
self.to_pdf_holder = []
self.start_height = 50
self.vertical_offset = 10 + ((self.font_size - 12) * 1.5)
self.new_pdf = pdf_container.Canvas(self.file_path, pagesize=letter, bottomup=0)
self.new_pdf.setFont(self.font, size=self.font_size)
self.start_index = 0
self.end_index = 0
# Splits long sections into multiple sections to prevent overflow
def split_sections(self, access_to_section, start_index, end_index, name, add_string_control):
length_limit = 58
if len(access_to_section.section[0][start_index:]) > length_limit:
new_song_section = SongSection(name=name)
if add_string_control == 0:
                new_song_section.section.append(access_to_section.section[0][start_index:end_index])
new_song_section.section.append(access_to_section.section[1][start_index:end_index])
new_song_section.section.append(access_to_section.section[2][start_index:end_index])
new_song_section.section.append(access_to_section.section[3][start_index:end_index])
                new_song_section.section.append(access_to_section.section[4][start_index:end_index])
new_song_section.section.append(access_to_section.section[5][start_index:end_index])
start_index += 62
end_index += 58
else:
new_song_section.section.append("e|" + access_to_section.section[0][start_index:end_index])
new_song_section.section.append("B|" + access_to_section.section[1][start_index:end_index])
new_song_section.section.append("G|" + access_to_section.section[2][start_index:end_index])
new_song_section.section.append("D|" + access_to_section.section[3][start_index:end_index])
new_song_section.section.append("A|" + access_to_section.section[4][start_index:end_index])
new_song_section.section.append("E|" + access_to_section.section[5][start_index:end_index])
start_index += 58
end_index += 58
self.to_pdf_holder.append(new_song_section)
self.split_sections(access_to_section, start_index, end_index, name, 1)
else:
new_song_section = SongSection(name=name)
if add_string_control == 0:
new_song_section.section.append(access_to_section.section[0][start_index:end_index])
new_song_section.section.append(access_to_section.section[1][start_index:end_index])
new_song_section.section.append(access_to_section.section[2][start_index:end_index])
new_song_section.section.append(access_to_section.section[3][start_index:end_index])
new_song_section.section.append(access_to_section.section[4][start_index:end_index])
new_song_section.section.append(access_to_section.section[5][start_index:end_index])
else:
new_song_section.section.append("e|" + access_to_section.section[0][start_index:end_index])
new_song_section.section.append("B|" + access_to_section.section[1][start_index:end_index])
new_song_section.section.append("G|" + access_to_section.section[2][start_index:end_index])
new_song_section.section.append("D|" + access_to_section.section[3][start_index:end_index])
new_song_section.section.append("A|" + access_to_section.section[4][start_index:end_index])
new_song_section.section.append("E|" + access_to_section.section[5][start_index:end_index])
self.to_pdf_holder.append(new_song_section)
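    # Illustrative note, not part of the original file: with the limits used
    # above, a tab line longer than 62 characters is emitted as a first,
    # unprefixed 62-character chunk followed by chunks of up to 58 characters,
    # each of which has the string names ("e|", "B|", "G|", "D|", "A|", "E|")
    # prepended.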
# This function is used to gather sections, decide if they need to be split, and pass them to "split_sections" method if
# necessary.
def gather_sections(self):
temp_start_index = 0
temp_end_index = 62
for section_name in self.manager.second_screen_content.listview_array:
for section in self.manager.first_screen_content.section_holder:
if section_name == section.name:
if len(section.section[0]) > temp_end_index:
self.split_sections(section, temp_start_index, temp_end_index, section.name, 0)
else:
self.to_pdf_holder.append(section)
temp_start_index = 0
temp_end_index = 62
#
def draw_sections_to_pdf(self):
holder = 0
holder_vertical_offset = self.vertical_offset
for section in self.to_pdf_holder:
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[0])
holder += 1
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[1])
holder += 1
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[2])
holder += 1
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[3])
holder += 1
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[4])
holder += 1
self.new_pdf.drawString(1*inch, self.start_height + (holder * holder_vertical_offset), section.section[5])
holder += 1
self.start_height += 20
self.new_pdf.save()
class SongSection:
def __init__(self, name):
self.section = []
self.name = name
|
ayoubg/gem5-graphics
|
gem5/configs/ruby/MI_example.py
|
Python
|
bsd-3-clause
| 8,604
| 0.009182
|
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Brad Beckmann
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
#
# Declare caches used by the protocol
#
class L1Cache(RubyCache): pass
def define_options(parser):
return
def create_system(options, full_system, system, dma_ports, ruby_system):
if buildEnv['PROTOCOL'] != 'MI_example':
panic("This script requires the MI_example protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
l1_cntrl_nodes = []
dir_cntrl_nodes = []
dma_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
block_size_bits = int(math.log(options.cacheline_size, 2))
for i in xrange(options.num_cpus):
#
# First create the Ruby objects associated with this cpu
# Only one cache exists for this protocol, so by default use the L1D
# config parameters.
#
cache = L1Cache(size = options.l1d_size,
assoc = options.l1d_assoc,
start_index_bit = block_size_bits)
#
# Only one unified L1 cache exists. Can cache instructions and data.
#
l1_cntrl = L1Cache_Controller(version = i,
cacheMemory = cache,
send_evictions = send_evicts(options),
transitions_per_cycle = options.ports,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
cpu_seq = RubySequencer(version = i,
icache = cache,
dcache = cache,
clk_domain=system.cpu[i].clk_domain,
ruby_system = ruby_system)
l1_cntrl.sequencer = cpu_seq
exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
# Add controllers and sequencers to the appropriate lists
cpu_sequencers.append(cpu_seq)
l1_cntrl_nodes.append(l1_cntrl)
# Connect the L1 controllers and the network
l1_cntrl.mandatoryQueue = MessageBuffer()
l1_cntrl.requestFromCache = MessageBuffer(ordered = True)
        l1_cntrl.requestFromCache.master = ruby_system.network.slave
l1_cntrl.responseFromCache = MessageBuffer(ordered = True)
        l1_cntrl.responseFromCache.master = ruby_system.network.slave
l1_cntrl.forwardToCache = MessageBuffer(ordered = True)
l1_cntrl.forwardToCache.slave = ruby_system.network.master
l1_cntrl.responseToCache = MessageBuffer(ordered = True)
l1_cntrl.responseToCache.slave = ruby_system.network.master
phys_mem_size = sum(map(lambda r: r.size(), system.mem_ranges))
assert(phys_mem_size % options.num_dirs == 0)
mem_module_size = phys_mem_size / options.num_dirs
# Run each of the ruby memory controllers at a ratio of the frequency of
# the ruby system.
# clk_divider value is a fix to pass regression.
ruby_system.memctrl_clk_domain = DerivedClockDomain(
clk_domain=ruby_system.clk_domain,
clk_divider=3)
for i in xrange(options.num_dirs):
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
dir_cntrl = Directory_Controller(version = i,
directory = RubyDirectoryMemory(
version = i, size = dir_size),
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
# Connect the directory controllers and the network
dir_cntrl.requestToDir = MessageBuffer(ordered = True)
dir_cntrl.requestToDir.slave = ruby_system.network.master
dir_cntrl.dmaRequestToDir = MessageBuffer(ordered = True)
dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master
dir_cntrl.responseFromDir = MessageBuffer()
dir_cntrl.responseFromDir.master = ruby_system.network.slave
dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered = True)
dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave
dir_cntrl.forwardFromDir = MessageBuffer()
dir_cntrl.forwardFromDir.master = ruby_system.network.slave
dir_cntrl.responseFromMemory = MessageBuffer()
for i, dma_port in enumerate(dma_ports):
#
# Create the Ruby objects associated with the dma controller
#
dma_seq = DMASequencer(version = i,
ruby_system = ruby_system)
dma_cntrl = DMA_Controller(version = i,
dma_sequencer = dma_seq,
transitions_per_cycle = options.ports,
ruby_system = ruby_system)
exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
dma_cntrl_nodes.append(dma_cntrl)
# Connect the directory controllers and the network
dma_cntrl.mandatoryQueue = MessageBuffer()
dma_cntrl.requestToDir = MessageBuffer()
dma_cntrl.requestToDir.master = ruby_system.network.slave
dma_cntrl.responseFromDir = MessageBuffer(ordered = True)
dma_cntrl.responseFromDir.slave = ruby_system.network.master
all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
# Create the io controller and the sequencer
if full_system:
io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
ruby_system._io_port = io_seq
io_controller = DMA_Controller(version = len(dma_ports),
dma_sequencer = io_seq,
ruby_system = ruby_system)
ruby_system.io_controller = io_controller
# Connect the dma controller to the network
io_controller.mandatoryQueue = MessageBuffer()
|
SpeedProg/eve-inc-waitlist
|
waitlist/blueprints/api/teamspeak.py
|
Python
|
mit
| 473
| 0
|
from flask.blueprints import Blueprint
import logging
from flask_login import login_required, current_user
from waitlist.ts3.connection import send_poke
from flask import jsonify
bp = Blueprint('api_ts3', __name__)
logger = logging.getLogger(__name__)
@bp.route("/test_poke")
@login_required
def test_poke():
send_poke(current_user.get_eve_name(), "Test Poke")
resp = jsonify(status_code=201, message="Poke was send!")
resp.status_code = 201
return resp
|
GGFHF/NGScloud
|
Package/cmenu.py
|
Python
|
gpl-3.0
| 58,331
| 0.00396
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
'''
This software has been developed by:
GI Genética, Fisiología e Historia Forestal
Dpto. Sistemas y Recursos Naturales
ETSI Montes, Forestal y del Medio Natural
Universidad Politécnica de Madrid
http://gfhforestal.com/
https://github.com/ggfhf/
Licence: GNU General Public Licence Version 3.
'''
#-------------------------------------------------------------------------------
'''
This file contains the functions related to the SOAPdenovo-Trans menus in console mode.
'''
#-------------------------------------------------------------------------------
import sys
import cbioinfoapp
import ccloud
import cdataset
import clib
import clog
import xlib
#-------------------------------------------------------------------------------
def build_menu_main():
'''
Build the menu Main.
'''
while True:
# print headers
clib.clear_screen()
clib.print_headers_with_environment('Main')
# print the menu options
print('Options:')
print()
print(' 1. Cloud control')
print(' 2. RNA-seq')
print(' 3. Datasets')
print(' 4. Logs')
print()
print(' X. Exit NGScloud')
print()
# get the selected option
option = input('Input the selected option: ').upper()
# process the selected option
if option == '1':
build_menu_cloud_control()
elif option == '2':
build_menu_rnaseq()
elif option == '3':
build_menu_datasets()
elif option == '4':
build_menu_logs()
elif option == 'X':
sure = ''
print('')
while sure not in ['Y', 'N']:
sure = input('Are you sure to exit NGScloud (y or n)?: ').upper()
if sure == 'Y':
break
#-------------------------------------------------------------------------------
def build_menu_cloud_control():
'''
Build the menu Cloud control.
'''
while True:
# print headers
clib.clear_screen()
clib.print_headers_with_environment('Cloud control')
# print the menu options
print('Options:')
print()
print(' 1. Set environment')
print()
print(' 2. Configuration')
print(' 3. Security')
print()
print(' 4. Cluster operation')
print(' 5. Node operation')
print(' 6. Volume operation')
print()
print(' 7. Bioinfo software setup')
print()
print(' 8. Open a terminal')
print()
print(' X. Return to menu Main')
print()
# get the selected option
option = input('Input the selected option: ').upper()
# process the selected option
if option == '1':
ccloud.form_set_environment()
elif option == '2':
build_menu_configuration()
elif option == '3':
build_menu_security()
elif option == '4':
build_menu_cluster_operation()
elif option == '5':
build_menu_node_operation()
elif option == '6':
build_menu_volume_operation()
elif option == '7':
build_menu_bioinfo_software_setup()
elif option == '8':
ccloud.form_open_terminal()
elif option == 'X':
break
#-------------------------------------------------------------------------------
def build_menu_configuration():
'''
Build the menu Configuration.
'''
while True:
# print headers
clib.clear_screen()
        clib.print_headers_with_environment('Configuration')
# print the menu options
print('Options:')
print()
print(' 1. Recreate NGScloud config file')
print(' 2. View NGScloud config file')
print()
print(' 3. List cluster templates')
print()
print(' 4. Update connection data and contact e-mail')
        print('    5. Update region and zone')
print()
print(' 6. Link volume in a cluster template')
print(' 7. Delink volume in a cluster template')
print(' 8. Review volumes linked to cluster templates')
print()
print(' X. Return to menu Cloud control')
print()
# get the selected option
option = input('Input the selected option: ').upper()
# process the selected option
if option == '1':
ccloud.form_create_ngscloud_config_file(is_menu_call=True)
elif option == '2':
ccloud.form_view_ngscloud_config_file()
elif option == '3':
ccloud.form_list_templates()
elif option == '4':
ccloud.form_update_connection_data()
elif option == '5':
ccloud.form_update_region_zone()
elif option == '6':
ccloud.form_link_volume_to_template()
elif option == '7':
ccloud.form_delink_volume_from_template()
elif option == '8':
ccloud.form_review_volume_links()
elif option == 'X':
break
#-------------------------------------------------------------------------------
def build_menu_security():
'''
Build the menu Security.
'''
while True:
# print headers
clib.clear_screen()
clib.print_headers_with_environment('Security')
# print the menu options
print('Options:')
print()
print(' 1. List key pairs')
print(' 2. Create key pairs')
print()
print(' 3. List cluster security groups (coming soon!)')
print(' 4. Force removal of a cluster security group (coming soon!)')
print()
print(' X. Return to menu Cloud control')
print()
# get the selected option
option = input('Input the selected option: ').upper()
# process the selected option
if option == '1':
ccloud.form_list_keypairs()
elif option == '2':
ccloud.form_create_keypairs()
elif option == '3':
pass
        elif option == '4':
pass
elif option == 'X':
break
#-------------------------------------------------------------------------------
def build_menu_cluster_operation():
'''
Build the menu Cluster operation.
'''
while True:
# print headers
clib.clear_screen()
clib.print_headers_with_environment('Cluster operation')
# print the menu options
print('Options:')
print()
print(' 1. List clusters')
print()
print(' 2. Create cluster')
print(' 3. Stop cluster')
print(' 4. Restart cluster')
print(' 5. Terminate cluster')
print()
print(' 6. Force termination of a cluster')
print()
print(' 7. Show cluster composition')
print()
print(' 8. Show status of batch jobs')
print(' 9. Kill batch job')
print()
print(' X. Return to menu Cloud Control')
print()
# get the selected option
option = input('Input the selected option: ').upper()
# process the selected option
if option == '1':
ccloud.form_list_clusters()
elif option == '2':
ccloud.form_create_cluster()
elif option == '3':
ccloud.form_stop_cluster()
elif option == '4':
ccloud.form_restart_cluster()
elif option == '5':
ccloud.form_terminate_cluster(force=False)
elif option == '6':
ccloud.form_terminate_cluster(force=True)
elif option == '7':
ccloud.form_show_cluster_composing()
elif option == '8':
ccloud.form_show_status_batch_jobs()
elif option == '9':
ccloud.form_kill_batch_job()
elif option == 'X':
break
#--
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/simulator/public_billing/test_check_vm_lifecycle_with_cpu_billing.py
|
Python
|
apache-2.0
| 3,329
| 0.029438
|
'''
New Test For cpu bill Operations
1.test vm stop
2.test vm destroy
3.test vm live migration
4.test vm clean
@author Antony WeiJiang
'''
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.operations.billing_operations as bill_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.net_operations as net_ops
import threading
import time
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
vm = None
def test():
test_util.test_logger("start check vm lifecycle")
test_util.test_logger("create public ip billing")
bill_cpu = test_stub.CpuBilling()
bill_cpu.create_resource_type()
test_util.test_logger("create vm instance")
global vm
vm = test_stub.create_vm_billing("test_vmm", test_stub.set_vm_resource()[0], None,\
test_stub.set_vm_resource()[1], test_stub.set_vm_resource()[2])
cpuNum = res_ops.query_resource_fields(res_ops.INSTANCE_OFFERING, \
res_ops.gen_query_conditions('uuid', '=',\
test_stub.set_vm_resource()[1]))[0].cpuNum
time.sleep(1)
test_util.test_logger("verify calculate if right is")
if bill_cpu.get_price_total().total < cpuNum * int(bill_cpu.get_price()):
test_util.test_fail("test billing fail, bill is %s ,less than %s"\
                %(bill_cpu.get_price_total().total,cpuNum * bill_cpu.get_price()))
test_util.test_logger("stop vm instance")
vm.stop()
bill_cpu.compare("
|
stop")
test_util.test_logger("destory vm instance")
vm.destroy()
bill_cpu.compare("destory")
test_util.test_logger("recover vm instance")
vm.recover()
vm.start()
bill_cpu.compare("recover")
test_util.test_logger("get host total and primarystorge type")
Host_uuid = test_stub.get_resource_from_vmm(res_ops.HOST,vm.get_vm().zoneUuid,vm.get_vm().hostUuid)
PrimaryFlag = test_stub.get_resource_from_vmm(res_ops.PRIMARY_STORAGE,vm.get_vm().zoneUuid,\
vm.get_vm().hostUuid)
test_util.test_logger("antony %s" %(Host_uuid))
test_util.test_logger("antony %s" %(PrimaryFlag))
if Host_uuid and PrimaryFlag == 0:
test_util.test_logger("migration vm instance")
prices = bill_cpu.get_price_total()
vm.migrate(Host_uuid)
prices1 = bill_cpu.get_price_total()
if prices1.total > prices.total:
bill_cpu.compare("migration")
else:
test_util.test_fail("test bill fail, maybe can not calculate when vm live migration")
test_util.test_logger("clean vm instance")
vm.clean()
bill_cpu.compare("clean")
test_util.test_logger("delete public ip resource")
resourcePrices = test_stub.query_resource_price()
for resource_price in resourcePrices:
test_stub.delete_price(resource_price.uuid)
test_util.test_pass("check vm lifecycle with public ip billing pass")
def error_cleanup():
global vm
if vm:
vm.clean()
def env_recover():
global vm
if vm:
vm.clean()
resourcePrices = test_stub.query_resource_price()
if resourcePrices:
for resourceprice in resourcePrices:
test_stub.delete_price(resourceprice.uuid)
|
baojiwei/flasky
|
app/auth/views.py
|
Python
|
mit
| 136
| 0
|
from flask import render_template
from . import auth
@auth.route('/login')
def login():
    return render_template('auth/login.html')
|
okfn/bibserver
|
test/test_web.py
|
Python
|
mit
| 3,627
| 0.003033
|
from nose.tools import assert_equal
import urllib
from base import *
from bibserver import web, ingest
import os
class TestWeb(object):
@classmethod
def setup_class(cls):
web.app.config['TESTING'] = True
cls.app = web.app.test_client()
# fixture data
recdict = fixtures['records'][0]
cls.record = dao.Record.upsert(recdict)
Fixtures.create_account()
config['download_cache_directory'] = 'test/data/downloads'
ingest.init()
@classmethod
def teardown_class(cls):
conn, db = dao.get_conn()
conn.delete_index(TESTDB)
for x in os.listdir('test/data/downloads'):
os.unlink(os.path.join('test/data/downloads', x))
os.rmdir('test/data/downloads')
def test_home(self):
res = self.app.get('/')
assert 'BibSoup' in res.data, res.data
def test_faq(self):
res = self.app.get('/faq')
assert 'This service is an example' in res.data, res.data
def test_record(self):
res = self.app.get('/' + Fixtures.account.id + '/' + self.record["collection"] + '/' + self.record["_id"] + '.json')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert out["id"] == self.record["id"], out
def test_upload(self):
res = self.app.get('/upload')
print res.status
assert res.status == '302 FOUND', res.status
res = self.app.get('/upload',
headers={'REMOTE_USER': Fixtures.account.id}
)
assert res.status == '200 OK', res.status
assert 'upload' in res.data, res.data
def test_upload_post(self):
startnum = dao.Record.query().total
res = self.app.post('/upload?format=bibtex&collection='+urllib.quote_plus('"My Test Collection"'),
data = {'upfile': (open('test/data/sample.bibtex'), 'sample.bibtex')},
headers={'REMOTE_USER': Fixtures.account.id}
)
assert res.status == '302 FOUND', res.status
# Now we have to trigger the ingest handling of the ticket
# which is normally done asynchronously
for state in ('new', 'downloaded', 'parsed'):
for t in ingest.get_tickets(state):
ingest.determine_action(t)
endnum = dao.Record.query().total
assert_equal(endnum, startnum+1)
# TODO: re-enable
# This does not work because login in the previous method appears to
    # persist to this method. Not sure how to fix this ...
def _test_upload_post_401(self):
bibtex_data = open('test/data/sample.bibtex').read()
res = self.app.post('/upload',
data=dict(
format='bibtex',
collection='My Test Collection',
                data=bibtex_data,
)
)
assert res.status == '401 UNAUTHORIZED', res.status
def test_query(self):
res = self.app.get('/query')
assert res.status == '200 OK', res.status
res = self.app.get('/query?q=title:non-existent')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert out.total == 0, out
def test_accounts_query_inaccessible(self):
res = self.app.get('/query/account')
assert res.status == '401 UNAUTHORIZED', res.status
def test_search(self):
res = self.app.get('/search?q=tolstoy&format=json')
assert res.status == '200 OK', res.status
out = json.loads(res.data)
assert len(out) == 1, out
assert "Tolstoy" in out[0]["author"][0]["name"], out
|
qll/autoCSP
|
testsuite/testcases/fixhtml.py
|
Python
|
mit
| 1,054
| 0.003795
|
#!/usr/bin/env python2
""" Quick helper to add HTML5 DOCTYPE and <title> to every testcase. """
import os
import re
import sys
def fixhtml(folder):
changed = 0
for dirpath, _, filenames in os.walk(folder):
for file in filenames:
name, ext = os.path.splitext(file)
if ext != '.html':
continue
path = '%s/%s' % (dirpath, file)
title = ' '.join(name.split('-'))
            shouldbe = '<!DOCTYPE html>\n<title>%s</title>\n' % title
with open(path, 'r') as f:
content = f.read()
if content.startswith(shouldbe):
continue
changed += 1
content = re.sub('\s*<!DOCTYPE[^>]*>\s*<title>[^<]*</title>\s*', '',
content)
with open(path, 'w') as f:
f.write(shouldbe + content)
return changed
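# Illustrative example, not part of the original script (the file name is
# hypothetical): a testcase named "block-inline-script.html" is rewritten so
# that it starts with
#   <!DOCTYPE html>
#   <title>block inline script</title>
# followed by its original markup; files that already start with that prefix
# are left untouched.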
if __name__ == '__main__':
folder = '.' if len(sys.argv) < 2 else sys.argv[1]
changed = fixhtml(folder)
print('Fixed %d files.' % changed)
|
WarrenWeckesser/scipy
|
scipy/constants/__init__.py
|
Python
|
bsd-3-clause
| 12,116
| 0.001073
|
r"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
================ =================================================================
``pi`` Pi
``golden`` Golden ratio
``golden_ratio`` Golden ratio
================ =================================================================
Physical constants
==================
=========================== =================================================================
``c`` speed of light in vacuum
``speed_of_light`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``Planck`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``gravitational_constant`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``elementary_charge`` elementary charge
``R`` molar gas constant
``gas_constant`` molar gas constant
``alpha`` fine-structure constant
``fine_structure`` fine-structure constant
``N_A`` Avogadro constant
``Avogadro`` Avogadro constant
``k`` Boltzmann constant
``Boltzmann`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``electron_mass`` electron mass
``m_p`` proton mass
``proton_mass`` proton mass
``m_n`` neutron mass
``neutron_mass`` neutron mass
=========================== =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2018 CODATA recommended values [CODATA2018]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
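Example (illustrative; the numerical value shown is the 2018 CODATA figure)::

    >>> from scipy import constants
    >>> constants.value(u'electron mass')
    9.1093837015e-31
    >>> constants.unit(u'electron mass')
    'kg'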
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Mass
----
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb`` one pound (avoirdupous) in kg
``pound`` one pound (avoirdupous) in kg
``blob`` one inch version of a slug in kg (added in 1.0.0)
``slinch`` one inch version of a slug in kg (added in 1.0.0)
``slug`` one slug in kg (added in 1.0.0)
``oz`` one ounce in kg
``ounce`` one ounce in kg
``stone`` one stone in kg
``grain`` one grain in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
``u`` atomic mass constant (in kg)
``atomic_mass`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcminute`` arc minute in radians
``arcsec`` arc second in radians
``arcsecond`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
===================== ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``point`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``astronomical_unit`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
===================== ============================================================
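All of the length units are given in meters, so conversions are single
multiplications or divisions. A brief sketch (again assuming ``scipy.constants``):

>>> from scipy import constants
>>> constants.mile              # one mile in meters
1609.344
>>> 5 * constants.nautical_mile  # five nautical miles in meters
9260.0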
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``atmosphere`` standard atmosphere in pascals
``bar``           one bar in pascals
``torr`` one torr (mmHg) in pascals
``mmHg`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``litre``           one liter in cubic meters
|
zstackio/zstack-woodpecker
|
integrationtest/vm/mini/multiclusters/paths/multi_path131.py
|
Python
|
apache-2.0
| 2,990
| 0.01806
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.create_mini_vm, 'vm2', 'cpu=random', 'cluster=cluster1'],
[TestAction.resize_volume, 'vm2', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume2'],
[TestAction.detach_volume, 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=thick,scsi'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
[TestAction.delete_vm_backup, 'vm1-backup2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup4'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_image_from_volume, 'vm2', 'vm2-image2'],
[TestAction.attach_volume, 'vm2', 'volume2'],
[TestAction.detach_volume, 'volume2'],
[TestAction.delete_volume, 'volume4'],
[TestAction.attach_volume, 'vm2', 'volume2'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_volume_backup, 'volume2', 'volume2-backup5'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_volume_backup, 'volume2-backup5'],
[TestAction.create_mini_vm, 'vm3', 'cpu=random', 'cluster=cluster1'],
[TestAction.expunge_volume, 'volume4'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster1'],
[TestAction.create_volume, 'volume5', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume5'],
[TestAction.create_volume_backup, 'volume5', 'volume5-backup6'],
[TestAction.resize_data_volume, 'volume5', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
	[TestAction.start_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup7'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_vm_backup, 'vm2-backup7'],
])
'''
The final status:
Running:['vm1', 'vm3', 'vm4']
Stopped:['vm2']
Enabled:['volume1-backup1', 'volume4-backup4', 'volume2-backup5', 'volume5-backup6', 'vm2-backup7', 'volume2-backup7', 'vm2-image2']
attached:['volume1', 'volume2', 'volume5']
Detached:['volume3']
Deleted:['vm1-backup2', 'volume1-backup2']
Expunged:['volume4', 'image1']
Ha:['vm1']
Group:
vm_backup1:['vm2-backup7', 'volume2-backup7']---vm2@volume2
'''
|
qwhelan/asv
|
test/test_repo_template/setup.py
|
Python
|
bsd-3-clause
| 434
| 0
|
import os
from setuptools import setup
with open('asv_test_repo/build_time_env.py', 'w') as f:
f.write("env = {{}}\n".format(repr(dict(os.environ))))
setup(name='asv_test_repo',
version="{version}",
packages=['asv_test_repo'],
# The following forces setuptools to generate .egg-info directory,
      # which causes problems in test_environment.py:test_install_success
include_package_data=True,
)
|
catapult-project/catapult
|
third_party/gsutil/gslib/vendored/boto/boto/cloudhsm/__init__.py
|
Python
|
bsd-3-clause
| 1,726
| 0
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
def regions():
"""
Get all available regions for the AWS CloudHSM service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.cloudhsm.layer1 import CloudHSMConnection
return get_regions('cloudhsm', connection_cls=CloudHSMConnection)
def connect_to_region(region_name, **kw_params):
from boto.cloudhsm.layer1 import CloudHSMConnection
return connect('cloudhsm', region_name, connection_cls=CloudHSMConnection,
**kw_params)
|
harpribot/deep-summarization
|
train_scripts/train_script_gru_simple_attn.py
|
Python
|
mit
| 879
| 0.004551
|
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))
from models import gru_simple
from helpers import checkpoint
# Get the review summary file
review_summary_file = 'extracted_data/review_summary.csv'
# Initialize Checkpointer to ensure checkpointing
checkpointer = checkpoint.Checkpointer('simple', 'gru', 'Attention')
checkpointer.steps_per_checkpoint(1000)
checkpointer.steps_per_prediction(1000)
# Do using GRU cell - with attention mechanism
out_file = 'result/simple/gru/attention.csv'
checkpointer.set_result_location(out_file)
gru_net = gru_simple.GruSimple(review_summary_file, checkpointer, attention=True)
gru_net.set_parameters(train_batch_size=128, test_batch_size=128, memory_dim=128, learning_rate=0.05)
gru_net.begin_session()
gru_net.form_model_graph()
gru_net.fit()
gru_net.predict()
gru_net.store_test_predictions()
|
jiahaoliang/group-based-policy
|
gbpservice/nfp/service_vendor_agents/vyos/oc_config_server/vpn_api_server.py
|
Python
|
apache-2.0
| 17,163
| 0.000932
|
#!/usr/bin/env python
import logging
import json
import netifaces
import netaddr
import socket
import fcntl
import struct
import array
import time
import ast
import copy
import subprocess
import os
from netaddr import IPNetwork, IPAddress
from operations import configOpts
from vyos_session import utils
from netifaces import AF_INET, AF_INET6, AF_LINK, AF_PACKET, AF_BRIDGE
#from vyos_session.configsession import ConfigSession as session
from execformat.executor import session
OP_SUCCESS = True
OP_FAILED = False
OP_COMMAND_SCRIPT = "/usr/share/vyos-oc/vpn_op_commands.pl"
IPSEC_SITE2SITE_COMMANDS = {
'ike': [
'set vpn ipsec ike-group %s proposal 1',
'set vpn ipsec ike-group %s proposal 1 encryption %s',
'set vpn ipsec ike-group %s proposal 1 hash %s',
'set vpn ipsec ike-group %s proposal 2 encryption %s',
'set vpn ipsec ike-group %s proposal 2 hash %s',
'set vpn ipsec ike-group %s lifetime %d',
'set vpn ipsec ike-group %s dead-peer-detection action restart',
'set vpn ipsec ike-group %s dead-peer-detection interval %s',
'set vpn ipsec ike-group %s dead-peer-detection timeout %s'],
'esp': [
'set vpn ipsec esp-group %s proposal 1',
'set vpn ipsec esp-group %s proposal 1 encryption %s',
'set vpn ipsec esp-group %s proposal 1 hash %s',
'set vpn ipsec esp-group %s proposal 2 encryption %s',
'set vpn ipsec esp-group %s proposal 2 hash %s',
'set vpn ipsec esp-group %s lifetime %d',
'set vpn ipsec auto-update 60'],
'conn': [
'set vpn ipsec ipsec-interfaces interface %s',
'set vpn ipsec site-to-site peer %s \
authentication mode pre-shared-secret',
'set vpn ipsec site-to-site peer %s \
authentication pre-shared-secret %s',
'set vpn ipsec site-to-site peer %s default-esp-group %s',
'set vpn ipsec site-to-site peer %s ike-group %s',
'set vpn ipsec site-to-site peer %s local-address %s',
        'set vpn ipsec site-to-site peer %s authentication remote-id %s',
'set vpn ipsec site-to-site peer %s tunnel %d local prefix %s',
'set vpn ipsec site-to-site peer %s tunnel %d remote prefix %s',
'set vpn ipsec site-to-site peer %s authentication id %s'],
'delete': [
        'delete vpn ipsec site-to-site peer %s',
'delete vpn ipsec site-to-site peer %s tunnel %s',
'delete vpn ipsec'],
'show': [
'show vpn ipsec sa peer %s']}
SSL_VPN_COMMANDS = {
'create': [
'set interfaces openvpn %s',
'set interfaces openvpn %s mode server',
'set interfaces openvpn %s server subnet %s',
'set interfaces openvpn %s tls ca-cert-file /config/auth/ca.crt',
'set interfaces openvpn %s tls cert-file /config/auth/server.crt',
'set interfaces openvpn %s tls dh-file /config/auth/dh.pem',
'set interfaces openvpn %s tls key-file /config/auth/server.key',
'set interfaces openvpn %s server push-route %s',
'set interfaces openvpn %s openvpn-option \
"--client-cert-not-required --script-security 3 \
--auth-user-pass-verify /usr/share/vyos-oc/auth_pam.pl via-file"'],
#'set interfaces openvpn %s local-host %s'],
'delete': [
'delete interfaces openvpn %s',
'delete interfaces openvpn vtun0 server push-route %s']}
logger = logging.getLogger(__name__)
utils.init_logger(logger)
class NoInterfaceOnCidr(Exception):
def __init__(self, **kwargs):
self.message = _("No interface in the network '%(cidr)s'") % kwargs
class VPNHandler(configOpts):
def __init__(self):
super(VPNHandler, self).__init__()
def create_ipsec_site_conn(self, ctx):
session.setup_config_session()
siteconn = ctx['siteconns'][0]
self._create_ike_group(siteconn['ikepolicy'],
siteconn['connection']['dpd'])
self._create_esp_group(siteconn['ipsecpolicy'])
self._create_ipsec_site_conn(ctx)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def create_ipsec_site_tunnel(self, tunnel):
session.setup_config_session()
self._create_ipsec_site_tunnel(tunnel)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def _ipsec_get_tunnel_idx(self, tunnel):
command = 'perl'
command += " " + OP_COMMAND_SCRIPT
command += " " + 'get_ipsec_tunnel_idx'
command += " " + tunnel['peer_address']
command += " " + tunnel['local_cidr']
command += " " + tunnel['peer_cidr']
proc = subprocess.Popen(
command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
tunidx = out.split('=')[1]
return int(tunidx)
def _ipsec_get_tunnel_count(self, tunnel):
command = 'perl'
command += " " + OP_COMMAND_SCRIPT
command += " " + 'get_ipsec_tunnel_count'
command += " " + tunnel['peer_address']
proc = subprocess.Popen(
command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
tuncount = out.split('=')[1]
return int(tuncount)
def delete_ipsec_site_tunnel(self, tunnel):
try:
session.setup_config_session()
self._delete_ipsec_site_tunnel(tunnel)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
except Exception as ex:
logger.error("Error in deleting ipsec site tunnel. %s" % ex)
return OP_FAILED
def delete_ipsec_site_conn(self, peer_address):
try:
session.setup_config_session()
self._delete_ipsec_site_conn(peer_address)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
except Exception as ex:
logger.error("Error in deleting ipsec site connection. %s" % ex)
return OP_FAILED
def create_ssl_vpn_conn(self, ctx):
session.setup_config_session()
self._create_ssl_vpn_conn(ctx)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def ssl_vpn_push_route(self, route):
session.setup_config_session()
self._ssl_vpn_push_route(route)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def delete_ssl_vpn_conn(self, tunnel):
session.setup_config_session()
self._delete_ssl_vpn_conn(tunnel)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def delete_ssl_vpn_route(self, route):
session.setup_config_session()
self._delete_ssl_vpn_route(route)
session.commit()
session.save()
time.sleep(2)
session.teardown_config_session()
return OP_SUCCESS
def get_ssl_vpn_conn_state(self, peer_address):
return OP_SUCCESS, 'UP'
def get_ipsec_site_tunnel_state(self, tunnel):
tunidx = self._ipsec_get_tunnel_idx(tunnel)
command = 'perl'
command += " " + OP_COMMAND_SCRIPT
command += " " + 'get_ipsec_tunnel_state'
command += " " + tunnel['peer_address']
command += " " + str(tunidx)
proc = subprocess.Popen(
command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
state = out.split('=')[1]
state = state[:-1]
return OP_SUCCESS, state
def _delete_ipsec_site_tunnel(self, tunnel):
tunidx = self._ipsec_get_tunnel_idx(tunnel)
|
desecho/movies
|
src/moviesapp/tmdb.py
|
Python
|
mit
| 5,407
| 0.00074
|
from datetime import datetime
from operator import itemgetter
import tmdbsimple
from babel.dates import format_date
from django.conf import settings
from .exceptions import MovieNotInDb
from .models import get_poster_url
def get_tmdb(lang=None):
tmdbsimple.API_KEY = settings.TMDB_KEY
tmdbsimple.LANGUAGE = lang
return tmdbsimple
def get_poster_from_tmdb(poster):
if poster:
return poster[1:]
return None
def get_movies_from_tmdb(query, search_type, options, user, lang):
def set_proper_date(movies):
def get_date(date):
if date:
date = datetime.strptime(date, "%Y-%m-%d")
if date:
return format_date(date, locale=lang)
return None
for movie in movies:
movie["releaseDate"] = get_date(movie["releaseDate"])
return movies
def is_popular_movie(movie):
return movie["popularity"] >= settings.MIN_POPULARITY
def sort_by_date(movies):
movies_with_date = []
movies_without_date = []
for movie in movies:
if movie["releaseDate"]:
movies_with_date.append(movie)
else:
movies_without_date.append(movie)
movies_with_date = sorted(movies_with_date, key=itemgetter("releaseDate"), reverse=True)
movies = movies_with_date + movies_without_date
return movies
def get_data(query, search_type):
"""
Get data.
For actor, director search - the first is used.
"""
def filter_movies_only(entries):
return [e for e in entries if e["media_type"] == "movie"]
query = query.encode("utf-8")
tmdb = get_tmdb(lang)
search = tmdb.Search()
if search_type == "movie":
movies = search.movie(query=query)["results"]
else:
persons = search.person(query=query)["results"]
# We only select the first found actor/director.
if persons:
person_id = persons[0]["id"]
else:
return []
person = tmdb.People(person_id)
person.combined_credits()
if search_type == "actor":
                movies = filter_movies_only(person.cast)
else:
movies = filter_movies_only(person.crew)
movies = [m for m in movies if m["job"] == "Director"]
return movies
movies_data = get_data(query, search_type)
movies = []
i = 0
if movies_data:
user_movies_tmdb_ids = user.get_records().values_list("movie__tmdb_id", flat=True)
for movie in movies_data:
tmdb_id = movie["id"]
i += 1
if i > settings.MAX_RESULTS:
break
if tmdb_id in user_movies_tmdb_ids:
continue
poster = get_poster_from_tmdb(movie["poster_path"])
# Skip unpopular movies if this option is enabled.
if search_type == "movie" and options["popularOnly"] and not is_popular_movie(movie):
continue
movie = {
"id": tmdb_id,
"tmdbLink": f"{settings.TMDB_MOVIE_BASE_URL}{tmdb_id}",
"elementId": f"movie{tmdb_id}",
"releaseDate": movie.get("release_date"),
"title": movie["title"],
"poster": get_poster_url("small", poster),
"poster2x": get_poster_url("normal", poster),
}
movies.append(movie)
if options["sortByDate"]:
movies = sort_by_date(movies)
movies = set_proper_date(movies)
return movies
return []
def get_tmdb_movie_data(tmdb_id):
def get_release_date(release_date):
if release_date:
return release_date
return None
def get_trailers(movie_data):
trailers = []
for trailer in movie_data.videos()["results"]:
trailer_ = {"name": trailer["name"], "source": trailer["key"]}
trailers.append(trailer_)
return trailers
def get_movie_data(tmdb_id, lang):
tmdb = get_tmdb(lang=lang)
return tmdb.Movies(tmdb_id)
movie_data_en = get_movie_data(tmdb_id, "en")
movie_info_en = movie_data_en.info()
# We have to get all info in english first before we switch to russian or everything breaks.
trailers_en = get_trailers(movie_data_en)
movie_data_ru = get_movie_data(tmdb_id, "ru")
movie_info_ru = movie_data_ru.info()
trailers_ru = get_trailers(movie_data_ru)
imdb_id = movie_info_en["imdb_id"]
if imdb_id:
return {
"tmdb_id": tmdb_id,
"imdb_id": imdb_id,
"release_date": get_release_date(movie_info_en["release_date"]),
"title_original": movie_info_en["original_title"],
"poster_ru": get_poster_from_tmdb(movie_info_ru["poster_path"]),
"poster_en": get_poster_from_tmdb(movie_info_en["poster_path"]),
"homepage": movie_info_en["homepage"],
"trailers_en": trailers_en,
"trailers_ru": trailers_ru,
"title_en": movie_info_en["title"],
"title_ru": movie_info_ru["title"],
"description_en": movie_info_en["overview"],
"description_ru": movie_info_ru["overview"],
}
raise MovieNotInDb(tmdb_id)
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
|
Python
|
gpl-3.0
| 10,336
| 0.003289
|
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.w3c.test_parser import TestParser
class TestParserTest(unittest.TestCase):
def test_analyze_test_reftest_one_match(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
</head>
"""
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
def test_analyze_test_reftest_multiple_matches(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="match" href="orange-box-ref.xht" />
</head>
"""
oc = OutputCapture()
oc.capture_output()
try:
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html)
finally:
_, _, logs = oc.restore_output()
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
def test_analyze_test_reftest_match_and_mismatch(self):
test_html = """<head>
<link rel="match" href="green-box-ref.xht" />
<link rel="match" href="blue-box-ref.xht" />
<link rel="mismatch" href="orange-box-notref.xht" />
</head>
"""
oc = OutputCapture()
oc.capture_output()
try:
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html)
finally:
_, _, logs = oc.restore_output()
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
def test_analyze_test_reftest_with_ref_support_Files(self):
"""Tests analyze_test() using a reftest that has refers to a
reference file outside of the tests directory and the reference
file has paths to other support files.
"""
test_html = """<html>
<head>
<link rel="match" href="../reference/green-box-ref.xht" />
</head>
"""
ref_html = """<head>
<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
<style type="text/css">
background-image: url("../../support/some-image.png")
</style>
</head>
<body>
<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
</body>
</html>
"""
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
self.assertNotEqual(test_info, None, 'did not find a test')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
self.assertTrue('reference_support_info' in test_info.keys(), 'there should be reference_support_info for this test')
self.assertEquals(len(test_info['reference_support_info']['files']), 3, 'there should be 3 support files in this reference')
self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
def test_analyze_jstest(self):
"""Tests analyze_test() using a jstest"""
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'test_info is None')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')
def test_analyze_wpt_manual_test(self):
"""Tests analyze_test() with a manual test that is not in csswg-test."""
test_html = """<html>
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR" />
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
test_path = '/some/madeup/path/'
parser = TestParser(test_path + 'somefile-manual.html', MockHost())
test_info = parser.analyze_test(test_contents=test_html)
self.assertNotEqual(test_info, None, 'test_info is None')
self.assertTrue('test' in test_info.keys(), 'did not find a test file')
        self.assertFalse('reference' in test_info.keys(), 'should not have found a r
|
sinesiobittencourt/explainshell
|
explainshell/shlext.py
|
Python
|
gpl-3.0
| 8,869
| 0.001128
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 Vinay M. Sajip. See LICENSE for licensing information.
#
# Enhancements in shlex to tokenize closer to the way real shells do
#
from collections import deque
import shlex
import sys
# We need to behave differently on 2,x and 3,x, because on 2.x
# shlex barfs on Unicode, and must be given str.
if sys.version_info[0] < 3:
PY3 = False
text_type = unicode
else:
PY3 = True
text_type = str
class shell_shlex(shlex.shlex):
def __init__(self, instream=None, **kwargs):
if 'control' not in kwargs:
control = ''
        else:
control = kwargs.pop('control')
if control is True:
control = '();<>|&'
# shlex on 2.x doesn't like being passed Unicode :-(
if not PY3 and isinstance(instream, text_type):
instream = instream.encode('utf-8')
shlex.shlex.__init__(self, instream, **kwargs)
self.control = control
self.wordchars += '+-./*?=$%:@~`^,[]{}!\\' # these chars allowed in params
if self.control:
self.pbchars = deque()
def read_token(self):
quoted = False
escapedstate = ' '
self.preceding = ''
while True:
if self.control and self.pbchars:
nextchar = self.pbchars.pop()
else:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno += 1
if self.debug >= 3: # pragma: no cover
print("shlex: in state %r saw %r" % (self.state, nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.token_type = self.state
self.state = None # end of file
break
elif nextchar in self.whitespace:
self.preceding = nextchar
if self.debug >= 2: # pragma: no cover
print("shlex: whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno += 1
self.preceding = '\n'
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.token_type = self.state
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.token_type = self.state
self.state = 'a'
elif nextchar in self.control:
self.token = nextchar
self.token_type = self.state
self.state = 'c'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.token_type = self.state
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.token_type = self.state
self.state = 'a'
else:
# don't return any tokens that consist of characters that
# we don't know how to handle
raise ValueError('Illegal character %r' % nextchar)
# self.token = nextchar
# if self.token or (self.posix and quoted):
# break # emit current token
# else:
# continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2: # pragma: no cover
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
self.token_type = self.state
if not self.posix:
self.token += nextchar
self.state = ' '
break
else:
self.state = 'a'
elif (self.posix and nextchar in self.escape and self.state
in self.escapedquotes):
escapedstate = self.state
self.token_type = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2: # pragma: no cover
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (escapedstate in self.quotes and nextchar != self.state
and nextchar != escapedstate):
self.token += self.state
self.token += nextchar
self.token_type = self.state
self.state = escapedstate
elif self.state in ('a', 'c'):
if not nextchar:
self.token_type = self.state
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2: # pragma: no cover
print("shlex: I see whitespace in word state")
self.token_type = self.state
self.state = ' '
if self.token or (self.posix and quoted):
# push back so that preceding is set
# correctly for the next token
if self.control:
self.pbchars.append(nextchar)
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno += 1
if self.posix:
self.token_type = self.state
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.token_type = self.state
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.token_type = self.state
self.state = nextchar
elif self.state == 'c':
if nextchar in self.control:
self.token += nextchar
else:
if nextchar not in self.whitespace:
self.pbchars.append(nextchar)
else:
self.preceding = nextchar
self.token_type = self.state
self.state = ' '
break
elif (nextchar in self.wordchars or nextchar in self.quotes
or self.whitespace_split):
self.token += nextchar
else:
if self.control:
self.pbchars.append(nextchar)
else:
self.pushback.appendleft(nextchar)
if
|
e-koch/spectral-cube
|
spectral_cube/tests/test_projection.py
|
Python
|
bsd-3-clause
| 27,131
| 0.003317
|
from __future__ import print_function, absolute_import, division
import warnings
import pytest
import numpy as np
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from radio_beam import Beam, Beams
from .helpers import assert_allclose
from .test_spectral_cube import cube_and_raw
from ..spectral_cube import SpectralCube
from ..masks import BooleanArrayMask
from ..lower_dimensional_structures import (Projection, Slice, OneDSpectrum,
VaryingResolutionOneDSpectrum)
from ..utils import SliceWarning, WCSCelestialError, BeamUnitsError
from . import path
# needed for regression in numpy
import sys
try:
from astropy.utils.compat import NUMPY_LT_1_22
except ImportError:
# if astropy is an old version, we'll just skip the test
# (this is only used in one place)
NUMPY_LT_1_22 = False
# set up for parametrization
LDOs = (Projection, Slice, OneDSpectrum)
LDOs_2d = (Projection, Slice,)
two_qty_2d = np.ones((2,2)) * u.Jy
twelve_qty_2d = np.ones((12,12)) * u.Jy
two_qty_1d = np.ones((2,)) * u.Jy
twelve_qty_1d = np.ones((12,)) * u.Jy
data_two = (two_qty_2d, two_qty_2d, two_qty_1d)
data_twelve = (twelve_qty_2d, twelve_qty_2d, twelve_qty_1d)
data_two_2d = (two_qty_2d, two_qty_2d,)
data_twelve_2d = (twelve_qty_2d, twelve_qty_2d,)
def load_projection(filename):
hdu = fits.open(filename)[0]
proj = Projection.from_hdu(hdu)
return proj, hdu
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_two_2d))
def test_slices_of_projections_not_projections(LDO, data):
# slices of projections that have <2 dimensions should not be projections
p = LDO(data, copy=False)
assert not isinstance(p[0,0], LDO)
assert not isinstance(p[0], LDO)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_copy_false(LDO, data):
# copy the data so we can manipulate inplace without affecting other tests
image = data.copy()
p = LDO(image, copy=False)
image[3,4] = 2 * u.Jy
assert_allclose(p[3,4], 2 * u.Jy)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_write(LDO, data, tmpdir):
p = LDO(data)
p.write(tmpdir.join('test.fits').strpath)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_preserve_wcs_to(LDO, data):
# regression for #256
image = data.copy()
p = LDO(image, copy=False)
image[3,4] = 2 * u.Jy
p2 = p.to(u.mJy)
assert_allclose(p[3,4], 2 * u.Jy)
assert_allclose(p[3,4], 2000 * u.mJy)
assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_multiplication(LDO, data):
# regression: 265
p = LDO(data, copy=False)
p2 = p * 5
assert p2.unit == u.Jy
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value == 5)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_unit_division(LDO, data):
# regression: 265
image = data
p = LDO(image, copy=False)
p2 = p / u.beam
assert p2.unit == u.Jy/u.beam
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs_2d, data_twelve_2d))
def test_isnan(LDO, data):
# Check that np.isnan strips units
image = data.copy()
image[5,6] = np.nan
p = LDO(image, copy=False)
mask = np.isnan(p)
assert mask.sum() == 1
assert not hasattr(mask, 'unit')
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_self_arith(LDO, data):
image = data
p = LDO(image, copy=False)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
@pytest.mark.parametrize(('LDO', 'data'),
zip(LDOs, data_twelve))
def test_self_arith_with_beam(LDO, data):
exp_beam = Beam(1.0 * u.arcsec)
image = data
p = LDO(image, copy=False)
p = p.with_beam(exp_beam)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
assert p2.beam == exp_beam
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
assert p2.beam == exp_beam
@pytest.mark.xfail(raises=ValueError, strict=True)
def test_VRODS_wrong_beams_shape():
'''
Check that passing Beams with a different shape than the data
is caught.
'''
exp_beams = Beams(np.arange(1, 4) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
beams=exp_beams)
def test_VRODS_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
    p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
assert (p.beams == exp_beams).all()
new_beams = Beams(np.arange(2, twelve_qty_1d.size + 2) * u.arcsec)
p = p.with_beams(new_beams)
assert np.all(p.beams == new_beams)
def test_VRODS_slice_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False,
wcs=WCS(naxis=1),
beams=exp_beams)
assert np.all(p[:5].beams == exp_beams[:5])
def test_VRODS_arith_with_beams():
exp_beams = Beams(np.arange(1, twelve_qty_1d.size + 1) * u.arcsec)
p = VaryingResolutionOneDSpectrum(twelve_qty_1d, copy=False, beams=exp_beams)
p2 = p + p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==2)
assert np.all(p2.beams == exp_beams)
p2 = p - p
assert hasattr(p2, '_wcs')
assert p2.wcs == p.wcs
assert np.all(p2.value==0)
assert np.all(p2.beams == exp_beams)
def test_onedspectrum_specaxis_units():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
assert p.spectral_axis.unit == u.Unit("m/s")
def test_onedspectrum_with_spectral_unit():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs)
p_new = p.with_spectral_unit(u.km/u.s)
assert p_new.spectral_axis.unit == u.Unit("km/s")
np.testing.assert_equal(p_new.spectral_axis.value,
1e-3*p.spectral_axis.value)
def test_onedspectrum_input_mask_type():
test_wcs = WCS(naxis=1)
test_wcs.wcs.cunit = ["m/s"]
test_wcs.wcs.ctype = ["VELO-LSR"]
np_mask = np.ones(twelve_qty_1d.shape, dtype=bool)
np_mask[1] = False
bool_mask = BooleanArrayMask(np_mask, wcs=test_wcs,
shape=np_mask.shape)
# numpy array
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=np_mask)
assert (p.mask.include() == bool_mask.include()).all()
# MaskBase
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=bool_mask)
assert (p.mask.include() == bool_mask.include()).all()
# No mask
ones_mask = BooleanArrayMask(np.ones(twelve_qty_1d.shape, dtype=bool),
wcs=test_wcs, shape=np_mask.shape)
p = OneDSpectrum(twelve_qty_1d, wcs=test_wcs,
mask=None)
assert (p.mask.include() == ones_mask.include()).all()
def test_slice_tricks():
test_wcs_1 = WCS(naxis=1)
test_wcs_2 = WCS(naxis=2)
spec = OneDSpectrum(twelve_qty_1d, wcs=test_wcs_1)
im = Slice(twelve_qty_2d, wcs=test_wcs_2)
with warnings.catch_warnings(record=True) as w:
new = spec[:,None,None] * im[None,:,:]
assert new.ndim == 3
# two warnings because we're doing BOTH slices!
assert len(w) == 2
assert w[0].category == SliceWarning
with warnings.catch_warning
|
The-Fonz/adventure-track
|
backend/transcode/transcoder.py
|
Python
|
mit
| 8,880
| 0.004392
|
import os
import re
import asyncio
import tempfile
import os.path as osp
import asyncio.subprocess
from concurrent.futures import ThreadPoolExecutor
from PIL import Image
from ..utils import getLogger
from .mediaconfig import *
logger = getLogger('transcode.transcoder')
class Transcoder:
"Nice object-oriented style transcoder implementation"
@classmethod
async def create(cls, loop=None):
tc = cls()
tc.loop = loop or asyncio.get_event_loop()
# Call this method to allow subclasses to initialize specific things, e.g. ThreadPoolExecutor
tc.init()
tc.media_root = osp.abspath(os.environ['AT_MEDIA_ROOT'])
tc.proc = None
tc.m = None
return tc
def init(self):
"Can be overridden in subclass"
pass
async def consume(self, pq, resq):
"Priority queue for input, result queue for output"
self.pq = pq
logger.debug("%s consuming from queue", self)
while True:
# Store on self for retrieval on unexpected exit
self.m = await self.pq.get()
logger.debug("Consumed from queue: %s", self.m)
try:
media = self.m[2]
media_transcoded = media.copy()
conf = self.m[3]
media_transcoded['update'] = conf.get('update', True)
src = media['path']
# Paths can be absolute or relative to media root
if not osp.isabs(src):
src = osp.abspath(osp.join(os.environ['AT_MEDIA_ROOT'], media['path']))
tmpdir = tempfile.mkdtemp(prefix='transcode-')
# Construct entire filename
n, ext = osp.splitext(osp.basename(src))
# Make <tmpdir>/<originalname>-video.<newext>
dest_tmp = osp.join(tmpdir, '{}-{}.{}'.format(n, conf['name'], conf['ext']))
# Returns dict with possible keys: timestamp, duration, width, height, log
logger.debug("Starting transcode for %s with conf %s", media, conf)
stat = await self.transcode(src, dest_tmp, conf)
logger.debug("Finished transcode for %s with conf %s", media, conf)
media_transcoded.update(stat)
# Move from temp folder to e.g. <media_root>/video/<filename>
dest_perm = osp.join(self.media_root, media['type'], osp.basename(dest_tmp))
# Make dir if non-existent
os.makedirs(osp.dirname(dest_perm), exist_ok=True)
# Use replace instead of rename to overwrite target
os.replace(dest_tmp, dest_perm)
# Make path relative to media root and store in media obj
media_transcoded['path'] = osp.relpath(dest_perm, self.media_root)
media_transcoded['conf_name'] = conf['name']
# Put in result queue
await resq.put(media_transcoded)
except Exception:
logger.exception("Error in transcoding process for media %s", self.m)
# Let queue counter decrease
self.pq.task_done()
self.m = None
def stop(self):
"Stops any process, returns any unfinished business or None"
if self.proc:
self.proc.kill()
return self.m
async def transcode(self, *args, **kwargs):
raise Warning("Method not implemented")
async def run_subprocess(self, cmd_list, ignore_returncode=False):
"Run a subprocess as coroutine"
logger.info("Running command > {}".format(' '.join(cmd_list)))
# TODO: Use asyncio.wait_for to specify a timeout
proc = await asyncio.create_subprocess_exec(
*cmd_list,
# Capture stdout, stderr to avoid dirty logs
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
# Save proc to be able to end it
self.proc = proc
await proc.wait()
# Process is over now
self.proc = None
stdout, stderr = await proc.communicate()
if proc.returncode and not ignore_returncode:
raise Warning("Return code {} is nonzero, stdout={} stderr={}".format(proc.returncode, stdout, stderr))
# stdout is bytes, conver to string for json serialization
return stderr.decode('utf-8')
class VidThumbTranscoder(Transcoder):
async def transcode(self, src, dest, conf):
"Generate thumbnail for video"
cmd = ("ffmpeg -y -i {src} -vf "
"thumbnail,scale=w='min(iw,{conf[wh][0]})':h='min(ih,{conf[wh][1]})':force_original_aspect_ratio=decrease "
"-frames:v 1 -an {dest}").format(
src=src, dest=dest, conf=conf)
stdout = await self.run_subprocess(cmd.split())
width, height = None, None
return {'width': width, 'height': height, 'log': stdout}
class VidTranscoder(Transcoder):
async def transcode(self, src, dest, conf, cutfromto=None):
"""
Improvement ideas:
- Normalize audio using https://github.com/slhck/ffmpeg-normalize or, better yet,
demux audio and use *sox* for compression, normalization, then remux
- GIF summary, see https://superuser.com/questions/556029/how-do-i-convert-a-video-to-gif-using-ffmpeg-with-reasonable-quality/556031#556031 and https://superuser.com/questions/538112/meaningful-thumbnails-for-a-video-using-ffmpeg
:param input: Video file
:param cutfrom: Skip to this time [seconds]
:param cutto: Until this time [seconds]
:return:
"""
# TODO: copy audio stream if correct format, otherwise transcode
# TODO: Find src file timestamp, duration, resolution
# TODO: Only transcode if resolution smaller than original
cut = "-ss {cut[0]} -to {cut[1]}".format(cut=cutfromto) if cutfromto else ''
# See https://trac.ffmpeg.org/wiki/Scaling%20(resizing)%20with%20ffmpeg for info on keeping aspect ratio
cmd = ("ffmpeg -y -i {src} {cut} -c:v libx264 -movflags +faststart -vf "
"scale=w='min(iw,{conf[wh][0]})':h='min(ih,{conf[wh][1]})':force_original_aspect_ratio=decrease -crf 26 -c:a copy {dest}"
.format(src=src, cut=cut, conf=conf, dest=dest))
stdout = await self.run_subprocess(cmd.split())
# TODO: Find dest file width, height
return {'timestamp': None, 'duration': None, 'width': None, 'height': None, 'log': stdout}
class ImageTranscoder(Transcoder):
def init(self):
# Just one thread, make concurrent by instantiating multiple ImageTranscoder classes
self.executor = ThreadPoolExecutor(1)
def _transcode(self, src, dest, conf):
"""
Run this blocking function in ThreadPoolExecutor.
Not sure how and when the termination signal propagates to child threads.
"""
logger.debug("Running _transcode")
im = Image.open(src)
newim = im.copy()
# Modifies in-place
newim.thumbnail(conf['wh'])
newim.save(dest, format='JPEG')
# TODO: Find src file timestamp
# TODO: Find dest file width/height
return {'timestamp': None, 'width': None, 'height': None}
async def transcode(self, *args):
# Avoid blocking eventloop
logger.debug("Running transcode in ThreadPoolExecutor")
return await self.loop.run_in_executor(self.executor, self._transcode, *args)
class AudioTranscoder(Transcoder):
async def transcode(self, src, dest, conf):
"Keep same bitrate preferably"
# Ignore returncode as ffmpeg will show info but exit saying it needs output file
info = await self.run_subprocess(['ffmpeg', '-i', src], ignore_returncode=True)
bitrate = 1000
try:
res = re.search('bitrate:\s*([\d\.]+)\s*kb\/s', info)
bitrate = int(res.group(1))
except (AttributeError, ValueError) as e:
logger.warning("Could not find audio bitrate in ffmpeg info string: %s\nError: %s", info, str(e))
bitrate = min(bitrate, conf.get('max-bitrate', 128
|
Storj/storjtorrent
|
storjtorrent/__init__.py
|
Python
|
mit
| 1,271
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014 Josh Brandoff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from storjtorrent import *
from session import *
from exception import *
from thread_management import *
|
byronlin92/django_local_library
|
catalog/models.py
|
Python
|
apache-2.0
| 4,639
| 0.006682
|
from django.db import models
from django.contrib.auth.models import User
from datetime import date
# Create your models here.
class Genre(models.Model):
"""
    Model representing a book genre (e.g. Science Fiction, Non Fiction).
    """
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
class Book(models.Model):
class Meta:
permissions = (("can_edit_book", "Edit book"),)
"""
Model representing a book (but not a specific copy of a book).
"""
title = models.CharField(max_length=200)
author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
# Foreign Key used because book can only have one author, but authors can have multiple books
# Author as a string rather than object because it hasn't been declared yet in the file.
summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
isbn = models.CharField('ISBN',max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
genre = models.ManyToManyField(Genre, help_text="Select a genre for this book")
# ManyToManyField used because genre can contain many books. Books can cover many genres.
# Genre class has already been defined so we can specify the object above.
language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
def __str__(self):
"""
String for representing the Model object.
"""
return self.title
def get_absolute_url(self):
"""
Returns the url to access a particular book instance.
"""
return reverse('book-detail', args=[str(self.id)])
def display_genre(self):
"""
Creates a string for the Genre. This is required to display genre in Admin.
"""
return ', '.join([ genre.name for genre in self.genre.all()[:3] ])
display_genre.short_description = 'Genre'
import uuid # Required for unique book instances
class BookInstance(models.Model):
class Meta:
permissions = (("can_mark_returned", "Set book as returned"),)
ordering = ["due_back"]
"""
Model representing a specific copy of a book (i.e. that can be borrowed from the library).
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text="Unique ID for this particular book across whole library")
book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
imprint = models.CharField(max_length=200)
due_back = models.DateField(null=True, blank=True)
borrower = models.ForeignKey(User, on_delete=models.SET_NULL, null=True, blank=True)
LOAN_STATUS = (
('m', 'Maintenance'),
('o', 'On loan'),
('a', 'Available'),
('r', 'Reserved'),
)
status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='d', help_text='Book availability')
def __str__(self):
"""
String for representing the Model object
"""
return '%s (%s)' % (self.id,self.book.title)
@property
def is_overdue(self):
if self.due_back and date.today() > self.due_back:
return True
return False
class Author(models.Model):
"""
Model representing an author.
"""
class Meta:
permissions = (("can_edit_author", "Edit author"),)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
date_of_birth = models.DateField(null=True, blank=True)
date_of_death = models.DateField('Died', null=True, blank=True)
def get_absolute_url(self):
"""
Returns the url to access a particular author instance.
"""
return reverse('author-detail', args=[str(self.id)])
def __str__(self):
"""
String for representing the Model object.
"""
return '%s, %s' % (self.last_name, self.first_name)
class Language(models.Model):
"""
Model representing a Language (e.g. English, French, Japanese, etc.)
"""
    name = models.CharField(max_length=200, help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")
def __str__(self):
"""
String for representing the Model object (in Admin site etc.)
"""
return self.name
|
SmotritelTerve/python_training
|
test/test_modify_contact.py
|
Python
|
apache-2.0
| 2,179
| 0.000918
|
# -*- coding: utf-8 -*-
from model.contact import Contact
from random import randrange
def test_modify_some_contact(app, db, check_ui):
random_value = str(app.get_random_int())
if app.contact.count() == 0:
app.contact.add(Contact(first_name="First Contact"))
# old_contacts = app.contact.get_contact_list()
old_contacts = db.get_contact_list()
index = randrange(len(old_contacts))
contact = Contact(first_name="name" + random_value,
middle_name="middleName" + random_value,
last_name="lastName" + random_value,
nick_name="nickName" + random_value,
title="title" + random_value,
company="company" + random_value,
address="address" + random_value,
home_phone="homePhone" + random_value,
mobile_phone="mobilePhone" + random_value,
work_phone="workPhone" + random_value,
fax_phone="faxPhone" + random_value,
email_1="email1@company" + random_value + ".com",
email_2="email2@company" + random_value + ".com",
email_3="email3@company" + random_value + ".com",
homepage="www.homepage" + random_value + ".com",
birthday_year=random_value,
anniversary_year=random_value,
address_2="address" + random_value,
phone_2="home" + random_value,
notes="notes" + random_value)
contact.id = old_contacts[index].id
app.contact.modify_contact_by_index(index, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
def clean(contact):
return Contact(id=contact.id, first_name=contact.first_name.strip())
new_contacts = map(clean, db.get_contact_list())
new_contacts_1 = map(clean, app.contact.get_contact_list())
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(new_contacts_1, key=Contact.id_or_max)
|
tigerking/pyvision
|
src/samples/TutFaceEyes.py
|
Python
|
bsd-3-clause
| 3,860
| 0.010363
|
# PyVision License
#
# Copyright (c) 2006-2010 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This is some sample code that shows how to use the face detector
and eye locator.
"""
import os.path
import pyvision as pv
# The ASEF eye locator has patent complications. This next line
# disables those warnings.
pv.disableCommercialUseWarnings()
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator
if __name__ == "__main__":
ilog = pv.ImageLog()
# Load the face image file
fname = os.path.join(pv.__path__[0],'data','misc','FaceSample.jpg')
# Create the annotation image in black and white so that color
# annotations show up better.
im = pv.Image(fname,bw_annotate=True)
    ilog(pv.Image(fname),"Original")
# Create a OpenCV cascade face detector object
cd = CascadeDetector()
# Create an eye detector object
el = FilterEyeLocator()
# Call the face detector like a function to get a list of face rectangles
    rects = cd(im)
# print the list of rectangles
print "Face Detection Output:",rects
# Also call the eye detector like a function with the original image and
# the list of face detections to locate the eyes.
eyes = el(im,rects)
# print the list of eyes. Format [ [ face_rect, left_eye, right_eye], ...]
print "Eye Locator Output:",eyes
# Now you can process the detection and eye data for each face detected in the
# image. Here we annotate the image with the face detection box and
    # eye coordinates and we create a normalized face image by translating
# rotating and scaling the face using pv.AffineFromPoints
for face_rect,left_eye,right_eye in eyes:
# Annotate the original image
im.annotateRect(face_rect, color='red')
im.annotatePoint(left_eye, color='yellow')
im.annotatePoint(right_eye, color='yellow')
# Align the eye coordinates to produce a face tile. This is a typical
# step before running a face verification algorithm.
affine = pv.AffineFromPoints(left_eye,right_eye,pv.Point(32.0,64.0),pv.Point(96.0,64.0),(128,160))
tile = affine.transformImage(im)
ilog(tile,"NormalizedFace")
# Finally, display the annotate image.
ilog(im,"DetectionData")
ilog.show()
|
godber/pds3label
|
pds3label/vendor/pds3_python/ODLv21Listener.py
|
Python
|
mit
| 5,308
| 0.008478
|
# Generated from java-escape by ANTLR 4.5
from antlr4 import *
# This class defines a complete listener for a parse tree produced by ODLv21Parser.
class ODLv21Listener(ParseTreeListener):
# Enter a parse tree produced by ODLv21Parser#label.
def enterLabel(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#label.
def exitLabel(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#statement.
def enterStatement(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#statement.
def exitStatement(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#assignment_stmt.
def enterAssignment_stmt(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#assignment_stmt.
def exitAssignment_stmt(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#pointer_stmt.
def enterPointer_stmt(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#pointer_stmt.
def exitPointer_stmt(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#object_stmt.
def enterObject_stmt(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#object_stmt.
def exitObject_stmt(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#group_stmt.
def enterGroup_stmt(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#group_stmt.
def exitGroup_stmt(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#value.
def enterValue(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#value.
def exitValue(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#date_time_value.
def enterDate_time_value(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#date_time_value.
def exitDate_time_value(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#sequence_value.
def enterSequence_value(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#sequence_value.
def exitSequence_value(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#sequence_1D.
def enterSequence_1D(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#sequence_1D.
def exitSequence_1D(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#sequence_2D.
    def enterSequence_2D(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#sequence_2D.
def exitSequence_2D(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#set_value.
def enterSet_value(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#set_value.
def exitSet_value(self, ctx):
pass
    # Enter a parse tree produced by ODLv21Parser#ScalarInteger.
def enterScalarInteger(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarInteger.
def exitScalarInteger(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarBasedInteger.
def enterScalarBasedInteger(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarBasedInteger.
def exitScalarBasedInteger(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarFloat.
def enterScalarFloat(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarFloat.
def exitScalarFloat(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarScaledReal.
def enterScalarScaledReal(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarScaledReal.
def exitScalarScaledReal(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarIdentifier.
def enterScalarIdentifier(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarIdentifier.
def exitScalarIdentifier(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarSymbol.
def enterScalarSymbol(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarSymbol.
def exitScalarSymbol(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#ScalarString.
def enterScalarString(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#ScalarString.
def exitScalarString(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#units_expression.
def enterUnits_expression(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#units_expression.
def exitUnits_expression(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#units_factor.
def enterUnits_factor(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#units_factor.
def exitUnits_factor(self, ctx):
pass
# Enter a parse tree produced by ODLv21Parser#namespace_identifier.
def enterNamespace_identifier(self, ctx):
pass
# Exit a parse tree produced by ODLv21Parser#namespace_identifier.
def exitNamespace_identifier(self, ctx):
pass
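# Typical usage of a generated listener like this one (illustrative, using the
# standard ANTLR4 Python runtime): subclass ODLv21Listener, override the
# enter*/exit* callbacks of interest, and walk a parse tree with
#   antlr4.ParseTreeWalker().walk(MyListener(), tree)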
|
GrognardsFromHell/TemplePlus
|
deploy_symbols_s3.py
|
Python
|
mit
| 1,455
| 0.002749
|
import sys
import os.path
import lzma
import time
import os
import subprocess
if "APPVEYOR_REPO_TAG" not in os.environ or os.environ["APPVEYOR_REPO_TAG"] != "true":
print("Not u
|
ploading symbols since build is not tagged")
sys.exit()
symbol_filename = sys.argv[1]
with open(symbol_filename, "rt") as fh:
first_line = next(fh).strip()
tokens = first_line.split()
expected_tokens = ['MODULE', 'windows', 'x86']
    if tokens[0:3] != expected_tokens:
raise RuntimeError("Expected first tokens to be " + str(expected_tokens) + ", but was: " + str(tokens[0:3]))
file_hash = tokens[3]
file_name = tokens[4]
basename = os.path.basename(symbol_filename)
target_path = "%s/%s/%s.xz" % (file_name, file_hash, basename)
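# This mirrors the Breakpad symbol-store layout (<module name>/<debug id>/<file>),
# with an .xz suffix because the symbol file is LZMA-compressed below.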
# Compress symbols with LZMA to save bandwidth
print("Compressing symbol file...")
t_start = time.perf_counter()
with open(symbol_filename, "rb") as fh:
symbol_data = fh.read()
symbol_data_len = len(symbol_data)
compressed_symbols = lzma.compress(symbol_data)
compression_ratio = len(compressed_symbols) * 100 / symbol_data_len
print("Compressed symbol data (ratio %d%%) in %fs" % (compression_ratio, time.perf_counter() - t_start))
print("Uploading symbols to ", target_path)
with open("TemplePlus.sym.xz", "wb") as fh:
fh.write(compressed_symbols)
subprocess.run(["aws", "s3", "cp", "TemplePlus.sym.xz", "s3://templeplus-symbols/" + target_path], check=True, shell=True)
print("Uploaded symbols to S3.")
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/custom_audience_service/transports/grpc.py
|
Python
|
apache-2.0
| 12,186
| 0.001067
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import custom_audience
from google.ads.googleads.v9.services.types import custom_audience_service
from .base import CustomAudienceServiceTransport, DEFAULT_CLIENT_INFO
class CustomAudienceServiceGrpcTransport(CustomAudienceServiceTransport):
"""gRPC backend transport for CustomAudienceService.
Service to manage custom audiences.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoin
|
t
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            address (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A opti
|
SujaySKumar/bedrock
|
bedrock/firefox/helpers.py
|
Python
|
mpl-2.0
| 8,166
| 0.000245
|
from collections import OrderedDict
from django.core.cache import cache
from django.conf import settings
import jingo
import jinja2
from bedrock.firefox.models import FirefoxOSFeedLink
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def android_builds(channel, builds=None):
builds = builds or []
variations = OrderedDict([
('api-9', 'Gingerbread'),
('api-11', 'Honeycomb+ ARMv7+'),
('x86', 'x86'),
])
    if channel == 'alpha':
for type, arch_pretty in variations.iteritems():
link = firefox_android.get_download_url('alpha', type)
builds.append({'os': 'android',
'os_pretty': 'Android',
'os_arch_pretty': 'Android %s' % arch_pretty,
'arch': 'x86' if type == 'x86' else 'armv7up %s' % type,
'arch_pretty': arch_pretty,
'download_link': link})
else:
link = firefox_android.get_download_url(channel)
builds.append({'os': 'android',
'os_pretty': 'Android',
'download_link': link})
return builds
@jingo.register.function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', small=False, icon=True,
platform='all', dom_id=None, locale=None, simple=False,
force_direct=False, force_full_installer=False,
force_funnelcake=False, check_old_fx=False):
""" Output a "download firefox" button.
:param ctx: context from calling template.
:param channel: name of channel: 'release', 'beta' or 'alpha'.
:param small: Display the small button if True.
:param icon: Display the Fx icon on the button if True.
:param platform: Target platform: 'desktop', 'android' or 'all'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param simple: Display button with text only if True. Will not display
icon or privacy/what's new/systems & languages links. Can be used
in conjunction with 'small'.
:param force_direct: Force the download URL to be direct.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
:param force_funnelcake: Force the download version for en-US Windows to be
'latest', which bouncer will translate to the funnelcake build.
:param check_old_fx: Checks to see if the user is on an old version of
Firefox and, if true, changes the button text from 'Free Download'
to 'Update your Firefox'. Must be used in conjunction with
'simple' param being true.
:return: The button html.
"""
show_desktop = platform in ['all', 'desktop']
show_android = platform in ['all', 'android']
alt_channel = '' if channel == 'release' else channel
locale = locale or get_locale(ctx['request'])
funnelcake_id = ctx.get('funnelcake_id', False)
dom_id = dom_id or 'download-button-%s-%s' % (
'desktop' if platform == 'all' else platform, channel)
l_version = firefox_desktop.latest_builds(locale, channel)
if l_version:
version, platforms = l_version
else:
locale = 'en-US'
version, platforms = firefox_desktop.latest_builds('en-US', channel)
# Gather data about the build for each platform
builds = []
if show_desktop:
for plat_os, plat_os_pretty in firefox_desktop.platform_labels.iteritems():
# Windows 64-bit builds are currently available only on the Aurora
# and Beta channel
if plat_os == 'win64' and channel not in ['alpha', 'beta']:
continue
# Fallback to en-US if this plat_os/version isn't available
# for the current locale
_locale = locale if plat_os_pretty in platforms else 'en-US'
# And generate all the info
download_link = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=force_direct,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
# If download_link_direct is False the data-direct-link attr
# will not be output, and the JS won't attempt the IE popup.
if force_direct:
# no need to run get_download_url again with the same args
download_link_direct = False
else:
download_link_direct = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=True,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
if download_link_direct == download_link:
download_link_direct = False
builds.append({'os': plat_os,
'os_pretty': plat_os_pretty,
'download_link': download_link,
'download_link_direct': download_link_direct})
if show_android:
builds = android_builds(channel, builds)
# Get the native name for current locale
langs = firefox_desktop.languages
locale_name = langs[locale]['native'] if locale in langs else locale
data = {
'locale_name': locale_name,
'version': version,
'product': 'firefox-android' if platform == 'android' else 'firefox',
'builds': builds,
'id': dom_id,
'small': small,
'simple': simple,
'channel': alt_channel,
'show_android': show_android,
'show_desktop': show_desktop,
'icon': icon,
'check_old_fx': check_old_fx and simple,
}
html = jingo.render_to_string(ctx['request'],
'firefox/includes/download-button.html',
data)
return jinja2.Markup(html)
@jingo.register.function
def firefox_url(platform, page, channel=None):
"""
Return a product-related URL like /firefox/all/ or /mobile/beta/notes/.
Examples
========
In Template
-----------
{{ firefox_url('desktop', 'all', 'organizations') }}
{{ firefox_url('desktop', 'sysreq', channel) }}
{{ firefox_url('android', 'notes') }}
"""
kwargs = {}
# Tweak the channel name for the naming URL pattern in urls.py
if channel == 'release':
channel = None
if channel == 'alpha':
if platform == 'desktop':
channel = 'developer'
if platform == 'android':
channel = 'aurora'
if channel == 'esr':
channel = 'organizations'
if channel:
kwargs['channel'] = channel
if page == 'notes' and platform != 'desktop':
kwargs['platform'] = platform
return reverse('firefox.%s' % page, kwargs=kwargs)
@jingo.register.function
def firefox_os_feed_links(locale, force_cache_refresh=False):
if locale in settings.FIREFOX_OS_FEED_LOCALES:
cache_key = 'firefox-os-feed-links-' + locale
if not force_cache_refresh:
links = cache.get(cache_key)
if links:
return links
links = list(
FirefoxOSFeedLink.objects.filter(locale=locale).order_by(
'-id').values_list('link', 'title')[:10])
cache.set(cache_key, links)
return links
elif '-' in locale:
return firefox_os_feed_links(locale.split('-')[0])
@jingo.register.function
def firefox_os_blog_link(locale):
try:
return settings.FXOS_PRESS_BLOG_LINKS[locale]
except KeyError:
if '-' in locale:
return firefox_os_blog_link(locale.split('-')[0])
else:
return None
|
minrk/sympy
|
sympy/functions/special/gamma_functions.py
|
Python
|
bsd-3-clause
| 10,069
| 0.002086
|
from sympy.core import Add, S, C, sympify, oo, pi
from sympy.core.function import Function, ArgumentIndexError
from zeta_functions import zeta
from sympy.functions.elementary.exponential import log
from sympy.functions.elementary.integers import floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.combinatorial.numbers import bernoulli
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
"""The gamma function returns a function which passes through the integral
values of the factorial function, i.e. though defined in the complex plane,
when n is an integer, gamma(n) = (n - 1)!
Reference:
http://en.wikipedia.org/wiki/Gamma_function
"""
nargs = 1
def fdiff(self, argindex=1):
if argindex == 1:
return gamma(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg.is_Integer:
if arg.is_positive:
return C.factorial(arg-1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
def _eval_expand_func(self, deep=True, **hints):
if deep:
arg = self.args[0].expand(deep, **hints)
else:
arg = self.args[0]
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
tail = (C.Rational(1, coeff.q),) + tail
coeff = floor(coeff)
tail = arg._new_rawargs(*tail, **dict(reeval=False))
return gamma(tail)*C.RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_rewrite_as_tractable(self, z):
return C.exp(loggamma(z))
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
"""Lower incomplete gamma function"""
nargs = 2
@classmethod
def eval(cls, a, x):
if a.is_Number:
if a is S.One:
|
return S.One - C.exp(-x)
|
elif a.is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, x) - x**b * C.exp(-x)
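# The integer branches in lowergamma.eval above and uppergamma.eval below use
# the standard recurrences
#   lowergamma(a, x) = (a-1)*lowergamma(a-1, x) - x**(a-1)*exp(-x)
#   uppergamma(a, z) = (a-1)*uppergamma(a-1, z) + z**(a-1)*exp(-z)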
class uppergamma(Function):
"""Upper incomplete gamma function"""
nargs = 2
def fdiff(self, argindex=2):
if argindex == 2:
a, z = self[0:2]
return -C.exp(-z)*z**(a-1)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, z):
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
return gamma(a)
if a.is_Number:
if a is S.One:
return C.exp(-z)
elif a.is_Integer:
b = a - 1
if b.is_positive:
return b*cls(b, z) + z**b * C.exp(-z)
###############################################################################
########################### GAMMA RELATED FUNCTIONS ###########################
###############################################################################
class polygamma(Function):
"""The function polygamma(n, z) returns log(gamma(z)).diff(n + 1)
Reference:
http://en.wikipedia.org/wiki/Polygamma_function
"""
nargs = 2
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[0:2]
return polygamma(n+1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_positive(self):
if self.args[1].is_positive and self.args[0] > 0:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[1].is_positive and self.args[0] > 0:
return self.args[0].is_even
def _eval_is_real(self):
return self.args[0].is_real
def _eval_aseries(self, n, args0, x, logx):
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = C.Order(1/z, x)
else:
m = C.ceiling((n+1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = C.Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = C.ceiling((n+1)//2)
for k in range(1, m):
fac = fac*(2*k+N-1)*(2*k+N-2) / ((2*k)*(2*k-1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = C.Order(1/z**(2*m), x)
if n == 0:
o = C.Order(1/z, x)
elif n == 1:
o = C.Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return -1 * (-1/z)**N * r
@classmethod
def eval(cls, n, z):
n, z = map(sympify, (n, z))
if n.is_integer:
if n.is_negative:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + C.harmonic(z-1, 1)
elif n.is_odd:
return (-1)**(n+1)*C.factorial(n)*zeta(n+1, z)
def _eval_expand_func(self, deep=True, **hints):
if deep:
hints['func'] = False
n = self.args[0].expand(deep, **hints)
z = self.args[1].expand(deep, **hints)
else:
n, z = self.args[0], self.args[1].expand(deep, func=True)
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
|
cosmicAsymmetry/zulip
|
api/integrations/trac/zulip_trac.py
|
Python
|
apache-2.0
| 5,443
| 0.003308
|
# -*- coding: utf-8 -*-
# Copyright © 2012 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Zulip trac plugin -- sends zulips when tickets change.
#
# Install by copying this file and zulip_trac_config.py to the trac
# plugins/ subdirectory, customizing the constants in
# zulip_trac_config.py, and then adding "zulip_trac" to the
# components section of the conf/trac.ini file, like so:
#
# [components]
# zulip_trac = enabled
#
# You may then need to restart trac (or restart Apache) for the bot
# (or changes to the bot) to actually be loaded by trac.
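#
# For illustration only, a zulip_trac_config.py might define constants like the
# following (the names are the ones referenced below; values are placeholders):
#
#   ZULIP_USER = "trac-bot@example.com"
#   ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
#   ZULIP_SITE = "https://zulip.example.com"
#   ZULIP_API_PATH = None
#   STREAM_FOR_NOTIFICATIONS = "trac"
#   TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket"
#   TRAC_NOTIFY_FIELDS = ["summary", "description", "priority", "comment"]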
from trac.core import Component, implements
from trac.ticket import ITicketChangeListener
import sys
import os.path
sys.path.insert(0, os.path.dirname(__file__))
import zulip_trac_config as config
VERSION = "0.9"
if False:
    from typing import Any, Dict
if config.ZULIP_API_PATH is not None:
sys.path.append(config.ZULIP_API_PATH)
import zulip
client = zulip.Client(
email=config.ZULIP_USER,
site=config.ZULIP_SITE,
api_key=config.ZULIP_API_KEY,
client="ZulipTrac/" + VERSION)
def markdown_ticket_url(ticket, heading="ticket"):
# type: (Any, str) -> str
return "[%s #%s](%s/%s)" % (heading, ticket.id, config.TRAC_BASE_TICKET_URL, ticket.id)
def markdown_block(desc):
# type: (str) -> str
return "\n\n>" + "\n> ".join(desc.split("\n")) + "\n"
def truncate(string, length):
# type: (str, int) -> str
if len(string) <= length:
return string
return string[:length - 3] + "..."
def trac_subject(ticket):
# type: (Any) -> str
return truncate("#%s: %s" % (ticket.id, ticket.values.get("summary")), 60)
def send_update(ticket, content):
# type: (Any, str) -> None
client.send_message({
"type": "stream",
"to": config.STREAM_FOR_NOTIFICATIONS,
"content": content,
"subject": trac_subject(ticket)
})
class ZulipPlugin(Component):
implements(ITicketChangeListener)
def ticket_created(self, ticket):
# type: (Any) -> None
"""Called when a ticket is created."""
content = "%s created %s in component **%s**, priority **%s**:\n" % \
(ticket.values.get("reporter"), markdown_ticket_url(ticket),
ticket.values.get("component"), ticket.values.get("priority"))
# Include the full subject if it will be truncated
if len(ticket.values.get("summary")) > 60:
content += "**%s**\n" % (ticket.values.get("summary"),)
if ticket.values.get("description") != "":
content += "%s" % (markdown_block(ticket.values.get("description")),)
send_update(ticket, content)
def ticket_changed(self, ticket, comment, author, old_values):
# type: (Any, str, str, Dict[str, Any]) -> None
"""Called when a ticket is modified.
`old_values` is a dictionary containing the previous values of the
fields that have changed.
"""
if not (set(old_values.keys()).intersection(set(config.TRAC_NOTIFY_FIELDS)) or
(comment and "comment" in set(config.TRAC_NOTIFY_FIELDS))):
return
content = "%s updated %s" % (author, markdown_ticket_url(ticket))
if comment:
content += ' with comment: %s\n\n' % (markdown_block(comment),)
else:
content += ":\n\n"
field_changes = []
for key in old_values.keys():
if key == "description":
                content += '- Changed %s from %s\n\nto %s' % (key, markdown_block(old_values.get(key)),
                                                              markdown_block(ticket.values.get(key)))
elif old_values.get(key) == "":
field_changes.append('%s: => **%s**' % (key, ticket.values.get(key)))
elif ticket.values.get(key) == "":
field_changes.append('%s: **%s** => ""' % (key, old_values.get(key)))
else:
                field_changes.append('%s: **%s** => **%s**' % (key, old_values.get(key),
                                                               ticket.values.get(key)))
content += ", ".join(field_changes)
send_update(ticket, content)
def ticket_deleted(self, ticket):
# type: (Any) -> None
"""Called when a ticket is deleted."""
content = "%s was deleted." % markdown_ticket_url(ticket, heading="Ticket")
send_update(ticket, content)
|
minghu6/csdn
|
csdn/csdn_offline/csdn_offline_common.py
|
Python
|
bsd-3-clause
| 313
| 0.00639
|
# -*- coding:utf-8 -*-
#!/usr/bin/env python3
"""
"""
from collections import namedtuple
from minghu6.internet.proxy_ip import proxy_ip
from minghu6.text.seq_enh import filter_invalid_char
URL_LIST_FILE_PATH = 'URList-{username:s}.txt'
UrlNameTuple = namedtuple('UrlNameTuple', ['url',
                                           'title'])
|
chdecultot/erpnext
|
erpnext/accounts/report/profitability_analysis/profitability_analysis.py
|
Python
|
gpl-3.0
| 5,688
| 0.031997
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate, formatdate, cstr
from erpnext.accounts.report.financial_statements import filter_accounts, filter_out_zero_value_rows
from erpnext.accounts.report.trial_balance.trial_balance import validate_filters
value_fields = ("income", "expense", "gross_profit_loss")
def execute(filters=None):
if not filters.get('based_on'): filters["based_on"] = 'Cost Center'
based_on = filters.based_on.replace(' ', '_').lower()
validate_filters(filters)
accounts = get_accounts_data(based_on, filters.get("company"))
data = get_data(accounts, filters, based_on)
columns = get_columns(filters)
return columns, data
def get_accounts_data(based_on, company):
if based_on == 'cost_center':
return frappe.db.sql("""select name, parent_cost_center as parent_account, cost_center_name as account_name, lft, rgt
from `tabCost Center` where company=%s order by name""", company, as_dict=True)
else:
return frappe.get_all('Project', fields = ["name"], filters = {'company': company}, order_by = 'name')
def get_data(accounts, filters, based_on):
if not accounts:
return []
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
gl_entries_by_account = {}
set_gl_entries_by_account(filters.get("company"), filters.get("from_date"),
filters.get("to_date"), based_on, gl_entries_by_account, ignore_closing_entries=not flt(filters.get("with_period_closing_entry")))
total_row = calculate_values(accounts, gl_entries_by_account, filters)
accumulate_values_into_parents(accounts, accounts_by_name)
data = prepare_data(accounts, filters, total_row, parent_children_map, based_on)
data = filter_out_zero_value_rows(data, parent_children_map,
show_zero_values=filters.get("show_zero_values"))
return data
def calculate_values(accounts, gl_entries_by_account, filters):
init = {
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0
}
total_row = {
"cost_center": None,
"account_name": "'" + _("Total") + "'",
"warn_if_negative": True,
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0,
"account": "'" + _("Total") + "'",
"parent_account": None,
"indent": 0,
"has_value": True
}
for d in accounts:
d.update(init.copy())
# add opening
for entry in gl_entries_by_account.get(d.name, []):
if cstr(entry.is_opening) != "Yes":
if entry.type == 'Income':
d["income"] += flt(entry.credit) - flt(entry.debit)
if entry.type == 'Expense':
d["expense"] += flt(entry.debit) - flt(entry.credit)
d["gross_profit_loss"] = d.get("income") - d.get("expense")
total_row["income"] += d["income"]
total_row["expense"] += d["expense"]
total_row["gross_profit_loss"] = total_row.get("income") - total_row.get("expense")
return total_row
def accumulate_values_into_parents(accounts, accounts_by_name):
for d in reversed(accounts):
if d.parent_account:
for key in value_fields:
accounts_by_name[d.parent_account][key] += d[key]
def prepare_data(accounts, filters, total_row, parent_children_map, based_on):
data = []
company_currency = frappe.get_cached_value('Company', filters.get("company"), "default_currency")
for d in accounts:
has_value = False
row = {
"account_name": d.account_name or d.name,
"account": d.name,
"parent_account": d.parent_account,
"indent": d.indent,
"fiscal_year": filters.get("fiscal_year"),
"currency": company_currency,
"based_on": based_on
}
for key in value_fields:
row[key] = flt(d.get(key, 0.0), 3)
if abs(row[key]) >= 0.005:
# ignore zero values
has_value = True
row["has_value"] = has_value
data.append(row)
data.extend([{},total_row])
return data
def get_columns(filters):
return [
{
"fieldname": "account",
"label": _(filters.get("based_on")),
"fieldtype": "Link",
"options": filters.get("based_on"),
"width": 300
},
{
"fieldname": "income",
"label": _("Income"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "expense",
"label": _("Expense"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "gross_profit_loss",
"label": _("Gross Profit / Loss"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
}
]
def set_gl_entries_by_account(company, from_date, to_date, based_on, gl_entries_by_account,
ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
	gl_entries = frappe.db.sql("""select posting_date, {based_on} as based_on, debit, credit,
is_opening, (select root_type from `tabAccount` where name = account) as type
from `tabGL Entry` where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and {based_on} is not null
		order by {based_on}, posting_date""".format(additional_conditions="\n".join(additional_conditions), based_on= based_on),
{
"company": company,
"from_date": from_date,
"to_date": to_date
},
as_dict=True)
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.based_on, []).append(entry)
return gl_entries_by_account
|
lucasrangit/twitter-winner
|
twitter-winner/oauthlib/oauth1/rfc5849/parameters.py
|
Python
|
mit
| 4,909
| 0.000407
|
# -*- coding: utf-8 -*-
"""
oauthlib.parameters
~~~~~~~~~~~~~~~~~~~
This module contains methods related to `section 3.5`_ of the OAuth 1.0a spec.
.. _`section 3.5`: http://tools.ietf.org/html/rfc5849#section-3.5
"""
from __future__ import absolute_import, unicode_literals
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
from . import utils
from oauthlib.common import extract_params, urlencode
# TODO: do we need filter_params now that oauth_params are handled by Request?
# We can easily pass in just oauth protocol params.
@utils.filter_params
def prepare_headers(oauth_params, headers=None, realm=None):
"""**Prepare the Authorization header.**
Per `section 3.5.1`_ of the spec.
Protocol parameters can be transmitted using the HTTP "Authorization"
header field as defined by `RFC2617`_ with the auth-scheme name set to
"OAuth" (case insensitive).
For example::
Authorization: OAuth realm="Example",
oauth_consumer_key="0685bd9184jfhq22",
oauth_token="ad180jjd733klru7",
oauth_signature_method="HMAC-SHA1",
oauth_signature="wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D",
oauth_timestamp="137131200",
oauth_nonce="4572616e48616d6d65724c61686176",
oauth_version="1.0"
.. _`section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
.. _`RFC2617`: http://tools.ietf.org/html/rfc2617
"""
headers = headers or {}
# Protocol parameters SHALL be included in the "Authorization" header
# field as follows:
authorization_header_parameters_parts = []
for oauth_parameter_name, value in oauth_params:
# 1. Parameter names and values are encoded per Parameter Encoding
# (`Section 3.6`_)
#
# .. _`Section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
escaped_name = utils.escape(oauth_parameter_name)
escaped_value = utils.escape(value)
# 2. Each parameter's name is immediately followed by an "=" character
# (ASCII code 61), a """ character (ASCII code 34), the parameter
# value (MAY be empty), and another """ character (ASCII code 34).
part = '{0}="{1}"'.format(escaped_name, escaped_value)
authorization_header_parameters_parts.append(part)
# 3. Parameters are separated by a "," character (ASCII code 44) and
# OPTIONAL linear whitespace per `RFC2617`_.
#
# .. _`RFC2617`: http://tools.ietf.org/html/rfc2617
authorization_header_parameters = ', '.join(
authorization_header_parameters_parts)
# 4. The OPTIONAL "realm" parameter MAY be added and interpreted per
# `RFC2617 section 1.2`_.
#
# .. _`RFC2617 section 1.2`: http://tools.ietf.org/html/rfc2617#section-1.2
if realm:
# NOTE: realm should *not* be escaped
authorization_header_parameters = ('realm="%s", ' % realm +
authorization_header_parameters)
# the auth-scheme name set to "OAuth" (case insensitive).
authorization_header = 'OAuth %s' % authorization_header_parameters
# contribute the Authorization header to the given headers
full_headers = {}
full_headers.update(headers)
full_headers['Authorization'] = authorization_header
return full_headers
def _append_params(oauth_params, params):
"""Append OAuth params to an existing set of parameters.
    Both params and oauth_params must be lists of 2-tuples.
Per `section 3.5.2`_ and `3.5.3`_ of the spec.
.. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
.. _`3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
"""
merged = list(params)
merged.extend(oauth_params)
    # The request URI / entity-body MAY include other request-specific
    # parameters, in which case, the protocol parameters SHOULD be appended
# following the request-specific parameters, properly separated by an "&"
# character (ASCII code 38)
merged.sort(key=lambda i: i[0].startswith('oauth_'))
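    # False sorts before True, so the oauth_* parameters end up after the
    # request-specific ones, as the comment above requires.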
return merged
def prepare_form_encoded_body(oauth_params, body):
"""Prepare the Form-Encoded Body.
Per `section 3.5.2`_ of the spec.
.. _`section 3.5.2`: http://tools.ietf.org/html/rfc5849#section-3.5.2
"""
# append OAuth params to the existing body
return _append_params(oauth_params, body)
def prepare_request_uri_query(oauth_params, uri):
"""Prepare the Request URI Query.
Per `section 3.5.3`_ of the spec.
.. _`section 3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3
"""
# append OAuth params to the existing set of query components
sch, net, path, par, query, fra = urlparse(uri)
query = urlencode(_append_params(oauth_params, extract_params(query) or []))
return urlunparse((sch, net, path, par, query, fra))
|
portableant/open-context-py
|
opencontext_py/apps/ocitems/projects/metadata.py
|
Python
|
gpl-3.0
| 15,649
| 0.001342
|
import json
import numpy as np
from numpy import vstack, array
from scipy.cluster.vq import kmeans,vq
from django.db import models
from django.db.models import Avg, Max, Min
from opencontext_py.libs.general import LastUpdatedOrderedDict
from opencontext_py.apps.ocitems.geospace.models import Geospace
from opencontext_py.apps.ocitems.events.models import Event
from opencontext_py.apps.ocitems.assertions.containment import Containment
from opencontext_py.apps.ocitems.projects.models import Project
from opencontext_py.libs.chronotiles import ChronoTile
from opencontext_py.libs.globalmaptiles import GlobalMercator
from opencontext_py.apps.entities.entity.models import Entity
class ProjectRels():
"""
Checks on project relationships with subprojects
"""
def __init__(self):
self.sub_projects = False
self.parent_projects = []
self.child_entities = LastUpdatedOrderedDict()
def get_sub_projects(self, uuid):
"""
Gets (child) sub-projects from the current project uuid
"""
sub_projs = Project.objects.filter(project_uuid=uuid).exclude(uuid=uuid)
if(len(sub_projs) > 0):
self.sub_projects = sub_projs
else:
self.sub_projects = False
return self.sub_projects
def get_jsonldish_parents(self, uuid, add_original=True):
"""
Gets parent projects for a project.
Returns a list of dictionary objects similar to JSON-LD expectations
This is useful for faceted search
If add_original is true, add the original UUID for the entity
that's the childmost item, at the bottom of the hierarchy
"""
output = False
raw_parents = self.get_parents(uuid)
if(add_original):
# add the original identifer to the list of parents, at lowest rank
raw_parents.insert(0, uuid)
if len(raw_parents) > 0:
# reverse the order of the list, to make top most concept
# first
output = []
parents = raw_parents[::-1]
for par_id in parents:
ent = Entity()
                found = ent.dereference(par_id)
if(found):
p_item = LastUpdatedOrderedDict()
p_item['id'] = ent.uri
|
                    p_item['slug'] = ent.slug
                    p_item['label'] = ent.label
                    if(ent.data_type is not False):
                        p_item['type'] = ent.data_type
                    else:
                        p_item['type'] = '@id'
                    output.append(p_item)
return output
def get_parents(self, uuid):
""" gets the project parents , recursively"""
par_proj = Project.objects.filter(uuid=uuid).exclude(project_uuid=uuid)[:1]
if len(par_proj) > 0:
self.parent_projects.append(par_proj[0].project_uuid)
self.get_parents(par_proj[0].project_uuid) # recursively look for the parent of the parent
return self.parent_projects
class ProjectMeta():
"""
Generates geospatial and chronological summaries for project metadata
"""
MAX_CLUSTERS = 15
MIN_CLUSTER_SIZE = .05 # of the diagonal length between min(lat/lon) and max(lat/lon)
LENGTH_CENTOID_FACTOR = .75 # for comparing cluster diagonal length with centoid distances
def __init__(self):
self.project_uuid = False
self.geo_objs = False # geospace objects of proj. metadata
self.max_geo_range = False # max distance (Euclidean coordinates)
# of rectangular region with all project points
self.event_ents = False
self.geo_range = False # dict object of min, max longitude, latitudes
def make_geo_meta(self, project_uuid):
output = False
self.project_uuid = project_uuid
pr = ProjectRels()
sub_projs = pr.get_sub_projects(project_uuid)
if sub_projs is False:
uuids = project_uuid
else:
uuids = []
for sub_proj in sub_projs:
uuids.append(sub_proj.uuid)
uuids.append(project_uuid)
self.get_geo_range(uuids)
if self.geo_range is not False:
min_lon_lat = [self.geo_range['longitude__min'],
self.geo_range['latitude__min']]
max_lon_lat = [self.geo_range['longitude__max'],
self.geo_range['latitude__max']]
min_point = np.fromiter(min_lon_lat, np.dtype('float'))
max_point = np.fromiter(max_lon_lat, np.dtype('float'))
self.max_geo_range = self.get_point_distance(min_point[0],
min_point[1],
max_point[0],
max_point[1])
print(str(self.max_geo_range))
if self.max_geo_range == 0:
# only 1 geopoint known for the project
lon_lat = [self.geo_range['longitude__min'],
self.geo_range['latitude__max']]
clusts = {'centroids': [lon_lat],
'boxes': []}
else:
# need to cluster geo data
clusts = self.cluster_geo(uuids)
self.make_geo_objs(clusts)
output = True
return output
def make_geo_objs(self, clusts):
geo_objs = []
if len(clusts['boxes']) == 0:
# no bounding box polygons, just a simple point to add
print(str(clusts))
geo_obj = self.make_geo_obj(1,
clusts['centroids'][0][0],
clusts['centroids'][0][1]
)
geo_objs.append(geo_obj)
else:
# has 1 or more bounding box polygons
i = 0
for box in clusts['boxes']:
act_cent = clusts['centroids'][i]
i += 1
geo_obj = self.make_geo_obj(i,
act_cent[0],
act_cent[1],
box
)
if(box[0][0][0] != box[0][2][0] and box[0][0][1] != box[0][2][1]):
# only add a box if the cluster has more than 1 item
geo_objs.append(geo_obj)
self.geo_objs = geo_objs
return geo_objs
def make_geo_obj(self, feature_id, lon, lat, coords=False):
geo_obj = Geospace()
geo_obj.uuid = self.project_uuid
geo_obj.project_uuid = self.project_uuid
geo_obj.source_id = 'Project metadata summary'
geo_obj.item_type = 'projects'
geo_obj.feature_id = feature_id
geo_obj.meta_type = 'oc-gen:geo-coverage'
if coords is False:
geo_obj.ftype = 'Point'
geo_obj.coordinates = ''
else:
geo_obj.ftype = 'Polygon'
geo_obj.coordinates = json.dumps(coords, ensure_ascii=False)
geo_obj.latitude = lat
geo_obj.longitude = lon
geo_obj.specificity = 0
geo_obj.note = 'Project geographic coverage \
summarized from geospatial data \
describing subjects published \
with this project.'
return geo_obj
def cluster_geo(self, uuids):
""" Puts geo points into clusters """
dist_geo = self.get_distinct_geo(uuids)
lon_lats = []
for geo in dist_geo:
# using geojson order of lon / lat
lon_lat = [geo['longitude'], geo['latitude']]
# makes it a numpy float
dpoint = np.fromiter(lon_lat, np.dtype('float'))
lon_lats.append(dpoint)
# create a numpy array object from my list of float coordinates
data = array(lon_lats)
resonable_clusters = False
number_clusters = self.MAX_CLUSTERS
while resonable_
|
yuanming-hu/taichi
|
python/taichi/__main__.py
|
Python
|
mit
| 32
| 0
|
from ._main import main
main()
|
luosch/leetcode
|
python/Add Binary.py
|
Python
|
mit
| 964
| 0.004149
|
class Solution(object):
def addBinary(self, a, b):
len_a = len(a)
len_b = len(b)
length = max(len_a, len_b)
c = [0] * (length + 1)
if len_a < len_b:
a, b = b, a
len_a, len_b = len_b, len_a
for i in range(0, length):
if len_a - 1 - i >= 0 and len_b - 1 - i >= 0:
num_a = int(a[len_a - 1 - i])
num_b = int(b[len_b - 1 - i])
                c[i] += (num_a + num_b)
c[i + 1] += c[i] / 2
c[i] %= 2
            elif len_a - 1 - i >= 0 and len_b - 1 - i < 0:
num_a = int(a[len_a - 1 - i])
c[i] += num_a
c[i + 1] += c[i] / 2
c[i] %= 2
else:
break
while len(c) > 1 and c[-1] == 0:
c.pop()
        answer = ''
while c:
answer += str(c.pop())
return answer
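# Quick check of the carry handling above (illustrative): under Python 2,
# Solution().addBinary("11", "1") returns "100" (binary 3 + 1 = 4).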
|
joakim-hove/ert
|
ert_gui/ert_splash.py
|
Python
|
gpl-3.0
| 2,664
| 0
|
import sys
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QSplashScreen, QApplication
from qtpy.QtGui import QColor, QPen, QFont
from ert_gui.ertwidgets import resourceImage
class ErtSplash(QSplashScreen):
def __init__(self, version_string="Version string"):
QSplashScreen.__init__(self)
self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.SplashScreen)
splash_width = 720
splash_height = 400
desktop = QApplication.desktop()
screen = desktop.screenGeometry(desktop.primaryScreen()).size()
screen_width, screen_height = screen.width(), screen.height()
x = screen_width // 2 - splash_width // 2
y = screen_height // 2 - splash_height // 2
self.setGeometry(x, y, splash_width, splash_height)
self.ert = "ERT"
self.ert_title = "Ensemble based Reservoir Tool"
self.version = version_string
self.timestamp = "Timestamp string"
def drawContents(self, painter):
"""@type painter: QPainter"""
w = self.width()
h = self.height()
margin = 10
background = QColor(210, 211, 215)
text_color = QColor(0, 0, 0)
foreground = QColor(255, 255, 255)
painter.setBrush(background)
painter.fillRect(0, 0, w, h, background)
pen = QPen()
pen.setWidth(2)
pen.setColor(foreground)
painter.setPen(pen)
painter.drawRect(0, 0, w - 1, h - 1)
text_x = 2 * margin
top_offset = margin
text_area_width = w - 2 * margin
painter.setPen(text_color)
text_size = 150
font = QFont("Serif")
font.setStyleHint(QFont.Serif)
font.setPixelSize(text_size)
painter.setFont(font)
painter.drawText(
text_x,
margin + top_offset,
text_area_width,
text_size,
int(Qt.AlignHCenter | Qt.AlignCenter),
self.ert,
)
top_offset += text_size + 2 * margin
text_size = 25
font.setPixelSize(text_size)
painter.setFont(font)
painter.drawText(
text_x,
top_offset,
text_area_width,
text_size,
int(Qt.AlignHCenter | Qt.AlignCenter),
self.ert_title,
)
|
top_offset += text_size + 4 * margin
text_size = 20
        font.setPixelSize(text_size)
painter.setFont(font)
painter.drawText(
text_x,
top_offset,
text_area_width,
text_size,
int(Qt.AlignHCenter | Qt.AlignCenter),
self.version,
)
|
cbettemb/askomics
|
askomics/libaskomics/TripleStoreExplorer.py
|
Python
|
agpl-3.0
| 7,283
| 0.009886
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.rdfdb.SparqlQueryBuilder import SparqlQueryBuilder
from askomics.libaskomics.rdfdb.QueryLauncher import QueryLauncher
class TripleStoreExplorer(ParamManager):
"""
Use the different Sparql template queries in order to:
- get special settings:
* relation between two classes (nodes) specified by another class (hidden node).
* virtual relation adding special Where clauses specified in the database domain.
- get the suggestion list for classes listed as Categories and displayed
as node attributes by AskOmics.
- get the startpoints to begin a query building.
- get the neighbor nodes and the attributes of a node.
"""
def __init__(self, settings, session, dico={}):
ParamManager.__init__(self, settings, session)
self.log = logging.getLogger(__name__)
def get_start_points(self):
"""
Get the possible starting points for your graph.
:return: List of starting points
:rtype: Node list
"""
self.log.debug(" =========== TripleStoreExplorer:get_start_points ===========")
nodes = []
        sqb = SparqlQueryBuilder(self.settings, self.session)
ql = QueryLauncher(self.settings, self.session)
sparql_template = self.get_template_sparql(self.ASKOMICS_initial_query)
query = sqb.load_from_file(sparql_template, {}).query
results = ql.process_query(query)
for result in results:
uri = result["nodeUri"]
label = result["nodeLabel"]
|
nodes.append({ 'uri': uri, 'label': label })
return nodes
def getUserAbstraction(self):
"""
Get the user abstraction (relation and entity as subject and object)
:return:
:rtype:
"""
data = {}
listEntities = {}
self.log.debug(" =========== TripleStoreExplorer:getUserAbstraction ===========")
nodes_startpoint = self.get_start_points()
# add start node at first
for node in nodes_startpoint:
listEntities[node['uri']]=0
sqb = SparqlQueryBuilder(self.settings, self.session)
ql = QueryLauncher(self.settings, self.session)
sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionRelationUser)
query = sqb.load_from_file(sparql_template, { 'OwlProperty' : 'owl:ObjectProperty'}).query
results = ql.process_query(query)
data['relations'] = results
for elt in results:
if not elt['object'] in listEntities:
listEntities[elt['object']]=0
if not elt['subject'] in listEntities:
listEntities[elt['subject']]=0
#sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionRelationUser)
#query = sqb.load_from_file(sparql_template, { 'OwlProperty' : 'owl:SymmetricProperty'}).query
#results = ql.process_query(query)
#data['relationsSym'] = results
#for elt in results:
# if not elt['object'] in listEntities:
# listEntities[elt['object']]=0
# if not elt['subject'] in listEntities:
# listEntities[elt['subject']]=0
filterEntities = ' '.join(["<"+s+">" for s in listEntities.keys()])
sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionEntityUser)
query = sqb.load_from_file(sparql_template, {"entities" : filterEntities }).query
results = ql.process_query(query)
data['entities'] = results
sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionAttributesEntityUser)
query = sqb.load_from_file(sparql_template, {"entities" : filterEntities }).query
results = ql.process_query(query)
data['attributes'] = results
sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionCategoriesEntityUser)
query = sqb.load_from_file(sparql_template, {"entities" : filterEntities }).query
results = ql.process_query(query)
data['categories'] = results
sparql_template = self.get_template_sparql(self.ASKOMICS_abstractionPositionableEntityUser)
query = sqb.load_from_file(sparql_template, {}).query
results = ql.process_query(query)
data['positionable'] = results
return data
# build SPARQL Block following this grammar :
    # B ==> [ A , KEYWORD ] . KEYWORD is a string prefix for BLOCK (ex: OPTIONAL, SERVICE)
    # A ==> [ ((B|F),)+ ] . a list of Blocks or constraint leaves
# F ==> [ CONSTRAINT1, CONSTRAINT2,.... ] an array contains only constraints
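    # For illustration (triple patterns made up), a constraints value such as
    #   [ '?s rdf:type ?t', [ [ '?s rdfs:label ?l' ], 'OPTIONAL' ] ]
    # is rendered by buildRecursiveBlock('', constraints) roughly as:
    #   ?s rdf:type ?t.
    #   OPTIONAL {
    #       ?s rdfs:label ?l.
    #   }
    # (the nested block is indented with a tab in the actual output).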
def buildRecursiveBlock(self,tabul,constraints):
if len(constraints) == 2 and isinstance(constraints[0], list) and isinstance(constraints[1], str):
return tabul+constraints[1] + "{\n"+ self.buildRecursiveBlock(tabul+'\t',constraints[0])+tabul+"}\n"
else:
req = "";
for elt in constraints:
if isinstance(elt, str):
req+=tabul+elt+".\n"
elif len(elt) == 2 and isinstance(elt[0], list) and isinstance(elt[1], str):
if elt[1]!="":
req+= tabul+elt[1] + " {\n"+ self.buildRecursiveBlock(tabul+'\t',elt[0])+tabul+"}\n"
else:
req+= self.buildRecursiveBlock(tabul,elt[0])
else:
raise ValueError("buildRecursiveBlock:: constraint malformed :"+str(elt))
return req
return ""
def build_sparql_query_from_json(self,variates,constraintesRelations,limit,sendRequestToTPS):
self.log.debug("variates")
self.log.debug(variates)
self.log.debug("constraintesRelations")
self.log.debug(constraintesRelations)
sqb = SparqlQueryBuilder(self.settings, self.session)
ql = QueryLauncher(self.settings, self.session)
res = ql.execute_query(sqb.get_list_named_graphs().query)
namedGraphs = []
#for indexResult in range(len(res['results']['bindings'])):
# namedGraphs.append(res['results']['bindings'][indexResult]['g']['value'])
req = ""
req += "SELECT DISTINCT "+' '.join(variates)+"\n"
#TODO OFI: External Service do not work and, anyway, graphes have to be selectionned by the user in the UI
#
#for graph in namedGraphs:
# req += "FROM "+ "<"+graph+ ">"+"\n"
req += "WHERE \n"
req += self.buildRecursiveBlock('',constraintesRelations)
if limit != None and limit >0 :
req +=" LIMIT "+str(limit)
sqb = SparqlQueryBuilder(self.settings, self.session)
prefixes = sqb.header_sparql_config(req)
query = prefixes+req
results = {}
if sendRequestToTPS:
ql = QueryLauncher(self.settings, self.session)
results = ql.process_query(query)
else:
# add comment inside query to inform user
query = "# endpoint = "+self.get_param("askomics.endpoint") + "\n" + query
return results,query
|
pdawson1983/Python-F5-AFM-App
|
F5AFMApp.py
|
Python
|
mit
| 2,130
| 0.022535
|
import os
from flask import Flask, url_for, send_from_directory, flash, get_flashed_messages, session, request, render_template, redirect
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from configure_database import Base, F5Device, AFMStat, StatValue
engine = create_engine('sqlite:///F5AFM_App.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
#spawn Flask application
app = Flask(__name__)
app.secret_key = '13641ijkqrewf9dflkq359faan230fanoacv92r3noj2398cncq92njfqwfughq9f0823nbr9fjawh90q23rlkijqhwef98qroh'
#Added Favicon Support
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico', mimetype='image/vnd.microsoft.icon')
#Point default Traffic to home template
@app.route('/')
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/device/')
@app.route('/devices/')
def deviceList():
devices = session.query(F5Device).all()
return render_template('devicelist.html', devices=devices)
@app.route('/device/new', methods=['GET','POST'])
def newDevice():
if request.method == 'POST':
post = request.get_json()
        device = F5Device(hostName = post.get('hostName'), ipAddress = post.get('ipAddress'), details = post.get('details'), apiUserName = post.get('apiUserName'), apiPassword = post.get('apiPassword'))
session.add(device)
session.commit()
flash("Added New Device - %s" %device.hostName)
return redirect(url_for('deviceList'))
else:
return render_template('newdevice.html')
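# Illustrative request for the POST branch of newDevice() above (field names are
# taken from the handler; host and values are placeholders):
#   curl -X POST http://localhost/device/new -H 'Content-Type: application/json' \
#        -d '{"hostName": "bigip1", "ipAddress": "192.0.2.10", "details": "lab",
#             "apiUserName": "admin", "apiPassword": "secret"}'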
@app.route('/device/<int:device_id>')
def device(device_id):
device = session.query(F5Device).filter_by(id = device_id).one()
return render_template('device.html', device=device)
@app.route('/device/<int:device_id>/stats/<int:stats_id>')
def statPage(device_id, stats_id):
text = ''
text += "%s, %s" %(device_id, stats_id)
return text
@app.route('/device/<int:device_id>/stats/new')
def newStat(device_id):
return render_template('newstat.html')
if __name__=='__main__':
app.run(host='0.0.0.0', port=80, debug=True)
|
xmikos/hangupsbot
|
hangupsbot/handlers/forwarding.py
|
Python
|
gpl-3.0
| 1,493
| 0.00134
|
import hangups
from hangupsbot.utils import text_to_segments
from hangupsbot.handlers import handler
@handler.register(priority=7, event=hangups.ChatMessageEvent)
def handle_forward(bot, event):
"""Handle message forwarding"""
# Test if message is not empty
if not event.text:
return
# Test if message forwarding is enabled
if not bot.get_config_suboption(event.conv_id, 'forwarding_enabled'):
return
# Test if there are actually a
|
ny forwarding destinations
forward_to_list = bot.get_config_suboption(event.conv_id, 'forward_to')
if not forward_to_list:
return
# Prepare attachments
image_id_list = yield from bot.upload_images(event.conv_event.attachments)
# Forward message to all destinations
for dst in forward_to_list:
try:
|
conv = bot._conv_list.get(dst)
except KeyError:
continue
# Prepend forwarded message with name of sender
link = 'https://plus.google.com/u/0/{}/about'.format(event.user_id.chat_id)
segments = text_to_segments('**[{}]({}):** '.format(event.user.full_name, link))
# Copy original message segments
segments.extend(event.conv_event.segments)
# Send text message first (without attachments)
yield from conv.send_message(segments)
# If there are attachments, send them separately
for image_id in image_id_list:
yield from conv.send_message([], image_id=image_id)
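# --- Configuration sketch (not part of forwarding.py) ---------------------------------
# A hedged illustration of the two options the handler reads above. Where these keys live
# in hangupsbot's config (globally or as per-conversation overrides) is decided by
# get_config_suboption(); the conversation ID below is made up.
example_forwarding_options = {
    "forwarding_enabled": True,
    # destination conversation IDs, each resolved with bot._conv_list.get(dst)
    "forward_to": ["UgxHypotheticalConversationId4AaABAQ"],
}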
|
root-mirror/root
|
interpreter/llvm/src/tools/clang/docs/conf.py
|
Python
|
lgpl-2.1
| 9,185
| 0.006641
|
# -*- coding: utf-8 -*-
#
# Clang documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 9 20:01:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import absolute_import, division, print_function
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang'
copyright = u'2007-%d, The Clang Team' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short version.
version = '9'
# The full version, including alpha/beta/rc tags.
release = '9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, auth
|
or, documentclass [howto/manual]).
latex_documents = [
('index', 'Clang.tex', u'Clang Documentation',
u'The Clang Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual"
|
documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory. This was copied from llvm/docs/conf.py.
basedir = os.path.dirname(__file__)
man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
# Ignore non-ReST files and the index page.
if not name.endswith('.rst') or name in ('index.rst',):
continue
# Otherwise, automatically extract the description.
file_subpath = os.path.join(command_guide_subpath, name)
with open(os.path.join(command_guide_path, name)) as f:
title = f.readline().rstrip('\n')
header = f.readline().rstrip('\n')
if len(header) != len(title):
print((
"error: invalid header in %r (does not match title)" % (
file_subpath,)), file=sys.stderr)
if ' - ' not in title:
print((
("error: invalid title in %r "
"(expected '<name> - <description>
|
MaxRobinson/CS449
|
project7/extra.py
|
Python
|
apache-2.0
| 494,316
| 0.000004
|
""" Created by Max 12/10/2017 """
from ValueIteration import ValueIteration
from Game import Game
x = {(0, 32, -5, -5): None,
(0, 32, -5, -4): None,
(0, 32, -5, -3): None,
(0, 32, -5, -2): None,
(0, 32, -5, -1): None,
(0, 32, -5, 0): None,
(0, 32, -5, 1): None,
(0, 32, -5, 2): None,
(0, 32, -5, 3): None,
(0, 32, -5, 4): None,
(0, 32, -5, 5): None,
(0, 32, -4, -5): None,
(0, 32, -4, -4): None,
(0, 32, -4, -3): None,
(0, 32, -4, -2): None,
(0, 32, -4, -1): None,
(0, 32, -4, 0): None,
(0, 32, -4, 1): None,
(0, 32, -4, 2): None,
(0, 32, -4, 3): None,
(0, 32, -4, 4): None,
(0, 32, -4, 5): None,
(0, 32, -3, -5): None,
(0, 32, -3, -4): None,
(0, 32, -3, -3): None,
(0, 32, -3, -2): None,
(0, 32, -3, -1): None,
(0, 32, -3, 0): None,
(0, 32, -3, 1): None,
(0, 32, -3, 2): None,
(0, 32, -3, 3): None,
(0, 32, -3, 4): None,
(0, 32, -3, 5): None,
(0, 32, -2, -5): None,
(0, 32, -2, -4): None,
(0, 32, -2, -3): None,
(0, 32, -2, -2): None,
(0, 32, -2, -1): None,
(0, 32, -2, 0): None,
(0, 32, -2, 1): None,
(0, 32, -2, 2): None,
(0, 32, -2, 3): None,
(0, 32, -2, 4): None,
(0, 32, -2, 5): None,
(0, 32, -1, -5): None,
(0, 32, -1, -4): None,
(0, 32, -1, -3): None,
(0, 32, -1, -2): None,
(0, 32, -1, -1): None,
(0, 32, -1, 0): None,
(0, 32, -1, 1): None,
(0, 32, -1, 2): None,
(0, 32, -1, 3): None,
(0, 32, -1, 4): None,
(0, 32, -1, 5): None,
(0, 32, 0, -5): None,
(0, 32, 0, -4): None,
(0, 32, 0, -3): None,
(0, 32, 0, -2): None,
(0, 32, 0, -1): None,
(0, 32, 0, 0): None,
(0, 32, 0, 1): None,
(0, 32, 0, 2): None,
(0, 32, 0, 3): None,
(0, 32, 0, 4): None,
(0, 32, 0, 5): None,
(0, 32, 1, -5): None,
(0, 32, 1, -4): None,
(0, 32, 1, -3): None,
(0, 32, 1, -2): None,
(0, 32, 1, -1): None,
(0, 32, 1, 0): None,
(0, 32, 1, 1): None,
(0, 32, 1, 2): None,
(0, 32, 1, 3): None,
(0, 32, 1, 4): None,
(0, 32, 1, 5): None,
(0, 32, 2, -5): None,
(0, 32, 2, -4): None,
(0, 32, 2, -3): None,
(0, 32, 2, -2): None,
(0, 32, 2, -1): None,
(0, 32, 2, 0): None,
(0, 32, 2, 1): None,
(0, 32, 2, 2): None,
(0, 32, 2, 3): None,
(0, 32, 2, 4): None,
(0, 32, 2, 5): None,
(0, 32, 3, -5): None,
(0, 32, 3, -4): None,
(0, 32, 3, -3): None,
(0, 32, 3, -2): None,
(0, 32, 3, -1): None,
(0, 32, 3, 0): None,
(0, 32, 3, 1): None,
(0, 32, 3, 2): None,
(0, 32, 3, 3): None,
(0, 32, 3, 4): None,
(0, 32, 3, 5): None,
(0, 32, 4, -5): None,
(0, 32, 4, -4): None,
(0, 32, 4, -3): None,
(0, 32, 4, -2): None,
(0, 32, 4, -1): None,
(0, 32, 4, 0): None,
(0, 32, 4, 1): None,
(0, 32, 4, 2): None,
(0, 32, 4, 3): None,
(0, 32, 4, 4): None,
(0, 32, 4, 5): None,
(0, 32, 5, -5): None,
(0, 32, 5, -4): None,
(0, 32, 5, -3): None,
(0, 32, 5, -2): None,
(0, 32, 5, -1): None,
(0, 32, 5, 0): None,
(0, 32, 5, 1): None,
(0, 32, 5, 2): None,
(0, 32, 5, 3): None,
(0, 32, 5, 4): None,
(0, 32, 5, 5): None,
(0, 33, -5, -5): None,
(0, 33, -5, -4): None,
(0, 33, -5, -3): None,
(0, 33, -5, -2): None,
(0, 33, -5, -1): None,
(0, 33, -5, 0): None,
(0, 33, -5, 1): None,
(0, 33, -5, 2): None,
(0, 33, -5, 3): None,
(0, 33, -5, 4): None,
(0, 33, -5, 5): None,
(0, 33, -4, -5): None,
(0, 33, -4, -4): None,
(0, 33, -4, -3): None,
(0, 33, -4, -2): None,
(0, 33, -4, -1): None,
(0, 33, -4, 0): None,
(0, 33, -4, 1): None,
(0, 33, -4, 2): None,
(0, 33, -4, 3): None,
(0, 33, -4, 4): None,
(0, 33, -4, 5): None,
(0, 33, -3, -5): None,
(0, 33, -3, -4): None,
(0, 33, -3, -3): None,
(0, 33, -3, -2): None,
(0, 33, -3, -1): None,
(0, 33, -3, 0): None,
(0, 33, -3, 1): None,
(0, 33, -3, 2): None,
(0, 33, -3, 3): None,
(0, 33, -3, 4): None,
(0, 33, -3, 5): None,
(0, 33, -2, -5): None,
(0, 33, -2, -4): None,
(0, 33, -2, -3): None,
(0, 33, -2, -2): None,
(0, 33, -2, -1): None,
(0, 33, -2, 0): None,
(0, 33, -2, 1): None,
(0, 33, -2, 2): None,
(0, 33, -2, 3): None,
(0, 33, -2, 4): None,
(0, 33, -2, 5): None,
(0, 33, -1, -5): None,
(0, 33, -1, -4): None,
(0, 33, -1, -3): None,
(0, 33, -1, -2): None,
(0, 33, -1, -1): None,
(0, 33, -1, 0): None,
(0, 33, -1, 1): None,
(0, 33, -1, 2): None,
(0, 33, -1, 3): None,
(0, 33, -1, 4): None,
(0, 33, -1, 5): None,
(0, 33, 0, -5): None,
(0, 33, 0, -4): None,
(0, 33, 0, -3): None,
(0, 33, 0, -2): None,
(0, 33, 0, -1): None,
(0, 33, 0, 0): None,
(0, 33, 0, 1): None,
(0, 33, 0, 2): None,
(0, 33, 0, 3): None,
(0, 33, 0, 4): None,
(0, 33, 0, 5): None,
(0, 33, 1, -5): None,
(0, 33, 1, -4): None,
(0, 33, 1, -3): None,
(0, 33, 1, -2): None,
(0, 33, 1, -1): None,
(0, 33, 1, 0): None,
(0, 33, 1, 1): None,
(0, 33, 1, 2): None,
(0, 33, 1, 3): None,
(0, 33, 1, 4): None,
(0, 33, 1, 5): None,
(0, 33, 2, -5): None,
(0, 33, 2, -4): None,
(0, 33, 2, -3): None,
(0, 33, 2, -2): None,
(0, 33, 2, -1): None,
(0, 33, 2, 0): None,
(0, 33, 2, 1): None,
(0, 33, 2, 2): None,
(0, 33, 2, 3): None,
(0, 33, 2, 4): None,
(0, 33, 2, 5): None,
(0, 33, 3, -5): None,
(0, 33, 3, -4): None,
(0, 33, 3, -3): None,
(0, 33, 3, -2): None,
(0, 33, 3, -1): None,
(0, 33, 3, 0): None,
(0, 33, 3, 1): None,
(0, 33, 3, 2): None,
(0, 33, 3, 3): None,
(0, 33, 3, 4): None,
(0, 33, 3, 5): None,
(0, 33, 4, -5): None,
(0, 33, 4, -4): None,
(0, 33, 4, -3): None,
(0, 33, 4, -2): None,
(0, 33, 4, -1): None,
(0, 33, 4, 0): None,
(0, 33, 4, 1): None,
(0, 33, 4, 2): None,
(0, 33, 4, 3): None,
(0, 33, 4, 4): None,
(0, 33, 4, 5): None,
(0, 33, 5, -5): None,
(0, 33, 5, -4): None,
(0, 33, 5, -3): None,
(0, 33, 5, -2): None,
(0, 33, 5, -1): None,
(0, 33, 5, 0): None,
(0, 33, 5, 1): None,
(0, 33, 5, 2): None,
(0, 33, 5, 3): None,
(0, 33, 5, 4): None,
(0, 33, 5, 5): None,
(0, 34, -5, -5): None,
(0, 34, -5, -4): None,
(0, 34, -5, -3): None,
(0, 34, -5, -2): None,
(0, 34, -5, -1): None,
(0, 34, -5, 0): None,
(0, 34, -5, 1): None,
(0, 34, -5, 2): None,
(0, 34, -5, 3): None,
(0, 34, -5, 4): None,
(0, 34, -5, 5): None,
(0, 34, -4, -5): None,
(0, 34, -4, -4): None,
(0, 34, -4, -3): None,
(0, 34, -4, -2): None,
(0, 34, -4, -1): None,
(0, 34, -4, 0): None,
(0, 34, -4, 1): None,
(0, 34, -4, 2): None,
(0, 34, -4, 3): None,
(0, 34, -4, 4): None,
(0, 34, -4, 5): None,
(0, 34, -3, -5): None,
(0, 34, -3, -4): None,
(0, 34, -3, -3): None,
(0, 34, -3, -2): None,
(0, 34, -3, -1): None,
(0, 34, -3, 0): None,
(0, 34, -3, 1): None,
(0, 34, -3, 2): None,
(0, 34, -3, 3): None,
(0, 34, -3, 4): None,
(0, 34, -3, 5): None,
(0, 34, -2, -5): None,
(0, 34, -2, -4): None,
(0, 34, -2, -3): None,
(0, 34, -2, -2): None,
(0, 34, -2, -1): None,
(0, 34, -2, 0): None,
(0, 34, -2, 1): None,
(0, 34, -2, 2): None,
(0, 34, -2, 3): None,
(0, 34, -2, 4): None,
(0, 34, -2, 5): None,
(0, 34,
|
-1, -5): None,
(0, 34, -1, -4): None,
(0, 34, -1, -3): None,
(0, 34, -1, -2): None,
(0, 34, -1, -1): None,
(0, 34, -1, 0): None,
(0, 34, -1, 1): None,
(0, 34, -1, 2): None,
(0, 34, -1, 3): None,
(0, 34, -1, 4): None,
(0, 34, -1, 5): None,
(0, 34, 0, -5): None,
(0, 34, 0, -4): None,
(0, 34, 0, -3): None,
(0, 34, 0, -2): None,
(0, 34, 0, -1): None,
(0, 34, 0, 0): None,
(0, 34, 0, 1): None,
(0, 34, 0, 2): None,
(0, 34, 0, 3): Non
|
e,
(0, 34, 0, 4): None,
(0, 34, 0, 5): None,
(0, 34, 1, -5): None,
(0, 34, 1, -4): None,
(0, 34, 1, -3): None,
(0, 34, 1, -2): None,
(0, 34, 1, -1): None,
(0, 34, 1, 0): None,
(0, 34, 1, 1): None,
(0, 34, 1, 2): None,
(0, 34, 1, 3): None,
(0, 34, 1, 4): None,
(0, 34, 1, 5): None,
(0, 34, 2, -5): None,
(0, 34, 2, -4): None,
(0, 34, 2, -3): None,
(0, 34, 2, -2): None,
(0, 34, 2, -1): None,
(0, 34, 2, 0): None,
(0, 34, 2, 1): None,
(0, 34, 2, 2): None,
(0, 34, 2, 3): None,
(0, 34, 2, 4): None,
(0, 34, 2, 5): None,
(0, 34, 3, -5): None,
(0, 34, 3, -4): None,
(0, 34, 3, -3): None,
(0, 34, 3, -2): None,
(0, 34, 3, -1): None,
(0, 34, 3, 0): None,
(0, 34, 3, 1): None,
(0, 34, 3, 2): None,
(0, 34, 3, 3): None,
(0, 34, 3, 4): None,
(0, 34, 3, 5): None,
(0, 34, 4, -5): None,
(0, 34, 4, -4): None,
(0, 34, 4, -3): None,
(0, 34, 4, -2): None,
(0, 34, 4, -1): None,
(0, 34, 4, 0): None,
(0, 34, 4, 1): None,
(0, 34, 4, 2): None,
(0, 34, 4, 3): None,
(0, 34, 4, 4): None,
(0, 34, 4, 5): None,
(0, 34
|
testerofpens/parameterpatrol
|
linkConsumer.py
|
Python
|
gpl-3.0
| 1,022
| 0.018591
|
######################################################################################
# class: LinkConsumer
# purpose: consumes items in the outQueue and sends to parameterFetcher for processing
####################
|
##################################################################
import logging
import Queue
import threading
import parameterFetcher
import results
class LinkConsumer(threading.Thread):
def __init__(self, outQueue, projectDirectory, results):
threading.Thread.__init__(self)
self.outQueue = outQueue
self.projectDirectory = projectDirectory
#data storage
self.results = results
#logging
self.logger = logging.getLogger(__name__)
def run(self):
while True:
page
|
= self.outQueue.get()
self.logger.debug('Consumer: getting parameters for ' + page)
paramFetcher = parameterFetcher.ParameterFetcher(page, self.projectDirectory)
paramFetcher.saveParameters(self.results) #parse parameters
self.outQueue.task_done()
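# --- Usage sketch (not part of linkConsumer.py) ----------------------------------------
# A minimal illustration of wiring the consumer to a work queue. The results object is
# project-specific and its API is not shown in this file, so the results.Results() call
# below is an assumption; substitute whatever ParameterFetcher.saveParameters() expects.
if __name__ == "__main__":
    work_queue = Queue.Queue()
    work_queue.put("http://example.com/search?q=test")    # hypothetical page to inspect
    consumer = LinkConsumer(work_queue, "/tmp/project", results.Results())
    consumer.setDaemon(True)   # let the process exit once the queue has been drained
    consumer.start()
    work_queue.join()          # returns after task_done() has run for every queued item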
|
soshial/text-normalization
|
numword/numword_en_gb.py
|
Python
|
lgpl-3.0
| 1,631
| 0.011649
|
# coding: utf-8
#This file is part of numword. The COPYRIGHT file at the top level of
#th
|
is repository contains the full copyright notices and license terms.
'''
numword for EN_GB
'''
from numword_en import NumWordEN
class NumWordENGB(NumWordEN):
'''
NumWord EN_GB
'''
de
|
f currency(self, val, longval=True):
'''
Convert to currency
'''
return self._split(val, hightxt=u"pound/s", lowtxt=u"pence",
jointxt=u"and", longval=longval)
_NW = NumWordENGB()
def cardinal(value):
'''
Convert to cardinal
'''
return _NW.cardinal(value)
def ordinal(value):
'''
Convert to ordinal
'''
return _NW.ordinal(value)
def ordinal_number(value):
'''
Convert to ordinal number
'''
return _NW.ordinal_number(value)
def currency(value, longval=True):
'''
Convert to currency
'''
return _NW.currency(value, longval=longval)
def year(value, longval=True):
'''
Convert to year
'''
return _NW.year(value, longval=longval)
def main():
'''
Main
'''
for val in [ 1, 11, 12, 21, 31, 33, 71, 80, 81, 91, 99, 100, 101, 102, 120, 155,
180, 300, 308, 832, 1000, 1001, 1061, 1100, 1120, 1500, 1701, 1800,
2000, 2010, 2099, 2171, 3000, 8280, 8291, 150000, 500000, 1000000,
2000000, 2000001, -21212121211221211111, -2.121212, -1.0000100,
1325325436067876801768700107601001012212132143210473207540327057320957032975032975093275093275093270957329057320975093272950730]:
_NW.test(val)
if __name__ == "__main__":
main()
|
openstack/taskflow
|
releasenotes/source/conf.py
|
Python
|
apache-2.0
| 9,270
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# taskflow Release Notes documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 3 17:40:50 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/taskflow'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'taskflow'
openstackdocs_bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'taskflow Release Notes'
copyright = u'2016, taskflow Developers'
# Release notes do not need a version number in the title, they
# cover multiple versions.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into indivi
|
dual page
|
s for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'taskflowReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'taskflowReleaseNotes.tex',
u'taskflow Release Notes Documentation',
u'taskflow Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taskflowreleasenotes',
u'taskflow Release Notes Documentation',
[u'taskflo
|
sgmap/openfisca-core
|
openfisca_core/reforms.py
|
Python
|
agpl-3.0
| 13,388
| 0.00478
|
# -*- coding: utf-8 -*-
import collections
import copy
from . import formulas, legislations, periods, taxbenefitsystems, columns
class AbstractReform(taxbenefitsystems.AbstractTaxBenefitSystem):
"""A reform is a variant of a TaxBenefitSystem, that refers to the real TaxBenefitSystem as its reference."""
CURRENCY = None
DECOMP_DIR = None
DEFAULT_DECOMP_FILE = None
key = None
name = None
def __init__(self):
assert self.key is not None
assert self.name is not None
assert self.reference is not None, 'Reform requires a reference tax-benefit-system.'
assert isinstance(self.reference, taxbenefitsystems.AbstractTaxBenefitSystem)
self.Scenario = self.reference.Scenario
if self.CURRENCY is None:
currency = getattr(self.reference, 'CURRENCY', None)
if currency is not None:
self.CURRENCY = currency
if self.DECOMP_DIR is None:
decomp_dir = getattr(self.reference, 'DECOMP_DIR', None)
if decomp_dir is not None:
self.DECOMP_DIR = decomp_dir
if self.DEFAULT_DECOMP_FILE is None:
default_decomp_file = getattr(self.reference, 'DEFAULT_DECOMP_FILE', None)
if default_decomp_file is not None:
self.DEFAULT_DECOMP_FILE = default_decomp_file
super(AbstractReform, self).__init__(
entity_class_by_key_plural = self.entity_class_by_key_plural or self.reference.entity_class_by_key_plural,
legislation_json = self.reference.legislation_json,
)
@property
def full_key(self):
key = self.key
assert key is not None, 'key was not set for reform {} (name: {!r})'.format(self, self.name)
if self.reference is not None and hasattr(self.reference, 'key'):
reference_full_key = self.reference.full_key
key = u'.'.join([reference_full_key, key])
return key
def modify_legislation_json(self, modifier_function):
"""
Copy the reference TaxBenefitSystem legislation_json attribute and return it.
Used by reforms which need to modify the legislation_json, usually in the build_reform() function.
Validates the new legislation.
"""
reference_legislation_json = self.reference.legislation_json
reference_legislation_json_copy = copy.deepcopy(reference_legislation_json)
reform_legislation_json = modifier_function(reference_legislation_json_copy)
assert reform_legislation_json is not None, \
'modifier_function {} in module {} must return the modified legislation_json'.format(
modifier_function.__name__,
modifier_function.__module__,
)
reform_legislation_json, error = legislations.validate_legislation_json(reform_legislation_json)
assert error is None, \
'The modified legislation_json of the reform "{}" is invalid, error: {}'.format(
self.key, error).encode('utf-8')
self.legislation_json = reform_legislation_json
def clone_entity_class(entity_class):
return type(entity_class.__name__.encode('utf-8'), (entity_class,), dict(
column_by_name = entity_class.column_by_name.copy(),
))
def compose_reforms(build_functions_and_keys, tax_benefit_system):
"""
Compose reforms: the first reform is built with the given base tax-benefit system,
then each one is built with the previous one as the reference.
"""
def compose_reforms_reducer(memo, item):
build_reform, key = item
reform = build_reform(tax_benefit_system = memo)
assert isinstance(reform, AbstractReform), 'Reform {} returned an invalid value {!r}'.format(key, reform)
return reform
assert isinstance(build_functions_and_keys, list)
reform = reduce(compose_reforms_reducer, build_functions_and_keys, tax_benefit_system)
return reform
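# Usage sketch (not part of this module): the build functions and keys below are
# hypothetical; per the assertions above, each callable must accept a
# `tax_benefit_system` keyword argument and return an AbstractReform instance.
#
#   composed = compose_reforms(
#       [(build_first_reform, 'first_reform'),
#        (build_second_reform, 'second_reform')],
#       tax_benefit_system)
#
# The first reform is built on the base system and the second on the first, so the
# resulting reform's reference chain leads back to the base tax-benefit system.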
def make_reform(key, name, reference, decomposition_dir_name = None, decomposition_file_name
|
= None):
"""Return a Reform class inherited from AbstractReform."""
assert isinstance(key, basestring)
assert isinstance(name, basestring)
assert isinstance(reference, taxbenefitsystems.AbstractTaxBenefitSystem)
reform_entity_class_by_key_plural = {
key_plural: clone_entity_class(entity_class)
for key_plural, entity_class in reference.entity_class_by_key_plural.iteritems()
}
class Reform(AbstractReform
|
):
_constructed = False
DECOMP_DIR = decomposition_dir_name
DEFAULT_DECOMP_FILE = decomposition_file_name
entity_class_by_key_plural = reform_entity_class_by_key_plural
def __init__(self):
super(Reform, self).__init__()
# TODO Remove this mechanism.
Reform._constructed = True
@classmethod
def add_column(cls, column):
if cls._constructed:
                print 'Caution: You are adding a formula to an instantiated Reform. Reform must be reinstantiated.'
assert isinstance(column, columns.Column)
assert column.formula_class is not None
entity_class = reform_entity_class_by_key_plural[column.entity_key_plural]
entity_column_by_name = entity_class.column_by_name
name = column.name
entity_column_by_name[name] = column
return column
# Classes for inheriting from reform variables.
class DatedVariable(object):
"""Syntactic sugar to generate a DatedFormula class and fill its column"""
__metaclass__ = formulas.FormulaColumnMetaclass
entity_class_by_key_plural = reform_entity_class_by_key_plural
formula_class = formulas.DatedFormula
class EntityToPersonColumn(object):
"""Syntactic sugar to generate an EntityToPerson class and fill its column"""
__metaclass__ = formulas.ConversionColumnMetaclass
formula_class = formulas.EntityToPerson
class PersonToEntityColumn(object):
"""Syntactic sugar to generate an PersonToEntity class and fill its column"""
__metaclass__ = formulas.ConversionColumnMetaclass
formula_class = formulas.PersonToEntity
class Variable(object):
"""Syntactic sugar to generate a SimpleFormula class and fill its column"""
__metaclass__ = formulas.FormulaColumnMetaclass
entity_class_by_key_plural = reform_entity_class_by_key_plural
formula_class = formulas.SimpleFormula
# Define class attributes after class declaration to avoid "name is not defined" exceptions.
Reform.key = key
Reform.name = name
Reform.reference = reference
return Reform
# Legislation helpers
def update_legislation(legislation_json, path, period = None, value = None, start = None, stop = None):
"""
This function is deprecated.
Update legislation JSON with a value defined for a specific couple of period defined by
its start and stop instant or a period object.
This function does not modify input parameters.
"""
assert value is not None
if period is not None:
assert start is None and stop is None, u'period parameter can\'t be used with start and stop'
start = period.start
stop = period.stop
assert start is not None and stop is not None, u'start and stop must be provided, or period'
def build_node(root_node, path_index):
if isinstance(root_node, collections.Sequence):
return [
build_node(node, path_index + 1) if path[path_index] == index else node
for index, node in enumerate(root_node)
]
elif isinstance(root_node, collections.Mapping):
return collections.OrderedDict((
(
key,
(
updated_legislation_items(node, start, stop, value)
if path_index == len(path) - 1
else build_node(node, path_index + 1)
)
|