Dataset columns (type and observed value range per column):

- repo_name: string (length 5 to 100)
- path: string (length 4 to 231)
- language: string (1 class)
- license: string (15 classes)
- size: int64 (6 to 947k)
- score: float64 (0 to 0.34)
- prefix: string (length 0 to 8.16k)
- middle: string (length 3 to 512)
- suffix: string (length 0 to 8.17k)

Each row below gives repo_name, path, language, license, size, and score, followed by the prefix, middle, and suffix code segments, with standalone `|` lines marking the column boundaries.
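The prefix, middle, and suffix columns suggest each row is a source file split at two points, as used for fill-in-the-middle (infilling) training. The sketch below shows one way a row could be assembled into a single training string; the dataset identifier and the sentinel tokens are placeholders I am assuming, since this page does not specify how the rows are meant to be consumed.

```python
# Minimal sketch, assuming: (1) the rows are available as a Hugging Face dataset
# under a placeholder id, and (2) the common <fim_prefix>/<fim_suffix>/<fim_middle>
# sentinel convention -- neither is specified by this page.
from datasets import load_dataset

ds = load_dataset("org/python-fim-corpus", split="train")  # hypothetical id

def to_fim_string(row):
    # Rearrange the three segments around sentinel tokens so a model can be
    # trained to emit `middle` given the surrounding `prefix` and `suffix`.
    return (
        "<fim_prefix>" + row["prefix"]
        + "<fim_suffix>" + row["suffix"]
        + "<fim_middle>" + row["middle"]
    )

print(to_fim_string(ds[0])[:300])
```

Placing the segments in prefix-suffix-middle order is only one common arrangement; the ordering and the exact sentinel strings depend on the tokenizer and model being trained.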
analytics-pros/mozilla-bedrock
|
bedrock/settings/base.py
|
Python
|
mpl-2.0
| 33,383
| 0.000899
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import logging
import platform
from os.path import abspath
from django.utils.functional import lazy
import dj_database_url
from decouple import Csv, config
from pathlib import Path
from .static_media import PIPELINE_CSS, PIPELINE_JS # noqa
# ROOT path of the project. A pathlib.Path object.
ROOT_PATH = Path(__file__).resolve().parents[2]
ROOT = str(ROOT_PATH)
def path(*args):
return abspath(str(ROOT_PATH.joinpath(*args)))
# Is this a dev instance?
DEV = config('DEV', cast=bool, default=False)
PROD = config('PROD', cast=bool, default=False)
DEBUG = config('DEBUG', cast=bool, default=False)
# Production uses PostgreSQL, but Sqlite should be sufficient for local development.
db_url = config('DATABASE_URL', default='sqlite:///bedrock.db')
DATABASES = {
# leave 'default' empty so that Django will start even
# if it can't connect to the DB at boot time
'default': {},
'bedrock': dj_database_url.parse(db_url)
}
if db_url.startswith('sqlite'):
# no server, can use 'default'
DATABASES['default'] = DATABASES['bedrock']
# leave the config in 'bedrock' as well so scripts
# hardcoded for 'bedrock' will continue to work
else:
# settings specific to db server environments
DATABASES['bedrock']['CONN_MAX_AGE'] = None
DATABASE_ROUTERS = ['bedrock.base.database.BedrockRouter']
CACHES = config(
'CACHES',
cast=json.loads,
default=json.dumps(
{'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'translations'}}))
# in case django-pylibmc is in use
PYLIBMC_MIN_COMPRESS_LEN = 150 * 1024
PYLIBMC_COMPRESS_LEVEL = 1 # zlib.Z_BEST_SPEED
# Logging
LOG_LEVEL = config('LOG_LEVEL', cast=int, default=logging.INFO)
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_bedrock"
LOGGING_CONFIG = None
# CEF Logging
CEF_PRODUCT = 'Bedrock'
CEF_VENDOR = 'Mozilla'
CEF_VERSION = '0'
CEF_DEVICE_VERSION = '0'
# Internationalization.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = config('TIME_ZONE', default='America/Los_Angeles')
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# just here so Django doesn't complain
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Use Ship It as the source for product_details
PROD_DETAILS_URL = config('PROD_DETAILS_URL',
default='https://product-details.mozilla.org/1.0/')
# Tells the product_details module where to find our local JSON files.
# This ultimately controls how LANGUAGES are constructed.
PROD_DETAILS_CACHE_NAME = 'product-details'
PROD_DETAILS_CACHE_TIMEOUT = 60 * 15 # 15 min
default_pdstorage = 'PDDatabaseStorage' if PROD else 'PDFileStorage'
PROD_DETAILS_STORAGE = config('PROD_DETAILS_STORAGE',
default='product_details.storage.' + default_pdstorage)
# Accepted locales
PROD_LANGUAGES = ('ach', 'af', 'an', 'ar', 'as', 'ast', 'az', 'be', 'bg',
'bn-BD', 'bn-IN', 'br', 'brx', 'bs', 'ca', 'cak', 'cs',
'cy', 'da', 'de', 'dsb', 'ee', 'el', 'en-GB', 'en-US',
'en-ZA', 'eo', 'es-AR', 'es-CL', 'es-ES', 'es-MX', 'et',
'eu', 'fa', 'ff', 'fi', 'fr', 'fy-NL', 'ga-IE', 'gd',
'gl', 'gn', 'gu-IN', 'ha', 'he', 'hi-IN', 'hr', 'hsb',
'hu', 'hy-AM', 'id', 'ig', 'is', 'it', 'ja', 'ja-JP-mac',
'ka', 'kk', 'km', 'kn', 'ko', 'lij', 'ln', 'lt', 'ltg',
'lv', 'mai', 'mk', 'ml', 'mr', 'ms', 'my', 'nb-NO', 'nl',
'nn-NO', 'oc', 'or', 'pa-IN', 'pl', 'pt-BR', 'pt-PT',
'rm', 'ro', 'ru', 'sat', 'si', 'sk', 'sl', 'son', 'sq',
'sr', 'sv-SE', 'sw', 'ta', 'te', 'th', 'tr', 'uk', 'ur',
'uz', 'vi', 'wo', 'xh', 'yo', 'zh-CN', 'zh-TW', 'zu')
LOCALES_PATH = ROOT_PATH / 'locale'
default_locales_repo = 'www.mozilla.org' if DEV else 'bedrock-l10n'
default_locales_repo = 'https://github.com/mozi
|
lla-l10n/{}'.format(default_locales_repo)
LOCALES_REPO = config('LOCALES_REPO', default=default_locales_repo)
def get_dev_languages():
try:
return [lang.name for lang in LOCALES_PATH.iterdir()
if lang.is_dir() and lang.name != 'templates']
except OSError:
|
# no locale dir
return list(PROD_LANGUAGES)
DEV_LANGUAGES = get_dev_languages()
DEV_LANGUAGES.append('en-US')
# Map short locale names to long, preferred locale names. This
# will be used in urlresolvers to determine the
# best-matching locale from the user's Accept-Language header.
CANONICAL_LOCALES = {
'en': 'en-US',
'es': 'es-ES',
'ja-jp-mac': 'ja',
'no': 'nb-NO',
'pt': 'pt-BR',
'sv': 'sv-SE',
'zh-hant': 'zh-TW', # Bug 1263193
'zh-hant-tw': 'zh-TW', # Bug 1263193
}
# Unlocalized pages are usually redirected to the English (en-US) equivalent,
# but sometimes it would be better to offer another locale as fallback. This map
# specifies such cases.
FALLBACK_LOCALES = {
'es-AR': 'es-ES',
'es-CL': 'es-ES',
'es-MX': 'es-ES',
}
def lazy_lang_url_map():
from django.conf import settings
langs = settings.DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return {i.lower(): i for i in langs}
# Override Django's built-in with our native names
def lazy_langs():
from django.conf import settings
from product_details import product_details
langs = DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return {lang.lower(): product_details.languages[lang]['native']
for lang in langs if lang in product_details.languages}
LANGUAGE_URL_MAP = lazy(lazy_lang_url_map, dict)()
LANGUAGES = lazy(lazy_langs, dict)()
FEED_CACHE = 3900
DOTLANG_CACHE = 600
DOTLANG_FILES = ['main', 'download_button']
# Paths that don't require a locale code in the URL.
# matches the first url component (e.g. mozilla.org/gameon/)
SUPPORTED_NONLOCALES = [
# from redirects.urls
'media',
'static',
'certs',
'images',
'contribute.json',
'credits',
'gameon',
'rna',
'robots.txt',
'telemetry',
'webmaker',
'contributor-data',
'healthz',
'2004',
'2005',
'2006',
'keymaster',
'microsummaries',
'xbl',
]
ALLOWED_HOSTS = config(
'ALLOWED_HOSTS', cast=Csv(),
default='www.mozilla.org,www.ipv6.mozilla.org,www.allizom.org')
# The canonical, production URL without a trailing slash
CANONICAL_URL = 'https://www.mozilla.org'
# Make this unique, and don't share it with anybody.
SECRET_KEY = config('SECRET_KEY', default='ssssshhhhh')
MEDIA_URL = config('MEDIA_URL', default='/user-media/')
MEDIA_ROOT = config('MEDIA_ROOT', default=path('media'))
STATIC_URL = config('STATIC_URL', default='/media/')
STATIC_ROOT = config('STATIC_ROOT', default=path('static'))
STATICFILES_STORAGE = ('pipeline.storage.NonPackagingPipelineStorage' if DEBUG else
'bedrock.base.storage.ManifestPipelineStorage')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.CachedFileFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_DIRS = (
path('media'),
)
PIPELINE = {
'STYLESHEETS': PI
|
pigay/COMDIRAC
|
Interfaces/scripts/dmkdir.py
|
Python
|
gpl-3.0
| 1,488
| 0.024866
|
#!/usr/bin/env
|
python
"""
create a directory in the FileCatalog
"""
import os
import DIRAC
from DIRAC.Core.Base import Script
from COMDIRAC.Interfaces import critical
from COMDIRAC.Interfaces import DSession
from COMDIRAC.Interfaces import createCatalog
from COMDIRAC.Interfaces import pathFromArgum
|
ents
if __name__ == "__main__":
import sys
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s Path...' % Script.scriptName,
'Arguments:',
' Path: path to new directory',
'', 'Examples:',
' $ dmkdir ./some_lfn_dir',
] )
)
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
session = DSession( )
if len( args ) < 1:
print "Error: No argument provided\n%s:" % Script.scriptName
Script.showHelp( )
DIRAC.exit( -1 )
Script.enableCS( )
catalog = createCatalog()
result = catalog.createDirectory( pathFromArguments( session, args ) )
if result["OK"]:
if result["Value"]["Failed"]:
for p in result["Value"]["Failed"]:
print "ERROR - \"%s\": %s" % ( p, result["Value"]["Failed"][p] )
else:
print "ERROR: %s" % result["Message"]
|
RCOS-Grading-Server/HWserver
|
migration/run_migrator.py
|
Python
|
bsd-3-clause
| 331
| 0.003021
|
"""Run the migrator tool thro
|
ugh its CLI."""
from pathlib import Path
import sys
from migrator import cli
if __name__ == '__main__':
config_path = Path(Path(__file__).parent.resolve(), '..', '..', '..', 'config')
config_path = config_path.resolve() if config_path.exists() else None
cli.run(sys.argv[1
|
:], config_path)
|
will-iam/Variant
|
casepy/eulerRuO2/nNoh131072x1/chars.py
|
Python
|
mit
| 472
| 0.012712
|
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import script.rio as io
import script.initial_condition.noh1D as noh1D
# Domain properties
lx = 1.0
ly = 1.0
Nx = 131072
Ny = 1
# S
|
cheme execution options
T = 0.6
CFL
|
= 0.5
gamma = 5./3.
BClayer = 1
quantityList = ['rho', 'rhou_x', 'rhou_y', 'rhoE']
def buildme(quantityDict, coords_to_uid, coords_to_bc):
noh1D.build(quantityDict, coords_to_uid, coords_to_bc, Nx, Ny, lx, ly, BClayer)
|
SalesforceFoundation/CumulusCI
|
cumulusci/tasks/salesforce/Deploy.py
|
Python
|
bsd-3-clause
| 5,848
| 0.002394
|
import pathlib
from typing import Optional
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.utils import process_bool_arg, process_list_arg
from cumulusci.salesforce_api.metadata import ApiDeploy
from cumulusci.salesforce_api.package_zip import MetadataPackageZipBuilder
from cumulusci.tasks.salesforce.BaseSalesforceMetadataApiTask import (
BaseSalesforceMetadataApiTask,
)
class Deploy(BaseSalesforceMetadataApiTask):
api_class = ApiDeploy
task_options = {
"path": {
"description": "The path to the metadata source to be deployed",
"required": True,
},
"unmanaged": {
"description": "If True, changes namespace_inject to replace tokens with a blank string"
},
"namespace_inject": {
"description": "If set, the namespace tokens in files and filenames are replaced with the namespace's prefix"
},
"namespace_strip": {
"description": "If set, all namespace prefixes for the namespace specified are stripped from files and filenames"
},
"check_only": {
"description": "If True, performs a test deployment (validation) of components without saving the components in the target org"
},
"test_level": {
"description": "Specifies which tests are run as part of a deployment. Valid values: NoTestRun, RunLocalTests, RunAllTestsInOrg, RunSpecifiedTests."
},
"specified_tests": {
"description": "Comma-separated list of test classes to run upon deployment. Applies only with test_level set to RunSpecifiedTests."
},
"static_resource_path": {
"description": "The path where decompressed static resources are stored. Any subdirectories found will be zipped and added to the staticresources directory of the build."
},
"namespaced_org": {
"description": "If True, the tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___ will get replaced with the namespace. The default is false causing those tokens to get stripped and replaced with an empty string. Set this if deploying to a namespaced scratch org or packaging org."
},
"clean_meta_xml": {
"description": "Defaults to True which strips the <pac
|
kageVersions/> element from all meta.xml files. The packageVersion element gets added automatically by the target org and is set to whatever version is installed in the org. To disable this, set this option to False"
},
}
name
|
spaces = {"sf": "http://soap.sforce.com/2006/04/metadata"}
def _init_options(self, kwargs):
super(Deploy, self)._init_options(kwargs)
self.check_only = process_bool_arg(self.options.get("check_only", False))
self.test_level = self.options.get("test_level")
if self.test_level and self.test_level not in [
"NoTestRun",
"RunLocalTests",
"RunAllTestsInOrg",
"RunSpecifiedTests",
]:
raise TaskOptionsError(
f"Specified test run level {self.test_level} is not valid."
)
self.specified_tests = process_list_arg(self.options.get("specified_tests", []))
if bool(self.specified_tests) != (self.test_level == "RunSpecifiedTests"):
raise TaskOptionsError(
"The specified_tests option and test_level RunSpecifiedTests must be used together."
)
self.options["namespace_inject"] = (
self.options.get("namespace_inject")
or self.project_config.project__package__namespace
)
def _get_api(self, path=None):
if not path:
path = self.options.get("path")
package_zip = self._get_package_zip(path)
if package_zip is not None:
self.logger.info("Payload size: {} bytes".format(len(package_zip)))
else:
self.logger.warning("Deployment package is empty; skipping deployment.")
return
return self.api_class(
self,
package_zip,
purge_on_delete=False,
check_only=self.check_only,
test_level=self.test_level,
run_tests=self.specified_tests,
)
def _has_namespaced_package(self, ns: Optional[str]) -> bool:
if "unmanaged" in self.options:
return not process_bool_arg(self.options.get("unmanaged", True))
return bool(ns) and ns in self.org_config.installed_packages
def _is_namespaced_org(self, ns: Optional[str]) -> bool:
if "namespaced_org" in self.options:
return process_bool_arg(self.options.get("namespaced_org", False))
return bool(ns) and ns == self.org_config.namespace
def _get_package_zip(self, path):
assert path, f"Path should be specified for {self.__class__.name}"
if not pathlib.Path(path).exists():
self.logger.warning(f"{path} not found.")
return
namespace = self.options["namespace_inject"]
options = {
**self.options,
"clean_meta_xml": process_bool_arg(
self.options.get("clean_meta_xml", True)
),
"namespace_inject": namespace,
"unmanaged": not self._has_namespaced_package(namespace),
"namespaced_org": self._is_namespaced_org(namespace),
}
package_zip = MetadataPackageZipBuilder(
path=path, options=options, logger=self.logger
)
if not package_zip.zf.namelist():
return
return package_zip.as_base64()
def freeze(self, step):
steps = super(Deploy, self).freeze(step)
for step in steps:
if step["kind"] == "other":
step["kind"] = "metadata"
return steps
|
jcshen007/cloudstack
|
test/integration/smoke/misc/test_escalations_templates.py
|
Python
|
apache-2.0
| 8,699
| 0.002299
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#Test from the Marvin - Testing in Python wiki
#All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
#Import Integration Libraries
#base - contains all resources as entities and defines create, delete, list operations on them
from marvin.lib.base import (
Account,
VirtualMachine,
Volume,
ServiceOffering,
Configurations,
DiskOffering,
Template)
#utils - utility classes for common cleanup, external library wrappers etc
from marvin.lib.utils import cleanup_resources, validateList
#common - commonly used methods for all tests are listed here
from marvin.lib.common import get_zone, get_domain, get_template
from marvin.codes import PASS
from nose.plugins.attrib import attr
import time
class TestTemplates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
try:
cls._cleanup = []
cls.testClient = super(TestTemplates, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = cls.testClient.getParsedTestDataConfig()
# Get Domain, Zone, Template
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(
cls.api_client,
cls.testClient.getZoneForTests())
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["template"]["ostypeid"] = cls.template.ostypeid
cls.services["template"]["isextractable"] = 'True'
if cls.zone.localstorageenabled:
|
cls.storagetype = 'local'
cls.services["service_offerings"][
"tiny"]["storagetype"] = 'local'
cls.services["disk_offering"]["storagetype"] = 'local'
else:
cls.storagetype = 'shared'
cls.services["service_offerings"][
"tiny
|
"]["storagetype"] = 'shared'
cls.services["disk_offering"]["storagetype"] = 'shared'
cls.services['mode'] = cls.zone.networktype
cls.services["virtual_machine"][
"hypervisor"] = cls.testClient.getHypervisorInfo()
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["custom_volume"]["zoneid"] = cls.zone.id
# Creating Disk offering, Service Offering and Account
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offerings"]["tiny"]
)
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
# Getting authentication for user in newly created Account
cls.user = cls.account.user[0]
cls.userapiclient = cls.testClient.getUserApiClient(cls.user.username, cls.domain.name)
cls._cleanup.append(cls.disk_offering)
cls._cleanup.append(cls.service_offering)
cls._cleanup.append(cls.account)
except Exception as e:
cls.tearDownClass()
raise Exception("Warning: Exception in setup : %s" % e)
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.cleanup = []
return
def tearDown(self):
#Clean up, terminate the created volumes
cleanup_resources(self.apiClient, self.cleanup)
return
@attr(tags=["advanced", "advancedsg", "sg"], required_hardware='true')
def test01_template_download_URL_expire(self):
"""
@Desc:Template files are deleted from secondary storage after download URL expires
Step1:Deploy vm with default cent os template
Step2:Stop the vm
Step3:Create template from the vm's root volume
Step4:Extract Template and wait for the download url to expire
Step5:Deploy another vm with the template created at Step3
Step6:Verify that vm deployment succeeds
"""
params = ['extract.url.expiration.interval', 'extract.url.cleanup.interval']
wait_time = 0
for param in params:
config = Configurations.list(
self.apiClient,
name=param,
)
self.assertEqual(validateList(config)[0], PASS, "Config list returned invalid response")
wait_time = wait_time+int(config[0].value)
self.debug("Total wait time for url expiry: %s" % wait_time)
# Creating Virtual Machine
self.virtual_machine = VirtualMachine.create(
self.userapiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
)
self.assertIsNotNone(self.virtual_machine, "Virtual Machine creation failed")
self.cleanup.append(self.virtual_machine)
#Stop virtual machine
self.virtual_machine.stop(self.userapiclient)
list_volume = Volume.list(
self.userapiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(validateList(list_volume)[0],
PASS,
"list volumes with type ROOT returned invalid list"
)
self.volume = list_volume[0]
self.create_template = Template.create(
self.userapiclient,
self.services["template"],
volumeid=self.volume.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertIsNotNone(self.create_template, "Failed to create template from root volume")
self.cleanup.append(self.create_template)
"""
Extract template
"""
try:
Template.extract(
self.userapiclient,
self.create_template.id,
'HTTP_DOWNLOAD',
self.zone.id
)
except Exception as e:
self.fail("Extract template failed with error %s" % e)
self.debug("Waiting for %s seconds for url to expire" % repr(wait_time+20))
time.sleep(wait_time+20)
self.debug("Waited for %s seconds for url to expire" % repr(wait_time+20))
"""
Deploy vm with the template created from the volume. After url expiration interval only
url should be deleted not the template. To validate this deploy vm with the template
"""
try:
self.vm = VirtualMachine.create(
|
futurice/sforce2flowdock
|
sforce-show-api-versions.py
|
Python
|
gpl-3.0
| 362
| 0
|
#! /usr/bin/env python3
import json
from s2f.sforce
|
import SClient
from s2f import util
"""
Print SalesForce API versions.
Use this to set the version in the config file JSON.
"""
if __name__ == '__main__':
util.setupLogging()
client = SClient(util.SForceCfgFileName, util.SForceTokenFileName)
print(json.dumps(client.getAPIVersi
|
ons(), indent=2))
|
orian/umo
|
pdf_scraping.py
|
Python
|
mit
| 12,546
| 0.012195
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
a='51.01 633.55 Td\n'
b='(LICZBA UPRAWNIONYCH) Tj\n'
re.compile(r'() () Td\\n')
m = re.match(r'(?P<x>[\d\.]+) (?P<y>[\d\.]+) Td\n', a)
print(m.groupdict())
t = re.match(r'\((?P<t>.*)\) Tj\n', b)
print(t.groupdict())
# <codecell>
import json
import re
DEBUG=False
# here it starts
class Context(object):
def __init__(self):
self._stack = []
self._start_state = State('start')
self._end_state = State('end')
self._state = self._start_state
self._texts = []
def run(self, f):
line_counter = 0
for line in f:
follow = True
while follow:
x, follow = self._state.update(self, line)
if x != None:
self._state.on_exit(self)
self._state = x
self._state.on_enter(self)
if self._state == self._end_state:
return
line_counter+=1
# if line_counter > 400:
# return
class State(object):
def __init__(self, name):
self._name = name
self._transition_func_and_state = []
def add_state(self, func, state, follow=False):
self._transition_func_and_state.append((func, state, follow,))
def update(self, ctx, text):
# print('got line: {}'.format(text))
for func,state,follow in self._transition_func_and_state:
if func(text):
return state,follow
return None,False
def on_exit(self, ctx):
if DEBUG:
print('on_exit: {}'.format(self._name))
def on_enter(self, ctx):
if DEBUG:
print('on_enter: {}'.format(self._name))
class TextObj(object):
def __init__(self):
self._x = 0.0
self._y = 0.0
self._text = ''
def __str__(self):
return '({x} {y} \'{t}\')'.format(x=self._x, y=self._y, t=self._text)
def __unicode__(self):
return '({x} {y} \'{t}\')'.format(x=self._x, y=self._y, t=self._text)
def __repr__(self):
return '({x} {y} \'{t}\')'.format(x=self._x, y=self._y, t=self._text)
class BoxStateStart(State):
def on_enter(self, ctx):
ctx._texts.append(('MARKER',))
super(BoxStateStart, self).on_enter(ctx)
class BoxStateEnd(State):
def on_exit(self, ctx):
stack=[]
idx = 1
for a in reversed(ctx._texts):
# print(a)
if type(a) is tuple and a[0]=='MARKER':
# print ('breaking {}'.format(idx))
break
stack.append(a)
idx+=1
ctx._texts = ctx._texts[:-idx]
if idx>1:
t = []
for x in reversed(stack):
t.append(x._text)
stack[-1]._text = ' '.join(t)
# print('joined from {} {} {}'.format(idx, len(t), stack[-1]._text))
ctx._texts.append(stack[-1])
super(BoxStateEnd, self).on_exit(ctx)
'51.01 633.55 Td\n'
'(LICZBA UPRAWNIONYCH) Tj\n'
class TextState(State):
def __init__(self, name):
super(TextState, self).__init__(name)
def reset(self):
self._text = TextObj()
self._has_text = False
self._has_xy = False
def update(self, ctx, text):
m = re.match(r'(?P<x>[\d\.]+) (?P<y>[\d\.]+) Td\n', text)
if m != None:
if DEBUG:
print('x y match')
m = m.groupdict()
self._text._x = m['x']
self._text._y = m['y']
self._has_xy = True
else:
t = re.match(r'\((?P<t>.*)\) Tj\n', text)
if t != None:
if DEBUG:
print('title match')
t = t.groupdict()
self._text._text = t['t']
self._has_text = True
return super(TextState, self).update(ctx, text)
def on_enter(self, ctx):
if DEBUG:
print('on_enter: {}'.format(self._name))
self.reset()
def on_exit(self, ctx):
if DEBUG:
print('parsed: {}'.format(self._text.__unicode__()))
ctx._texts.append(self._text)
self._text = None
def create_state_machine(f):
ctx = Context()
s_obj = State('obj')
obj_start_f = lambda t: t.find('obj\n') >=0
obj_end_f = lambda t: t.find('endobj\n') >=0
ctx._start_state.add_state(obj_start_f, s_obj)
s_obj.add_state(obj_end_f, ctx._start_state)
s_stream = State('stream')
stream_start_f = lambda t: t.find('stream\n') >=0
stream_end_f = lambda t: t.find('endstream\n') >=0
s_obj.add_state(stream_start_f, s_stream)
s_stream.add_state(stream_end_f, s_obj)
s_text = TextState('text')
text_start_f = lambda t: t.find('BT\n') >=0
text_end_f = lambda t: t.find('ET\n') >=0
s_stream.add_state(text_start_f, s_text)
s_text.add_state(text_end_f, s_stream)
ctx.run(f)
return ctx._texts
def create_state_machine2(f):
ctx = Context()
s_obj = State('obj')
obj_start_f = lambda t: t.find('obj\n') >=0
obj_end_f = lambda t: t.find('endobj\n') >=0
ctx._start_state.add_state(obj_start_f, s_obj)
s_obj.add_state(obj_end_f, ctx._start_state)
s_stream = State('stream')
stream_start_f = lambda t: t.find('stream\n') >=0
stream_end_f = lambda t: t.find('endstream\n') >=0
s_obj.add_state(stream_start_f, s_stream)
s_stream.add_state(stream_end_f, s_obj)
s_box = BoxStateStart('box_start')
box_start_f = lambda t: t=='n\n'
box_end_f = lambda t: t=='W\n'
s_stream.add_state(box_start_f, s_box, follow=True)
s_box_wait = State('box_wait')
s_box.add_state(lambda x:True, s_box_wait)
# there may be 2 ways to end box
s_box_end = BoxStateEnd('box_end')
s_box_wait.add_state(box_end_f, s_box_end, follow=True)
s_box_wait.add_state(stream_end_f, s_box_end, follow=True)
s_box_end.add_state(lambda x:True, s_stream, follow=True)
s_text = TextState('text')
text_start_f = lambda t: t.find('BT\n') >=0
text_end_f = lambda t: t.find('ET\n') >=0
s_box_wait.add_state(text_start_f, s_text)
s_text.add_state(text_end_f, s_box_wait)
ctx.run(f)
return ctx._texts
# <codecell>
from operator import itemgetter, attrgetter
class Parser(object):
def __init__(self, t_list):
s = sorted(t_list, key=lambda x: float(x._x))
self._text_objs = sorted(s, key=lambda x: float(x._y), reverse=True)
self._idx = 0
self._data = {'votes':[]}
self._prev = None
self._d_x = 2.0
self._d_y = 2.0
def __str__(self):
return self._data.__str__()
def fill(self, move_x, move_y, field, obj=None, parser=None):
assert move_x == 0 or move_y == 0
curr = self._text_objs[self._idx]
prev = self._prev
if self._prev:
if move_x>0:
assert float(curr._x) >= float(prev._x)+self._d_x, "{} is NOT more to right than {}".format(curr, prev)
if move_y>0:
assert float(curr._y) <= float(prev._y)-self._d_y, "{} is NOT lower than {}".format(curr, prev)
val = curr._text
if parser != None:
val = parser(val)
if obj != None:
obj[field] = val
else:
self._data[field] = val
self._prev = curr
self._idx += 1
def maybe_fill(self, move_x, move_y, field, cond=None, obj=None, parser=None):
assert move_x == 0 or move_y == 0
if self._idx >= len(self._text_objs):
return False
curr = self._text_objs[self._idx]
|
prev = self._prev
if cond != None and not cond(curr._text):
return False
r_val = True
if self._prev:
if move_x>0:
r_val = float(curr._x) >= float(prev._x)+self._d_x
if move_y>0:
r
|
_val = float(curr._y) <= float(prev._y)-self._d_y
self._prev = curr
self._idx += 1
if not r_val:
return False
val = curr._text
if parser != None:
try:
val = parser(val)
|
turbulenz/turbulenz_local
|
turbulenz_local/lib/deploy.py
|
Python
|
mit
| 37,875
| 0.002139
|
# Copyright (c) 2010-2014 Turbulenz Limited
"""
Controller class for deploying a game
"""
from urllib3.exceptions import HTTPError, SSLError
from simplejson import dump as json_dump, load as json_load, loads as json_loads, JSONDecodeError
from os import stat, sep, error, rename, remove, makedirs, utime, access, R_OK, walk
from os.path import join, basename, abspath, splitext, sep, isdir, dirname
from errno import EEXIST
from stat import S_ISREG
from glob import iglob
from logging import getLogger
from mimetypes import guess_type
from gzip import GzipFile
from shutil import rmtree
from Queue import Queue
from threading import Thread
from time import time
from subprocess import Popen, PIPE
# pylint: disable=F0401
from poster.encode import gen_boundary, get_headers, MultipartParam
# pylint: enable=F0401
from turbulenz_local.tools import get_absolute_path, get_7zip_path
from turbulenz_tools.utils.hash import hash_file_sha256_md5, hash_file_sha256, hash_file_md5
from turbulenz_local import __version__
LOG = getLogger(__name__)
def _update_file_mtime(file_path, mtime):
# We round mtime up to the next second to avoid precision problems with floating point values
mtime = long(mtime) + 1
utime(file_path, (mtime, mtime))
def _get_upload_file_token(index, filename):
# We build the upload token using an index and the file extension since the hub doesn't care
# about the actual filename only the extension
return '%d%s' % (index, splitext(filename)[1])
def _get_cached_file_name(file_name, file_hash, file_length):
return '%s%x%s' % (file_hash, file_length, splitext(file_name)[1])
# pylint: disable=R0902
class Deployment(object):
_batch_checks = True
_empty_meta_data = {'length': 0,
'hash': '',
'md5': ''}
_base_check_url = '/dynamic/upload/check?'
_check_url_format = 'name=%s&hash=%s&length=%d'
_cached_hash_folder = '__cached_hashes__'
_cached_hash_ttl = (30 * 24 * 60 * 60) # 30 days
_do_not_compress = set([ 'ogg',
'png',
'jpeg',
'jpg',
'gif',
'ico',
'mp3',
'wav',
'swf',
'webm',
'mp4',
'm4v',
'm4a',
'aac' ])
_directories_to_ignore = set([ '.git',
'.hg',
'.svn' ])
def __init__(self, game, hub_pool, hub_project, hub_version, hub_versiontitle, hub_cookie, cache_dir):
self.path = abspath(get_absolute_path(game.path))
self.plugin_main = game.plugin_main
self.canvas_main = game.canvas_main
self.flash_main = game.flash_main
self.mapping_table = game.mapping_table
self.files = game.deploy_files.items
self.engine_version = game.engine_version
self.is_multiplayer = game.is_multiplayer
self.aspect_ratio = game.aspect_ratio
self.cache_dir = cache_dir
self.game_cache_dir = join(abspath(cache_dir), game.slug)
self.stopped = False
self.hub_project = hub_project
self.hub_version = hub_version
self.hub_versiontitle = hub_versiontitle
self.hub_session = None
self.hub_pool = hub_pool
self.hub_cookie = hub_cookie
self.hub_timeout = 200
self.total_files = 0
self.num_files = 0
self.num_bytes = 0
self.uploaded_files = 0
self.uploaded_bytes = 0
self.done = False
self.error = None
try:
makedirs(self.get_gzip_dir())
except OSError as e:
if e.errno != EEXIST:
LOG.error(str(e))
def get_meta_data_path(self):
return self.game_cache_dir + '.json.gz'
def get_gzip_dir(self):
return self.game_cache_dir.replace('\\', '/')
def deploy(self, ultra=False):
self.done = self.upload_files(ultra)
if self.hub_session:
headers = {'Cookie': self.hub_cookie}
fields = {'session': self.hub_session}
try:
if self.done:
self.hub_pool.request('POST',
'/dynamic/upload/end',
fields=fields,
headers=headers,
r
|
edirect=False,
retries=5,
timeout=self.hub_timeout)
else:
self.hub_pool.request('POST',
'/dynami
|
c/upload/cancel',
fields=fields,
headers=headers,
redirect=False,
retries=5,
timeout=self.hub_timeout)
except (HTTPError, SSLError) as e:
LOG.error(e)
def cancel(self):
self.stopped = True
self.error = 'Canceled.'
def stop(self, error_msg):
self.stopped = True
self.error = error_msg
def read_metadata_cache(self):
try:
file_name = self.get_meta_data_path()
gzip_file = GzipFile(filename=file_name,
mode='rb')
meta_data_cache = json_load(gzip_file)
gzip_file.close()
cache_time = stat(file_name).st_mtime
except (IOError, ValueError):
cache_time = -1
meta_data_cache = {}
return cache_time, meta_data_cache
def write_metadata_cache(self, meta_data, force_mtime):
try:
file_path = self.get_meta_data_path()
gzip_file = GzipFile(filename=file_path,
mode='wb',
compresslevel=9)
json_dump(meta_data, gzip_file, separators=(',', ':'), sort_keys=True)
gzip_file.close()
if force_mtime > 0:
_update_file_mtime(file_path, force_mtime)
except (IOError, OSError):
pass
def delete_unused_cache_files(self, meta_data, meta_data_cache):
old_files_to_delete = (set(meta_data_cache.iterkeys()) - set(meta_data.iterkeys()))
if old_files_to_delete:
gzip_cache_dir = self.get_gzip_dir()
for relative_path in old_files_to_delete:
cache_file_name = '%s/%s.gz' % (gzip_cache_dir, relative_path)
if access(cache_file_name, R_OK):
remove(cache_file_name)
def batch_check_files(self, files, checked_queue_put):
urlopen = self.hub_pool.urlopen
base_url = self._base_check_url
url_format = self._check_url_format
get_upload_token = _get_upload_file_token
timeout = self.hub_timeout
if self._batch_checks:
query = '&'.join((url_format % (get_upload_token(i, f[1]), f[3], f[2])) for i, f in enumerate(files))
r = urlopen('GET',
base_url + query,
redirect=False,
assert_same_host=False,
timeout=timeout)
if r.status == 200:
# pylint: disable=E1103
missing_files = set(json_loads(r.data).get('missing', []))
# pylint: enable=E1103
for i, f in enumerate(files):
if get_upload_token(i, f[1]) in missing_files:
# Update meta data cache and upload
checked_queue_put(f)
else:
# Only needs to update meta data cache
checked_queue_put((f[1], f[2], f[3], f[4], f[5]))
return
else:
f = files.pop
|
Kobzol/debug-visualizer
|
gui/config.py
|
Python
|
gpl-3.0
| 2,272
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
|
License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import os
import paths
from gi.repository import Gtk
from gi.repository import Gdk
class Config(object):
UI_DIR = os.path.join(paths.DIR_ROOT, paths.DIR_RES, "gui")
GUI_MAIN_WINDOW_MENU = None
GUI_MAIN_WINDOW_TOOLBAR = None
GUI_IO_CONSOLE = None
GUI
|
_MEMORY_CANVAS_TOOLBAR = None
GUI_STARTUP_INFO_DIALOG = None
@staticmethod
def get_gui_builder(path):
return Gtk.Builder.new_from_file(os.path.join(Config.UI_DIR,
path + ".glade"))
@staticmethod
def preload():
Config.UI_DIR = os.path.join(paths.DIR_ROOT, paths.DIR_RES, "gui")
Config.GUI_MAIN_WINDOW_MENU = Config.get_gui_builder(
"main_window_menu")
Config.GUI_MAIN_WINDOW_TOOLBAR = Config.get_gui_builder(
"main_window_toolbar")
Config.GUI_IO_CONSOLE = Config.get_gui_builder(
"io_console")
Config.GUI_MEMORY_CANVAS_TOOLBAR = Config.get_gui_builder(
"memory_canvas_toolbar")
Config.GUI_STARTUP_INFO_DIALOG = Config.get_gui_builder(
"startup_info_dialog"
)
provider = Gtk.CssProvider.new()
screen = Gdk.Screen.get_default()
Gtk.StyleContext.add_provider_for_screen(
screen,
provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
provider.load_from_path(os.path.join(paths.DIR_RES,
"css",
"style.css"))
|
clejeu03/EWP
|
core/sessionManager/Project.py
|
Python
|
mit
| 3,334
| 0.007199
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core.sessionManager.Video import Video
class Project(object):
def __init__(self, name, path):
super(Project, self).__init__()
self._name = name
self._path = path
self._videos = []
self._sketchBoardVideos = [] #/!\ Unique variables : means can't add twice the same video on the sketch board
#TODO : the possibility of adding multiple times the same video to the sketch board
def addVideo(self, path):
"""
Create a Video class from the given file and add the video to the project.
:param path: the absolute path to the video
"""
if len(self._videos) < 20 :
video = Video(path)
self._videos.append(video)
else :
#TODO : message saying that it's the last video that can be added
pass
def suppressVideo(self, video):
"""
Suppress a video from the current project
"""
if isinstance(video, Video):
for item in self._videos:
print "item : " + str(type(item)) + "video : " + str(type(video))
if item == video:
self._videos.remove(video)
#If the video is on the sketchboard too, remove it
if video in self._sketchBoardVideos:
self._sketchBoardVideos.remove(video)
else:
raise TypeError("Trying to suppress an item which is not of type Video")
def newSketchBoardVideo(self, video):
""" A video need to be added to the sketchboard view, so this function update the model first of all. """
if video in self._videos:
self._sketchBoardVideos.append(video)
else:
raise Exception("Error : video not recognized.")
def removeSketchBoardVideo(self, video):
""" A video need to be removed from the sketch board view, so this function remove it from the model first to update the
|
view after. """
if video in self._sketchBoardVideos:
self._sketchBoardVideos.remove(video)
else :
raise Exception("Can't find the video")
# ---------------------- BUILT-IN FUNCTIONS ------------------------- #
def __str__(self):
#String representation of the class
describe = 'Project => name : ' + str(self._nam
|
e) + ' / path : ' + str(self._path) + ' / videos : ' + str(len(self._videos))
video = str(self._videos[0])
sketchBoardVideo = str(self._sketchBoardVideos[0])
return describe + video + sketchBoardVideo
def __eq__(self, other):
#Stands for the == compare
if self._name == other.getName() and self._path == other.getPath():
return True
else :
return False
def __ne__(self, other):
#Stands for the != compare
if self._name != other.getName() or self._path != other.getPath():
return True
else :
return False
# ---------------------- GETTER / SETTER ------------------------- #
def getName(self):
return self._name
def getPath(self):
return self._path
def getVideos(self):
return self._videos
def getSketchBoardVideos(self):
return self._sketchBoardVideos
|
spaceone/tehbot
|
tehbot/plugins/wolframalpha/__init__.py
|
Python
|
mit
| 2,727
| 0.002567
|
from tehbot.plugins import *
import tehbot.plugins as plugins
import wolframalpha
import prettytable
class WolframAlphaPlugin(StandardPlugin):
def __init__(self):
StandardPlugin.__init__(self)
self.parser.add_argument("query", nargs="+")
def initialize(self, dbconn):
StandardPlugin.initialize(self, dbconn)
try:
self.client = wolframalpha.Client(self.settings["wolframalpha_app_id"])
except:
self.settings["enabled"] = False
@staticmethod
def remove_empty_columns(table, nr_cols):
t = [[] for n in range(len(table))]
for i in range(nr_cols):
keep = False
for line in table:
if line[i]:
keep = True
break
if keep:
for j in range(len(table)):
t[j].append(table[j][i])
return t
@staticmethod
def format_table(s):
table = [[y.strip() for y in x.strip().split("|")] for x in s.splitlines()]
nr_cols = max(map(len, table))
table = [[x[i] if i < len(x) else "" for i in range(nr_cols)] for x in table]
table = WolframAlphaPlugin.remove_e
|
mpty_columns(table, nr_cols)
if len(table) < 2:
s2 = " | ".join(table[0])
return s2
pt = prettytable.PrettyTable()
pt.header = False
for line in table:
pt.add_row(line)
s = pt.get_string()
return s
def execute(self, connection, event, extra, dbconn):
try:
pargs = self.parser.parse_args(extra["args"])
if self.parser.help_reques
|
ted:
return self.parser.format_help().strip()
except Exception as e:
return u"Error: %s" % str(e)
txt = "\x0303[Wolfram|Alpha]\x03 "
try:
res = None
misc = []
for p in self.client.query(" ".join(pargs.query)).pods:
if p.id == "Input":
inp = " | ".join(p.text.splitlines())
elif p.id == "Result" and p.text:
res = self.format_table(p.text)
elif p.title and p.text:
misc.append("%s\n%s" % (p.title, self.format_table(p.text)))
txt += inp + "\n"
if res:
txt += res + "\n"
elif misc:
txt += "\n".join(misc)
else:
raise NameError
except (NameError, AttributeError):
txt += "No results."
except Exception as e:
txt = "Error: %s" % e
return plugins.shorten(txt, 450)
register_plugin(["wolframalpha", "wa"], WolframAlphaPlugin())
|
Outernet-Project/librarian-analytics
|
librarian_analytics/data.py
|
Python
|
gpl-3.0
| 4,521
| 0
|
import calendar
import datetime
import functools
import hashlib
import logging
import uuid
import user_agents
from bitpack import BitStream, BitField, register_data_type
from bitpack.utils import pack, unpack
from bottle_utils.common import to_bytes
from pytz import utc
FIELD_SEPARATOR = '$'
DESKTOP = 1
PHONE = 2
TABLET = 3
OTHER = 0
AGENT_TYPES = [DESKTOP, PHONE, TABLET, OTHER]
ACTIONS = ['html', 'image', 'audio', 'video', 'folder', 'download', 'file']
FILE_INDEX = ACTIONS.index('file')
# !!! DO NOT CHANGE THE ORDER OF ELEMENTS IN THE OS_FAMILIES LIST !!!
OS_FAMILIES = [
'Android',
'Arch Linux',
'BackTrack',
'Bada',
'BlackBerry OS',
'BlackBerry Tablet OS',
'CentOS',
'Chrome OS',
'Debian',
'Fedora',
'Firefox OS',
'FreeBSD',
'Gentoo',
'Intel Mac OS',
'iOS',
'Kindle',
'Linux',
'Linux Mint',
'Lupuntu',
'Mac OS',
'Mac OS X',
'Mageia',
'Mandriva',
'NetBSD',
'OpenBSD',
'openSUSE',
'PCLinuxOS',
'PPC Mac OS',
'Puppy',
'Red Hat',
'Slackware',
'Solaris',
'SUSE',
'Symbian OS',
'Ubuntu',
'Windows 10',
'Windows 2000',
'Windows 7',
'Windows 8',
'Windows 8.1',
'Windows 95',
'Windows 98',
'Windows CE',
'Windows ME',
'Windows Mobile',
'Windows Phone',
'Windows RT',
'Windows RT 8.1',
'Windows Vista',
'Windows XP',
]
def from_utc_timestamp(timestamp):
"""Converts the passed-in unix UTC timestamp into a datetime object."""
timestamp = float(unpack('>i', timestamp))
dt = datetime.datetime.utcfromtimestamp(timestamp)
return dt.replace(tzinf
|
o=utc)
def to_utc_timestamp(dt):
"""Converts the passed-in datetime object into a unix UTC timestamp."""
if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
msg = "Naive datetime object passed. It is assumed that it's in UTC."
logging.warning(msg)
elif dt.tzinfo != utc:
# local datetime with tzinfo
return pack('>i', calendar.timegm(dt.utctimetuple()))
return pack('>i', calendar.timegm(dt.timetup
|
le()))
register_data_type('timestamp', to_utc_timestamp, from_utc_timestamp)
def generate_device_id():
return uuid.uuid4().hex
def generate_user_id():
return uuid.uuid4().hex[:8]
def characterize_agent(ua_string):
ua = user_agents.parse(ua_string)
os_fam = ua.os.family
if ua.is_pc:
return (os_fam, DESKTOP)
elif ua.is_tablet:
return (os_fam, TABLET)
elif ua.is_mobile:
return (os_fam, PHONE)
return (os_fam, OTHER)
def serialize_cookie_data(*args):
return FIELD_SEPARATOR.join(str(a) for a in args)
def deserialize_cookie_data(data):
try:
user_id, os_fam, agent_type = data.split(FIELD_SEPARATOR)
agent_type = int(agent_type)
assert agent_type in [DESKTOP, TABLET, PHONE, OTHER], 'Invalid type?'
assert len(user_id) == 8, 'Invalid user ID?'
return user_id, os_fam, agent_type
except (ValueError, TypeError):
return None
def round_to_nearest(n):
return round(n * 2) / 2
def get_timezone_table(start=-12, end=14, step=0.5):
tz_range = range(start, end)
return functools.reduce(lambda x, i: x + [i, i + step], tz_range, [])
class StatBitStream(BitStream):
start_marker = 'OHD'
end_marker = 'DHO'
user_id = BitField(width=32, data_type='hex')
timestamp = BitField(width=32, data_type='timestamp')
timezone = BitField(width=6, data_type='integer')
path = BitField(width=128, data_type='hex')
action = BitField(width=4, data_type='integer')
os_family = BitField(width=6, data_type='integer')
agent_type = BitField(width=2, data_type='integer')
def preprocess_path(self, value):
return hashlib.md5(to_bytes(value)).hexdigest()
def preprocess_timezone(self, value):
rounded_tz = round_to_nearest(value)
return get_timezone_table().index(rounded_tz)
def postprocess_timezone(self, value):
return get_timezone_table()[value]
def preprocess_action(self, value):
try:
return ACTIONS.index(value)
except ValueError:
# when uncategorized mime type is used, fall back to file
return FILE_INDEX
def postprocess_action(self, value):
return ACTIONS[value]
def preprocess_os_family(self, value):
return OS_FAMILIES.index(value)
def postprocess_os_family(self, value):
return OS_FAMILIES[value]
|
karies/root
|
tutorials/dataframe/df103_NanoAODHiggsAnalysis.py
|
Python
|
lgpl-2.1
| 17,701
| 0.00661
|
## \file
## \ingroup tutorial_dataframe
## \notebook -draw
## \brief An example of complex analysis with RDataFrame: reconstructing the Higgs boson.
##
## This tutorial is a simplified but yet complex example of an analysis reconstructing the Higgs boson decaying to two Z
## bosons from events with four leptons. The data and simulated events are taken from CERN OpenData representing a
## subset of the data recorded in 2012 with the CMS detector at the LHC. The tutorials follows the Higgs to four leptons
## analysis published on CERN Open Data portal ([10.7483/OPENDATA.CMS.JKB8.RR42](http://opendata.cern.ch/record/5500)).
## The resulting plots show the invariant mass of the selected four lepton systems in different decay modes (four muons,
## four electrons and two of each kind) and in a combined plot indicating the decay of the Higgs boson with a mass of
## about 125 GeV.
##
## The following steps are performed for each sample with data and simulated events in order to reconstruct the Higgs
## boson from the selected muons and electrons:
## 1.
|
Select interesting events with multiple cuts on event properties, e.g., number of leptons, kinematics of the
## leptons and quality of the t
|
racks.
## 2. Reconstruct two Z bosons of which only one on the mass shell from the selected events and apply additional cuts on
## the reconstructed objects.
## 3. Reconstruct the Higgs boson from the remaining Z boson candidates and calculate its invariant mass.
##
## Another aim of this version of the tutorial is to show a way to blend C++ and Python code. All the functions that
## make computations on data to define new columns or filter existing ones in a precise way, better suited to be written
## in C++, have been moved to a header that is then declared to the ROOT C++ interpreter. The functions that instead
## create nodes of the computational graph (e.g. Filter, Define) remain inside the main Python script.
##
## The tutorial has the fast mode enabled by default, which reads the data from already skimmed
## datasets with a total size of only 51MB. If the fast mode is disabled, the tutorial runs over
## the full dataset with a size of 12GB.
##
## \macro_image
## \macro_code
## \macro_output
##
## \date July 2019
## \author Stefan Wunsch (KIT, CERN), Vincenzo Eduardo Padulano (UniMiB, CERN)
import ROOT
import os
# Enable multi-threading
ROOT.ROOT.EnableImplicitMT()
# Include necessary header
higgs_header_path = os.path.join(os.sep, str(ROOT.gROOT.GetTutorialDir()) + os.sep, "dataframe" + os.sep,
"df103_NanoAODHiggsAnalysis_python.h")
ROOT.gInterpreter.Declare('#include "{}"'.format(higgs_header_path))
# Python functions
def reco_higgs_to_2el2mu(df):
"""Reconstruct Higgs from two electrons and two muons"""
# Filter interesting events
df_base = selection_2el2mu(df)
# Compute masses of Z systems
df_z_mass = df_base.Define("Z_mass", "compute_z_masses_2el2mu(Electron_pt, Electron_eta, Electron_phi,"
" Electron_mass, Muon_pt, Muon_eta, Muon_phi, Muon_mass)")
# Cut on mass of Z candidates
df_z_cut = filter_z_candidates(df_z_mass)
# Reconstruct H mass
df_h_mass = df_z_cut.Define("H_mass", "compute_higgs_mass_2el2mu(Electron_pt, Electron_eta, Electron_phi,"
" Electron_mass, Muon_pt, Muon_eta, Muon_phi, Muon_mass)")
return df_h_mass
def selection_2el2mu(df):
"""Select interesting events with two electrons and two muons"""
df_ge2el2mu = df.Filter("nElectron>=2 && nMuon>=2", "At least two electrons and two muons")
df_eta = df_ge2el2mu.Filter("All(abs(Electron_eta)<2.5) && All(abs(Muon_eta)<2.4)", "Eta cuts")
df_pt = df_eta.Filter("pt_cuts(Muon_pt, Electron_pt)", "Pt cuts")
df_dr = df_pt.Filter("dr_cuts(Muon_eta, Muon_phi, Electron_eta, Electron_phi)", "Dr cuts")
df_iso = df_dr.Filter("All(abs(Electron_pfRelIso03_all)<0.40) && All(abs(Muon_pfRelIso04_all)<0.40)",
"Require good isolation")
df_el_ip3d = df_iso.Define("Electron_ip3d_el", "sqrt(Electron_dxy*Electron_dxy + Electron_dz*Electron_dz)")
df_el_sip3d = df_el_ip3d.Define("Electron_sip3d_el",
"Electron_ip3d_el/sqrt(Electron_dxyErr*Electron_dxyErr + "
"Electron_dzErr*Electron_dzErr)")
df_el_track = df_el_sip3d.Filter("All(Electron_sip3d_el<4) && All(abs(Electron_dxy)<0.5) &&"
" All(abs(Electron_dz)<1.0)",
"Electron track close to primary vertex with small uncertainty")
df_mu_ip3d = df_el_track.Define("Muon_ip3d_mu", "sqrt(Muon_dxy*Muon_dxy + Muon_dz*Muon_dz)")
df_mu_sip3d = df_mu_ip3d.Define("Muon_sip3d_mu",
"Muon_ip3d_mu/sqrt(Muon_dxyErr*Muon_dxyErr + Muon_dzErr*Muon_dzErr)")
df_mu_track = df_mu_sip3d.Filter("All(Muon_sip3d_mu<4) && All(abs(Muon_dxy)<0.5) && All(abs(Muon_dz)<1.0)",
"Muon track close to primary vertex with small uncertainty")
df_2p2n = df_mu_track.Filter("Sum(Electron_charge)==0 && Sum(Muon_charge)==0",
"Two opposite charged electron and muon pairs")
return df_2p2n
def reco_higgs_to_4mu(df):
"""Reconstruct Higgs from four muons"""
# Filter interesting events
df_base = selection_4mu(df)
# Reconstruct Z systems
df_z_idx = df_base.Define("Z_idx", "reco_zz_to_4l(Muon_pt, Muon_eta, Muon_phi, Muon_mass, Muon_charge)")
# Cut on distance between muons building Z systems
df_z_dr = df_z_idx.Filter("filter_z_dr(Z_idx, Muon_eta, Muon_phi)", "Delta R separation of muons building Z system")
# Compute masses of Z systems
df_z_mass = df_z_dr.Define("Z_mass", "compute_z_masses_4l(Z_idx, Muon_pt, Muon_eta, Muon_phi, Muon_mass)")
# Cut on mass of Z candidates
df_z_cut = filter_z_candidates(df_z_mass)
# Reconstruct H mass
df_h_mass = df_z_cut.Define("H_mass", "compute_higgs_mass_4l(Z_idx, Muon_pt, Muon_eta, Muon_phi, Muon_mass)")
return df_h_mass
def selection_4mu(df):
"""Select interesting events with four muons"""
df_ge4m = df.Filter("nMuon>=4", "At least four muons")
df_iso = df_ge4m.Filter("All(abs(Muon_pfRelIso04_all)<0.40)", "Require good isolation")
df_kin = df_iso.Filter("All(Muon_pt>5) && All(abs(Muon_eta)<2.4)", "Good muon kinematics")
df_ip3d = df_kin.Define("Muon_ip3d", "sqrt(Muon_dxy*Muon_dxy + Muon_dz*Muon_dz)")
df_sip3d = df_ip3d.Define("Muon_sip3d", "Muon_ip3d/sqrt(Muon_dxyErr*Muon_dxyErr + Muon_dzErr*Muon_dzErr)")
df_pv = df_sip3d.Filter("All(Muon_sip3d<4) && All(abs(Muon_dxy)<0.5) && All(abs(Muon_dz)<1.0)",
"Track close to primary vertex with small uncertainty")
df_2p2n = df_pv.Filter("nMuon==4 && Sum(Muon_charge==1)==2 && Sum(Muon_charge==-1)==2",
"Two positive and two negative muons")
return df_2p2n
def filter_z_candidates(df):
"""Apply selection on reconstructed Z candidates"""
df_z1_cut = df.Filter("Z_mass[0] > 40 && Z_mass[0] < 120", "Mass of first Z candidate in [40, 120]")
df_z2_cut = df_z1_cut.Filter("Z_mass[1] > 12 && Z_mass[1] < 120", "Mass of second Z candidate in [12, 120]")
return df_z2_cut
def reco_higgs_to_4el(df):
"""Reconstruct Higgs from four electrons"""
# Filter interesting events
df_base = selection_4el(df)
# Reconstruct Z systems
df_z_idx = df_base.Define("Z_idx",
"reco_zz_to_4l(Electron_pt, Electron_eta, Electron_phi, Electron_mass, Electron_charge)")
# Cut on distance between Electrons building Z systems
df_z_dr = df_z_idx.Filter("filter_z_dr(Z_idx, Electron_eta, Electron_phi)",
"Delta R separation of Electrons building Z system")
# Compute masses of Z systems
df_z_mass = df_z_dr.Define("Z_mass",
"compute_z_masses_4l(Z_idx, Electron_pt, Electron_eta, Electron_phi, Electron_mass)")
# Cut on mass of Z candidates
df_z_cut = filter_z_candidates(df_z_mass)
# Reconstruct
|
alkamid/lab-scripts
|
matplotlib-scripts/matplotFF.py
|
Python
|
gpl-2.0
| 9,814
| 0.005199
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.interpolate
from PIL import Image
matplotlib.use('Qt5Agg')
class matplotFF():
"""A class for plotting far-field measurements of lasers. It requires
the 'x z signal' format, but supports stitched measurements — the data
can be simply added at the end of a file
"""
def __init__(self, fig, BaseFilename, title='', xLen=0, zLen=0, stitch=True, distance=None, angular_direction=None,
output_filename=None):
self.BaseFilename = BaseFilename
if output_filename is None:
self.output_filename = BaseFilename
else:
self.output_filename = output_filename
self.title = title
self.fig = fig
self.rawData = np.loadtxt(self.BaseFilename)
self.zRaw = self.rawData[:,0]
self.xRaw = self.rawData[:,1]
self.sigRaw = self.rawData[:,2]
self.xlim = (None, None)
self.zlim = (None, None)
z_pixdim = (self.zRaw.max() - self.zRaw.min()) / len(self.zRaw)
x_pixdim = (self.xRaw.max() - self.xRaw.min()) / len(self.xRaw)
self.pixdim = (x_pixdim, z_pixdim)
self.distance = distance
self.angular_direction = angular_direction
if distance is not None:
if angular_direction == 'x':
self.xRaw = 180/(2*np.pi)*np.arctan(self.xRaw/distance)
elif angular_direction == 'z':
self.zRaw = 180/(2*np.pi)*np.arctan(self.zRaw/distance)
# figure out the number of steps for Z and X
stage_tolerance = 0.05 # stages don't always move to the same
# place, so numerical positions might differ slightly
if xLen == 0 and zLen == 0:
z_unique_vals = [self.zRaw[0]]
x_unique_vals = [self.xRaw[0]]
# count unique values of Z/X values in the data file
# (allows for non-rectangular data, i.e. for stiching together
# patches)
for i in range(len(self.zRaw)):
if sum(abs(z_unique_vals-self.zRaw[i]) > stage_tolerance) == len(z_unique_vals):
z_unique_vals.append(self.zRaw[i])
for i in range(len(self.xRaw)):
if sum(abs(x_unique_vals-self.xRaw[i]) > stage_tolerance) == len(x_unique_vals):
x_unique_vals.append(self.xRaw[i])
self.xLen = len(x_unique_vals)
self.zLen = len(z_unique_vals)
else:
self.xLen = xLen
self.zLen = zLen
# fill in zeros if we are plotting a stitched far field
if stitch:
self.x = np.ndarray((self.zLen,self.xLen))
self.z = np.ndarray((self.zLen,self.xLen))
self.signal = np.zeros((self.zLen, self.xLen))
for iz, z in enumerate(so
|
rted(z_unique_vals)):
for ix, x in enumerate(sorted(x_unique_vals)):
self.x[iz][ix] = x
self.z[iz][ix] = z
for i in zip(self.xRaw, self.zRaw, self.sigRaw):
if (abs(i[0]-x) < stage_tolerance) and (abs(i[1]-z) < stage_tolerance):
self.si
|
gnal[iz][ix] = i[2]
break
else:
self.x = self.xRaw.reshape((self.zLen,self.xLen))
self.z = self.zRaw.reshape((self.zLen,self.xLen))
self.signal = self.sigRaw.reshape((self.zLen,self.xLen))
# normalise the signal to [0, 1]
self.signal -= np.min(self.signal)
self.signal /= np.max(self.signal)
def trim(self, xmin=None, xmax=None, zmin=None, zmax=None):
self.x = self.x[zmin:zmax,xmin:xmax]
self.z = self.z[zmin:zmax,xmin:xmax]
self.signal = self.signal[zmin:zmax,xmin:xmax]
# normalise the signal to [0, 1]
self.signal -= np.min(self.signal)
self.signal /= np.max(self.signal)
def plotLine(self):
'''plots the cross section of far-field (averaged all points at a set z position)'''
av = [np.mean(row) for row in self.signal]
zLine = [z[0] for z in self.z]
plt.plot(av, zLine, color='#1b9e77')
def plotLineAngle(self, distance=0, phaseShift=0, mean=True, angleData=False, color='#1b9e77', label=''):
"""plots the cross section of far-field (averaged all points at a set z position)
Args:
distance: distance of the detector in mm (for conversion into theta)
phaseShift: angle in radians. Sometimes we want to shift the farfield by pi to get the plot on the other side of the polar coordinate system
angleData: if True, it means that the data were collected with a rotation stage and therefore do not have to be converted into angle
"""
if mean:
intens = np.mean(self.signal, axis=0)
else:
intens = self.signal[-5]
intens = np.mean(self.signal)
if angleData:
theta = self.x[0]*np.pi/180
else:
theta = [np.arctan(z[0]/distance)+phaseShift for z in self.z]
# normalize values to [0,1]
intens -= np.min(intens)
intens /= np.max(intens)
self.ax1 = self.fig.add_subplot(111, polar=True)
self.ax1.plot(theta, intens, color=color, linewidth=2.0, label=label)
#self.ax1.set_theta_offset(-np.pi/2)
self.ax1.get_yaxis().set_visible(False)
def plot(self, rotate=False):
self.ax1 = self.fig.add_subplot(111)
self.ax1.margins(x=0)
self.ax1.set_xlim(self.x.min(), self.x.max())
plt.pcolormesh(self.x,self.z,self.signal, edgecolors='face')
self.fig.suptitle(self.title, y=0.98, weight='bold')
self.fig.subplots_adjust(top=0.86)
self.ax1.tick_params(labelright=True, labeltop=True)
return self.fig
def crosssection_plot(self, axis=0, subplot=None):
cs = np.mean(self.signal, axis=axis)
cs /= np.max(cs)
self.ax_cutline[axis] = self.fig.add_subplot(subplot)
ax = self.ax_cutline[axis]
xlim = self.ax1.get_xlim()
ylim = self.ax1.get_ylim()
ratio = abs(xlim[1]-xlim[0])/abs(ylim[1]-ylim[0])
if axis == 0:
ax.plot(self.x[0, :], cs)
ax.set_aspect(ratio*7)
ax.set_xlim(xlim)
ax.set_ylim([0, 1.05])
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
ax.set_xlabel(r"$\theta$ / degrees")
ax.set_ylabel("intensity / arb. u.", fontsize=7)
self.ax1.xaxis.label.set_visible(False)
elif axis == 1:
ax.plot(cs, self.z[:, 0])
ax.set_xlim([1.05, 0])
ax.set_ylim(ylim)
ax.set_aspect(ratio/7)
if self.distance is not None and self.angular_direction == 'z':
ax.set_ylabel("$\phi$ / degrees")
else:
ax.set_ylabel("Z / mm")
ax.set_xlabel("intensity / arb. u.", fontsize=7)
self.ax1.yaxis.label.set_visible(False)
#self.gs1.update(wspace=0.025, hspace=0.05) # set the spacing between axes.
def plotInterpolate(self, xPoints, zPoints, rotate=False, origin='lower', cutlines=False):
if not cutlines:
self.ax1 = self.fig.add_subplot(111)
else:
#self.gs1 = gridspec.GridSpec(2, 2)
#self.gs1.update(wspace=0.025, hspace=0.05) # set the spacing between axes.
self.ax1 = self.fig.add_subplot(224)
self.ax_cutline = [None, None]
self.ax1.tick_params(labelright=True, labelbottom=True, labelleft=False, bottom=True, right=True, left=False)
self.ax1.set_xlabel(r"$\theta$ / degrees")
if self.distance is not None and self.angular_direction == 'z':
self.ax1.set_ylabel("$\phi$ / degrees")
else:
self.ax1.set_ylabel("Z / mm")
xi, zi = np.linspace(self.x.min(), self.x.max(), xPoints), np.linspace(self.z.min(), self.z.max(), zPoints)
        xi, zi = np.meshgrid(xi, zi)
hayd/pattern | pattern/text/nl/__main__.py | Python | bsd-3-clause | 521 | 0.001919
#### PATTERN | NL | PARSER COMMAND-LINE ##################################
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
##########################################################################
# In Python 2.7+ modules invoked from the command line will look for a
# __main__.py.
from __future__ import absolute_import
from .__init__ import commandline, parse
commandline(parse)
mbouchar/xc2424scan | src/xc2424scan/utils/test.py | Python | gpl-2.0 | 620 | 0.003226
#!/usr/bin/python
import socket
HOST = raw_input("enter scanner ip : ")
PORT = 14882
if __name__ == "__main__":
socks = socket.socket()
socks.connect((HOST, PORT))
socks.settimeout(1)
try:
        while True:
            command = raw_input("# ")
if command != "":
socks.send("%s\n" % command)
try:
data = socks.recv(1024)
print "Received", repr(data)
except socket.timeout:
pass
except KeyboardInterrupt:
pass
except Exception, e:
print e
socks.close()
print "\n"
ppizarror/korektor | bin/langeditor/_import.py | Python | gpl-2.0 | 1,200 | 0.003342
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IMPORT
Allows adapting a translated export into one valid for hoa
Author: PABLO PIZARRO @ github.com/ppizarror
Date: 2014-2015
License: GPLv2
"""
__author__ = "ppizarror"
# Library imports
import os
import sys
reload(sys)
# noinspection PyUnresolvedReferences
sys.setdefaultencoding('UTF8') # @UndefinedVariable
try:
namearchive = raw_input("Ingrese el nombre del archivo que desea transformar: ").replace(".txt", "")
# noinspection PyArgumentEqualDefault
archivo = open(namearchive + ".txt", "r")
except:
print "El archivo no existe!"
exit()
l = []
nw = []
# noinspection PyUnboundLocalVariable
for i in archivo:
l.append(i)
for j in range(0, len(l), 2):
num = l[j].replace("{", "").replace("}", "").replace("\n", "")
txt = l[j + 1].replace(" ", "|")
linea = num + " // " + txt
nw.append(linea)
print "Archivo importado correctamente"
archivo.close()
# noinspection PyUnboundLocalVariable
archivo2 = open(namearchive + ".txt", "w")
for i in nw:
archivo2.write(i)
archivo2.close()
try:
os.remove("_import.pyc")
except:
pass
merelcoin/merelcoin | test/functional/rpc_fundrawtransaction.py | Python | mit | 34,177 | 0.009509
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes_bi,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
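        # e.g. with the default min relay fee of 0.00001 BTC/kvB this allows a positive
        # drift of 2 * 0.00001 / 1000 = 0.00000002 BTC (2 satoshi) against the target fee.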
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
watchonly_vout = find_vout_for_address(self.nodes[0], watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": watchonly_txid, "vout": watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
        ##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
icomms/wqmanager | apps/receiver/__init__.py | Python | bsd-3-clause | 1,031 | 0.007759
import logging
from django.http import HttpResponse
from receiver.submitresponse import SubmitResponse
def duplicate_attachment(way_handled, additional_params):
'''Return a custom http response associated the handling
of the xform. In this case, telling the sender that
they submitted a duplicate
'''
try:
# NOTE: this possibly shouldn't be a "200" code, but it is for
# now because it's not clear how JavaRosa will handle 202.
# see: http://code.dimagi.com/JavaRosa/wiki/ServerResponseFormat
response = SubmitResponse(status_code=200, or_status_code=2020,
                                  or_status="Duplicate Submission.",
submit_id=way_handled.submission.id,
**additional_params)
return response.to_response()
except Exception, e:
logging.error("Problem in properly responding to
|
instance data handling of %s" %
way_handled)
snegovick/dswarm_simulator | path_finding/smoothing_algorithms.py | Python | gpl-3.0 | 6,317 | 0.008073
import math
from map_utils import *
def calc_field_grad(m, pt):
w = len(m[0])
h = len(m)
if pt[0]>=w or pt[0]<0:
return None
if pt[1]>=h or pt[1]<0:
return None
px_s = pt[0]-1
px_e = pt[0]+1
if px_s>=w or px_s<0:
px_s = pt[0]
if px_e>=w or px_e<0:
px_e = pt[0]
dx = m[pt[1]][px_e] - m[pt[1]][px_s]
py_s = pt[1]-1
py_e = pt[1]+1
if py_s>=w or py_s<0:
py_s = pt[1]
if py_e>=w or py_e<0:
py_e = pt[1]
dy = m[py_e][pt[0]] - m[py_s][pt[0]]
return (dx, dy)
def smooth_path(path):
y = [(float(p[0]), float(p[1])) for p in path]
x = [(float(p[0]), float(p[1])) for p in path]
alpha = 0.01
beta = 0.2
tolerance = 0.1
change = tolerance
count = 0
while change>=tolerance:
count += 1
change = 0.0
n_y = [y[0]]
for i in range(1, len(y)-1):
t = (y[i][0], y[i][1])
t_x = y[i][0] + alpha*(2.0*(x[i][0]-y[i][0]))
t_y = y[i][1] + alpha*(2.0*(x[i][1]-y[i][1]))
dx1 = (y[i][0]-y[i-1][0])
dx2 = (y[i+1][0]-y[i][0])
dy1 = (y[i][1]-y[i-1][1])
dy2 = (y[i+1][1]-y[i][1])
vect_1_len = math.sqrt(dx1**2+dy1**2)
vect_2_len = math.sqrt(dx2**2+dy2**2)
# print "vector 1 length:", vect_1_len, "(", "dx1:", dx1, "dy1:", dy1, ")", "vector 2 length:", vect_2_len, "(", "dx2:", dx2, "dy2:", dy2, ")"
total_len = vect_1_len*vect_2_len
if total_len == 0:
dx1 = 0
dy1 = 0
total_len = 1
#t2_x = t_x - beta*(1.0-(dx1*dx2)/(total_len))
#t2_y = t_y - beta*(1.0-(dy1*dy2)/(total_len))
#delta_x = beta*(1.0-(y[i+1][0]*y[i-1][0])/total_len)
#delta_y = beta*(1.0-(y[i+1][1]*y[i-1][1])/total_len)
delta_x = beta*((y[i-1][0] + y[i+1][0] - 2.*y[i][0])/total_len)
delta_y = beta*((y[i-1][1] + y[i+1][1] - 2.*y[i][1])/total_len)
t2_x = t_x + delta_x
t2_y = t_y + delta_y
# print "delta x:", delta_x, "delta y:", delta_y
n_y.append((t2_x, t2_y))
# print "t_x:", t_x, "t_y:", t_y, "t2_x:", t2_x, "t2_y:", t2_y, "new y:", n_y[-1], "old y:", t
change += abs(t[0] - n_y[-1][0])+abs(t[1] - n_y[-1][1])
# print "change:", change
n_y.append(y[-1])
y = n_y
# break
print "smoother took", count, "cycles"
return y
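# A minimal sketch of how smooth_path() is typically driven (the helper name below is
# illustrative only). Interior points are pulled toward the original path while being
# relaxed toward the midpoint of their neighbours,
#   y_i <- y_i + 2*alpha*(x_i - y_i) + beta*(y_{i-1} + y_{i+1} - 2*y_i) / (|v1|*|v2|)
# with x the original path, y the smoothed one and v1, v2 the segments around y_i.
def _smooth_path_example():
    # endpoints stay fixed; the corner at (0, 2) gets rounded off
    return smooth_path([(0.0, 0.0), (0.0, 1.0), (0.0, 2.0), (1.0, 2.0), (2.0, 2.0)])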
def smooth_path_with_field(path, field_map, units_per_cell):
y = [(float(p[0]), float(p[1])) for p in path]
x = [(float(p[0]), float(p[1])) for p in path]
m = field_map
upc = units_per_cell
alpha = 0.01
beta = 0.2
gamma = 0.25
tolerance = 0.1
step_coeff = 0.8
change = tolerance
count = 0
while change>=tolerance:
count += 1
change = 0.0
n_y = [y[0]]
for i in range(1, len(y)-1):
t = (y[i][0], y[i][1])
t_x = y[i][0] + alpha*(2.0*(x[i][0]-y[i][0]))
t_y = y[i][1] + alpha*(2.0*(x[i][1]-y[i][1]))
dx1 = (y[i][0]-y[i-1][0])
dx2 = (y[i+1][0]-y[i][0])
dy1 = (y[i][1]-y[i-1][1])
dy2 = (y[i+1][1]-y[i][1])
vect_1_len = math.sqrt(dx1**2+dy1**2)
vect_2_len = math.sqrt(dx2**2+dy2**2)
# print "vector 1 length:", vect_1_len, "(", "dx1:", dx1, "dy1:", dy1, ")", "vector 2 length:", vect_2_len, "(", "dx2:", dx2, "dy2:", dy2, ")"
total_len = vect_1_len*vect_2_len
if total_len == 0:
dx1 = 0
dy1 = 0
total_len = 1
#t2_x = t_x - beta*(1.0-(dx1*dx2)/(total_len))
#t2_y = t_y - beta*(1.0-(dy1*dy2)/(total_len))
#delta_x = beta*(1.0-(y[i+1][0]*y[i-1][0])/total_len)
#delta_y = beta*(1.0-(y[i+1][1]*y[i-1][1])/total_len)
delta_x = beta*((y[i-1][0] + y[i+1][0] - 2.*y[i][0])/total_len)
delta_y = beta*((y[i-1][1] + y[i+1][1] - 2.*y[i][1])/total_len)
t_x = t_x + delta_x
t_y = t_y + delta_y
# print "i:", i, type(y), type(m)
pt_x = int(y[i][0])
pt_y = int(y[i][1])
# print y[i][0], y[i][1], "->", pt_x, pt_y, "upc:", upc
dx, dy = calc_field_grad(m, (pt_x, pt_y))
# print "dx, dy:", (dx, dy)
sy = (1 if dy>0 else -1)
sx = (1 if dx>0 else -1)
# print "m at", pt_x, pt_y,":",m[pt_y][pt_x]
dx = gamma*sx*m[pt_y][pt_x]
dy = gamma*sy*m[pt_y][pt_x]
# print "dx, dy:", (dx, dy)
t_x = t_x - dx
t_y = t_y - dy
# print "delta x:", delta_x, "delta y:", delta_y
n_y.append((t_x, t_y))
# print "t_x:", t_x, "t_y:", t_y, "t2_x:", t2_x, "t2_y:", t2_y, "new y:", n_y[-1], "old y:", t
change += abs(t[0] - n_y[-1][0])+abs(t[1] - n_y[-1][1])
# print "change:", change
n_y.append(y[-1])
y = n_y
# break
alpha = alpha*step_coeff
beta = beta*step_coeff
gamma = gamma*step_coeff
print "smoother took", count, "cycles"
return y
|
if __name__=="__main__":
m = [[ 0, 0.1, 0.2, 0.3],
[ 0.1, 0.2, 0.3, 0.4],
[ 0.2, 0.3, 0.4, 0.5],
[ 0.3, 0.4, 0.5, 0.6]]
path = [(0, 3), (1,3), (2, 3), (2, 2), (2, 1), (2, 0), (3, 0)]
    print "field gradient:", calc_field_grad(m, (1, 1))
orig_path = path[:]
path = smooth_path_with_field(path, m, 1)
print "after:"
for p in path:
print p
import Image, ImageDraw
scale = 50
size = 2000
offset = 10
im = Image.new('RGBA', (size, size), (255, 255, 255, 255))
draw = ImageDraw.Draw(im)
pt_size = size/100
for p in path:
draw.ellipse((p[0]*scale+offset-pt_size/2, p[1]*scale+offset-pt_size/2, p[0]*scale+offset+pt_size/2, p[1]*scale+offset+pt_size/2), fill=(255, 0, 0, 100))
for p in orig_path:
draw.ellipse((p[0]*scale+offset-pt_size/2, p[1]*scale+offset-pt_size/2, p[0]*scale+offset+pt_size/2, p[1]*scale+offset+pt_size/2), fill=(0, 0, 0, 100))
im.save("out.png", "PNG")
crf1111/Bio-Informatics-Learning | Bio-StrongHold/src/Enumerating_Unrooted_Binary_Trees.py | Python | mit | 3,854 | 0.003114
class Node():
def __init__(self, name):
self.name = name
def __str__(self):
if self.name is not None:
return self.name
else:
return "internal_{}".format(id(self))
class Edge():
def __init__(self, node1, node2):
self.nodes = [node1, node2]
def __str__(self):
return "{}--{}".format(*self.nodes)
class Tree():
def __init__(self, nodes=[], edges=[]):
self.nodes = nodes
self.edges = edges
def __str__(self):
return "tree_{} edges: {}".format(id(self), [str(x) for x in self.edges])
def copy(self):
node_conversion = {node: Node(node.name) for node in self.nodes}
new_nodes = list(node_conversion.values())
        new_edges = [Edge(node_conversion[edge.nodes[0]], node_conversion[edge.nodes[1]]) for edge in self.edges]
new_tree = Tree(new_nodes, new_edges)
return new_tree
def enumerate_trees(leaves):
assert(len(leaves) > 1)
if len(leaves) == 2:
        n1, n2 = leaves
t = Tree()
t.nodes = [Node(n1), Node(n2)]
t.edges = [Edge(t.nodes[0], t.nodes[1])]
return [t]
elif len(leaves) > 2:
# get the smaller tree first
old_trees = enumerate_trees(leaves[:-1])
new_leaf_name = leaves[-1]
new_trees = []
# find the ways to add the new leaf
for old_tree in old_trees:
for i in range(len(old_tree.edges)):
new_tree = old_tree.copy()
edge_to_split = new_tree.edges[i]
old_node1, old_node2 = edge_to_split.nodes
# get rid of the old edge
new_tree.edges.remove(edge_to_split)
# add a new internal node
internal = Node(None)
new_tree.nodes.append(internal)
# add the new leaf
new_leaf = Node(new_leaf_name)
new_tree.nodes.append(new_leaf)
# make the three new edges
new_tree.edges.append(Edge(old_node1, internal))
new_tree.edges.append(Edge(old_node2, internal))
new_tree.edges.append(Edge(new_leaf, internal))
# put this new tree in the list
new_trees.append(new_tree)
return new_trees
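# Since every new leaf can be grafted onto any existing edge, the number of trees
# returned grows as 1, 3, 3*5, 3*5*7, ... i.e. (2n-5)!! unrooted binary trees for
# n labelled leaves (3 trees for 4 leaves, 15 for 5, 105 for 6).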
def newick_format(tree_in):
tree = tree_in.copy()
if len(tree.nodes) == 1:
return "{};".format(tree.nodes[0])
elif len(tree.nodes) == 2:
return "({},{});".format(*tree.nodes)
elif len(tree.nodes) > 2:
# reduce one of the nodes in the tree
for candidate_node in tree.nodes:
# ignore leaves
if candidate_node.name is not None:
continue
adjacent_edges = [edge for edge in tree.edges if candidate_node in edge.nodes]
adjacent_nodes = [node for edge in adjacent_edges for node in edge.nodes if node in edge.nodes and node is not candidate_node]
adjacent_leaves = [node for node in adjacent_nodes if node.name is not None]
# find a node with two leaves
if len(adjacent_leaves) == 2 or len(adjacent_leaves) == 3:
leaf1, leaf2 = adjacent_leaves[0: 2]
edges_to_cut = [edge for edge in adjacent_edges if leaf1 in edge.nodes or leaf2 in edge.nodes]
candidate_node.name = "({},{})".format(leaf1, leaf2)
# remove leaves
tree.nodes.remove(leaf1)
tree.nodes.remove(leaf2)
for edge in edges_to_cut: tree.edges.remove(edge)
# cycle this one again
return newick_format(tree)
if __name__ == '__main__':
leaves = open('data/data.dat').read().split()
trees = enumerate_trees(leaves)
print '\n'.join([newick_format(tree) for tree in trees])
yeyanchao/calibre | src/calibre/gui2/preferences/look_feel_ui.py | Python | gpl-3.0 | 19,303 | 0.004403
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/preferences/look_feel.ui'
#
# Created: Thu Oct 25 16:54:55 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(820, 519)
Form.setWindowTitle(_("Form"))
self.gridLayout_2 = QtGui.QGridLayout(Form)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.tabWidget = QtGui.QTabWidget(Form)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_9 = QtGui.QGridLayout(self.tab)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.label_7 = QtGui.QLabel(self.tab)
self.label_7.setText(_("Choose &language (requires restart):"))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_9.addWidget(self.label_7, 2, 0, 1, 1)
self.opt_language = QtGui.QComboBox(self.tab)
self.opt_language.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_language.setMinimumContentsLength(20)
self.opt_language.setObjectName(_fromUtf8("opt_language"))
self.gridLayout_9.addWidget(self.opt_language, 2, 1, 1, 1)
self.opt_systray_icon = QtGui.QCheckBox(self.tab)
self.opt_systray_icon.setText(_("Enable system &tray icon (needs restart)"))
self.opt_systray_icon.setObjectName(_fromUtf8("opt_systray_icon"))
self.gridLayout_9.addWidget(self.opt_systray_icon, 3, 0, 1, 1)
self.label_17 = QtGui.QLabel(self.tab)
self.label_17.setText(_("User Interface &layout (needs restart):"))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_9.addWidget(self.label_17, 1, 0, 1, 1)
self.opt_gui_layout = QtGui.QComboBox(self.tab)
self.opt_gui_layout.setMaximumSize(QtCore.QSize(250, 16777215))
self.opt_gui_layout.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLengthWithIcon)
self.opt_gui_layout.setMinimumContentsLength(20)
self.opt_gui_layout.setObjectName(_fromUtf8("opt_gui_layout"))
self.gridLayout_9.addWidget(self.opt_gui_layout, 1, 1, 1, 1)
self.opt_disable_animations = QtGui.QCheckBox(self.tab)
self.opt_disable_animations.setToolTip(_("Disable all animations. Useful if you have a slow/old computer."))
self.opt_disable_animations.setText(_("Disable &animations"))
self.opt_disable_animations.setObjectName(_fromUtf8("opt_disable_animations"))
self.gridLayout_9.addWidget(self.opt_disable_animations, 3, 1, 1, 1)
self.opt_disable_tray_notification = QtGui.QCheckBox(self.tab)
self.opt_disable_tray_notification.setText(_("Disable ¬ifications in system tray"))
self.opt_disable_tray_notification.setObjectName(_fromUtf8("opt_disable_tray_notification"))
self.gridLayout_9.addWidget(self.opt_disable_tray_notification, 4, 0, 1, 1)
self.opt_show_splash_screen = QtGui.QCheckBox(self.tab)
self.opt_show_splash_screen.setText(_("Show &splash screen at startup"))
self.opt_show_splash_screen.setObjectName(_fromUtf8("opt_show_splash_screen"))
self.gridLayout_9.addWidget(self.opt_show_splash_screen, 4, 1, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(self.tab)
self.groupBox_2.setTitle(_("&Toolbar"))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_8 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.opt_toolbar_icon_size = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_icon_size.setObjectName(_fromUtf8("opt_toolbar_icon_size"))
self.gridLayout_8.addWidget(self.opt_toolbar_icon_size, 0, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.groupBox_2)
self.label_5.setText(_("&Icon size:"))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1)
self.opt_toolbar_text = QtGui.QComboBox(self.groupBox_2)
self.opt_toolbar_text.setObjectName(_fromUtf8("opt_toolbar_text"))
self.gridLayout_8.addWidget(self.opt_toolbar_text, 1, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBox_2)
self.label_8.setText(_("Show &text under icons:"))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_8.addWidget(self.label_8, 1, 0, 1, 1)
self.gridLayout_9.addWidget(self.groupBox_2, 7, 0, 1, 2)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_9.addItem(spacerItem, 8, 0, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(self.tab)
self.label_2.setText(_("Interface font:"))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.font_display = QtGui.QLineEdit(self.tab)
self.font_display.setReadOnly(True)
self.font_display.setObjectName(_fromUtf8("font_display"))
self.horizontalLayout.addWidget(self.font_display)
self.gridLayout_9.addLayout(self.horizontalLayout, 6, 0, 1, 1)
self.change_font_button = QtGui.QPushButton(self.tab)
self.change_font_button.setText(_("Change &font (needs restart)"))
self.change_font_button.setObjectName(_fromUtf8("change_font_button"))
self.gridLayout_9.addWidget(self.change_font_button, 6, 1, 1, 1)
self.label_widget_style = QtGui.QLabel(self.tab)
self.label_widget_style.setText(_("User interface &style (needs restart):"))
self.label_widget_style.setObjectName(_fromUtf8("label_widget_style"))
self.gridLayout_9.addWidget(self.label_widget_style, 0, 0, 1, 1)
self.opt_ui_style = QtGui.QComboBox(self.tab)
self.opt_ui_style.setObjectName(_fromUtf8("opt_ui_style"))
self.gridLayout_9.addWidget(self.opt_ui_style, 0, 1, 1, 1)
self.opt_book_list_tooltips = QtGui.QCheckBox(self.tab)
        self.opt_book_list_tooltips.setText(_("Show &tooltips in the book list"))
self.opt_book_list_tooltips.setObjectName(_fromUtf8("opt_book_list_tooltips"))
self.gridLayout_9.addWidget(self.opt_book_list_tooltips, 5, 0, 1, 1)
icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("lt.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidget.addTab(self.tab, icon, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.gridLayout_12 = QtGui.QGridLayout(self.tab_4)
self.gridLayout_12.setObjectName(_fromUtf8("gridLayout_12"))
self.label_3 = QtGui.QLabel(self.tab_4)
self.label_3.setText(_("Note that <b>comments</b> will always be displayed at the end, regardless of the position you assign here."))
self.label_3.setWordWrap(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_12.addWidget(self.label_3, 2, 1, 1, 1)
self.opt_use_roman_numerals_for_series_number = QtGui.QCheckBox(self.tab_4)
self.opt_use_roman_numerals_for_series_number.setText(_("Use &Roman numerals for series"))
self.opt_use_roman_numerals_for_series_number.setChecked(True)
self.opt_use_roman_numerals_for_series_number.setObjectName(_fromUtf8("opt_use_roman_numerals_for_series_number"))
self.gridLayout_12.addWidget(self.opt_use_roman_numerals_for_series_number, 0, 1, 1, 1)
self.groupBox = QtGui.QGroupBox(self.tab_4)
self.groupBox.setTitle(_("Select displayed metadata"))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gri
AlphaCluster/NewsBlur | vendor/readability/encoding.py | Python | mit | 2,034 | 0.004916
import re
import chardet
import sys
RE_CHARSET = re.compile(br'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
RE_PRAGMA = re.compile(br'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
RE_XML = re.compile(br'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
CHARSETS = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
'maccyrillic': 'cp1251',
'win1251': 'cp1251',
'win-1251': 'cp1251',
'windows-1251': 'cp1251',
}
def fix_charset(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
return CHARSETS.get(encoding, encoding)
def get_encoding(page):
# Regex for XML and HTML Meta charset declaration
declared_encodings = (RE_CHARSET.findall(page) +
RE_PRAGMA.findall(page) +
RE_XML.findall(page))
    # Try any declared encodings
for declared_encoding in declared_encodings:
try:
if sys.version_info[0] == 3:
# declared_encoding will actually be bytes but .decode() only
# accepts `str` type. Decode blindly with ascii because no one should
# ever use non-ascii characters in the name of an encoding.
                declared_encoding = declared_encoding.decode('ascii', 'replace')
encoding = fix_charset(declared_encoding)
# Now let's decode the page
page.decode()
# It worked!
return encoding
except UnicodeDecodeError:
pass
# Fallback to chardet if declared encodings fail
# Remove all HTML tags, and leave only text for chardet
text = re.sub(b'(\s*</?[^>]*>)+\s*', b' ', page).strip()
enc = 'utf-8'
if len(text) < 10:
return enc # can't guess
res = chardet.detect(text)
enc = res['encoding'] or 'utf-8'
#print '->', enc, "%.2f" % res['confidence']
enc = fix_charset(enc)
return enc
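# A minimal usage sketch (the helper below is illustrative, not part of the API above):
# declared charsets win, chardet is only the fallback, and fix_charset() widens
# subset charsets such as gb2312 -> gb18030.
def _example_usage():
    page = b'<html><head><meta charset="gb2312"></head><body>hello</body></html>'
    assert fix_charset('gb2312') == 'gb18030'
    return get_encoding(page)  # -> 'gb18030', taken from the declared charset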
lucienimmink/scanner.py | scanner/_utils.py | Python | mit | 1,322 | 0
#!/usr/bin/env python
# -*- coding: utf8 -*-
class Time:
def ums(i, ignoreZero=True):
i = float(i)
hours = int(i / 3600)
rest = i % 3600
minutes = int(rest / 60)
seconds = int(rest % 60)
if hours < 10:
hours = "0" + str(hours)
if minutes < 10:
minutes = "0" + str(minutes)
if seconds < 10:
seconds = "0" + str(seconds)
if ignoreZero:
if hours == "00":
hours = ""
else:
hours = hours + ":"
else:
hours = hours + ":"
return hours + str(minutes) + ":" + str(seconds)
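# Rough behaviour of Time.ums above (assuming it is meant as a static helper):
#   ums(65)   -> "01:05"      (hours dropped when zero and ignoreZero is True)
#   ums(3725) -> "01:02:05"
# Note that ums() takes no self, so under Python 2 it would need @staticmethod
# before Time.ums(...) can be called directly.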
class force_unicode:
def force_unicode(bstr, encoding, fallback_encodings=None):
        # We got unicode, we give unicode
        if isinstance(bstr, unicode):
            return bstr
if fallback_encodings is None:
            fallback_encodings = ['UTF-16', 'UTF-8', 'ISO-8859-1']
encodings = [encoding] + fallback_encodings
for enc in encodings:
try:
return bstr.decode(enc)
except UnicodeDecodeError:
                pass
except AttributeError:
pass
# Finally, force the unicode
return bstr.decode(encoding, 'ignore')
Chilledheart/chromium | content/test/gpu/gpu_tests/webgl_robustness.py | Python | bsd-3-clause | 2,628 | 0.003044
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.page import page
from telemetry.page import page_test
from telemetry.story import story_set as story_set_module
from webgl_conformance import conformance_harness_script
from webgl_conformance import conformance_path
from webgl_conformance import WebglConformanceValidator
robustness_harness_script = conformance_harness_script + r"""
var robustnessTestHarness = {};
robustnessTestHarness._contextLost = false;
robustnessTestHarness.initialize = function() {
var canvas = document.getElementById('example');
canvas.addEventListener('webglcontextlost', function() {
robustnessTestHarness._contextLost = true;
});
}
robustnessTestHarness.runTestLoop = function() {
// Run the test in a loop until the context is lost.
main();
if (!robustnessTestHarness._contextLost)
window.requestAnimationFrame(robustnessTestHarness.runTestLoop);
else
robustnessTestHarness.notifyFinished();
}
robustnessTestHarness.notifyFinished = function() {
// The test may fail in unpredictable ways depending on when the context is
// lost. We ignore such errors and only require that the browser doesn't
// crash.
  webglTestHarness._allTestSucceeded = true;
// Notify test completion after a delay to make sure the browser is able to
// recover from the lost context.
setTimeout(webglTestHarness.notifyFinished, 3000);
}
window.confirm = function() {
robustnessTestHarness.initialize();
robustnessTestHarness.runTestLoop();
return false;
}
window.webglRobustnessTestHarness = robustnessTestHarness;
"""
class WebglRobustnessPage(page.Page):
def __init__(self, story_set, base_dir):
super(WebglRobustnessPage, self).__init__(
url='file://extra/lots-of-polys-example.html',
page_set=story_set,
base_dir=base_dir)
self.script_to_evaluate_on_commit = robustness_harness_script
def RunNavigateSteps(self, action_runner):
super(WebglRobustnessPage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition('webglTestHarness._finished')
class WebglRobustness(benchmark.Benchmark):
test = WebglConformanceValidator
@classmethod
def Name(cls):
return 'webgl_robustness'
def CreateStorySet(self, options):
ps = story_set_module.StorySet(
base_dir=conformance_path,
serving_dirs=[''])
ps.AddStory(WebglRobustnessPage(ps, ps.base_dir))
return ps
ndim/weight-calendar-grid | weight_cal_grid/log.py | Python | mit | 5,614 | 0.004453
########################################################################
"""Generic and custom log message infrastructure"""
########################################################################
"""\
log - simple logging module
This is so much simpler than Python's stock 'logging' module and thus
much less error prone in use.
"""
########################################################################
import os
import sys
import traceback
########################################################################
DATA = 3
DEBUG = 2
VERBOSE = 1
INFO = 0
QUIET = -1
WARN = -2
ERROR = -3
########################################################################
level = None
startup_level = INFO
########################################################################
outfile = sys.stderr
########################################################################
try:
# Two variants possible: For make -j processing with PID, and without.
# No way to distinguish between the cases, so always print the PID.
try:
_prog = os.path.basename(sys.argv[0])
except:
_prog = 'wcg-unknown'
prog = "%s(%d)" % (_prog, os.getpid())
del _prog
except ImportError:
prog = None
########################################################################
def log(lvl, msg, *args, **kwargs):
"""Generic logging function"""
if ((level != None) and (level >= lvl)) or (startup_level >= lvl):
if 'exc_info' in kwargs:
exc_info = kwargs['exc_info']
if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
traceback.print_exception(
exc_info[0], exc_info[1], exc_info[2],
                None, sys.stderr)
if msg:
p = {}
if args:
p['message'] = msg % args
else:
p['message'] = msg
p['prog'] = prog
p['catmsg'] = {
DATA: 'DATA: ',
DEBUG: 'DEBUG: ',
VERBOSE: 'VERB: ',
INFO: 'INFO: ',
QUIET: 'QUIET: ',
WARN: 'WARN: ',
ERROR: 'ERROR: ',
}[lvl]
print("%(prog)s: %(catmsg)s%(message)s" % p, file=outfile)
########################################################################
def data(msg=None, *args, **kwargs):
"""Log message at DATA level"""
log(DATA, msg, *args, **kwargs)
########################################################################
def debug(msg=None, *args, **kwargs):
"""Log message at DEBUG level"""
log(DEBUG, msg, *args, **kwargs)
########################################################################
def verbose(msg=None, *args, **kwargs):
"""Log message at VERBOSE level"""
log(VERBOSE, msg, *args, **kwargs)
########################################################################
def info(msg=None, *args, **kwargs):
"""Log message at INFO level"""
log(INFO, msg, *args, **kwargs)
########################################################################
def quiet(msg=None, *args, **kwargs):
"""Log message at QUIET level"""
log(QUIET, msg, *args, **kwargs)
########################################################################
def warn(msg=None, *args, **kwargs):
"""Log message at WARN level"""
log(WARN, msg, *args, **kwargs)
########################################################################
def error(msg=None, *args, **kwargs):
"""Log message at ERROR level"""
log(ERROR, msg, *args, **kwargs)
########################################################################
env_name = 'WCG_LOG_LEVEL'
if env_name in os.environ:
env_value = os.environ[env_name]
value_map = {'data': DATA, 'DATA': DATA,
'3': DATA, '+3': DATA,
'debug': DEBUG, 'DEBUG': DEBUG,
'2': DEBUG, '+2': DEBUG,
'verbose': VERBOSE, 'VERBOSE': VERBOSE,
'1': VERBOSE, '+1': VERBOSE,
'info': INFO, 'INFO': INFO,
'0': INFO, '+0': INFO, '-0': INFO,
'': INFO,
'quiet': QUIET, 'QUIET': QUIET, '-1': QUIET,
'warn': WARN, 'WARN': WARN, '-2': WARN,
'error': ERROR, 'ERROR': ERROR, '-3': ERROR,
}
if env_value in value_map:
startup_level = value_map[env_value]
else:
error('Invalid value for %s OS environment variable: %s',
env_name, repr(env_value))
error('%s must be one of:', env_name)
rev_map = {}
for k, v in value_map.items():
if v not in rev_map:
rev_map[v] = ([], [])
if k == '':
# rev_map[v][1].append("''")
pass
else:
try:
i = int(k)
rev_map[v][0].append(k)
except ValueError:
rev_map[v][1].append(k)
for n in range(3, -3-1, -1):
error(' %-10s %s',
' '.join(["%2s" % a
for a in sorted(rev_map[n][0])]),
' '.join(["%-7s" % a
for a in reversed(sorted(rev_map[n][1]))]))
error('If %s is unset or empty, the startup log level is INFO.',
env_name)
sys.exit(2)
########################################################################
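# Minimal usage sketch (the function name is illustrative; it only exercises the
# helpers defined above and is never called automatically).
def _usage_sketch():
    """Set a level, then log through the per-level helpers."""
    global level
    level = DEBUG
    info("starting up")
    debug("value of x: %s", 42)
    try:
        raise RuntimeError("boom")
    except RuntimeError:
        error("something went wrong", exc_info=True)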
public/flake8-import-order | tests/test_cases/missing_newline.py | Python | lgpl-3.0 | 184 | 0.01087
# appnexus cryptography edited google pep8 smarkets
import ast
# This comment should not prevent the I201 below, it is not a newline.
import X # I201
import flake8_import_order # I201
Khroki/MCEdit-Unified | pymclevel/pocket.py | Python | isc | 15,372 | 0.001821
from level import FakeChunk
import logging
from materials import pocketMaterials
from mclevelbase import ChunkNotPresent, notclosing
from nbt import TAG_List
from numpy import array, fromstring, zeros
import os
import struct
# values are usually little-endian, unlike Minecraft PC
logger = logging.getLogger(__name__)
class PocketChunksFile(object):
holdFileOpen = False # if False, reopens and recloses the file on each access
SECTOR_BYTES = 4096
CHUNK_HEADER_SIZE = 4
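    # Each 32-bit entry in the offset table appears to pack the starting sector in the
    # upper 24 bits and the sector count in the low 8 bits, i.e. sector = offset >> 8,
    # count = offset & 0xff (see _readChunk below).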
@property
def file(self):
openfile = lambda: file(self.path, "rb+")
if PocketChunksFile.holdFileOpen:
if self._file is None:
self._file = openfile()
return notclosing(self._file)
else:
return openfile()
def close(self):
if PocketChunksFile.holdFileOpen:
self._file.close()
self._file = None
def __init__(self, path):
self.path = path
self._file = None
if not os.path.exists(path):
file(path, "w").close()
with self.file as f:
filesize = os.path.getsize(path)
if filesize & 0xfff:
filesize = (filesize | 0xfff) + 1
f.truncate(filesize)
if filesize == 0:
filesize = self.SECTOR_BYTES
f.truncate(filesize)
f.seek(0)
offsetsData = f.read(self.SECTOR_BYTES)
self.freeSectors = [True] * (filesize / self.SECTOR_BYTES)
self.freeSectors[0] = False
self.offsets = fromstring(offsetsData, dtype='<u4')
needsRepair = False
for index, offset in enumerate(self.offsets):
sector = offset >> 8
count = offset & 0xff
for i in xrange(sector, sector + count):
if i >= len(self.freeSectors):
# raise RegionMalformed("Region file offset table points to sector {0} (past the end of the file)".format(i))
print "Region file offset table points to sector {0} (past the end of the file)".format(i)
needsRepair = True
break
if self.freeSectors[i] is False:
logger.debug("Double-allocated sector number %s (offset %s @ %s)", i, offset, index)
needsRepair = True
self.freeSectors[i] = False
if needsRepair:
self.repair()
logger.info("Found region file {file} with {used}/{total} sectors used and {chunks} chunks present".format(
file=os.path.basename(path), used=self.usedSectors, total=self.sectorCount, chunks=self.chunkCount))
@property
def usedSectors(self):
return len(self.freeSectors) - sum(self.freeSectors)
@property
def sectorCount(self):
return len(self.freeSectors)
@property
def chunkCount(self):
return sum(self.offsets > 0)
def repair(self):
pass
# lostAndFound = {}
# _freeSectors = [True] * len(self.freeSectors)
# _freeSectors[0] = _freeSectors[1] = False
# deleted = 0
# recovered = 0
# logger.info("Beginning repairs on {file} ({chunks} chunks)".format(file=os.path.basename(self.path), chunks=sum(self.offsets > 0)))
# rx, rz = self.regionCoords
# for index, offset in enumerate(self.offsets):
# if offset:
# cx = index & 0x1f
# cz = index >> 5
# cx += rx << 5
# cz += rz << 5
# sectorStart = offset >> 8
# sectorCount = offset & 0xff
# try:
#
# if sectorStart + sectorCount > len(self.freeSectors):
        #                     raise RegionMalformed("Offset {start}:{end} ({offset}) at index {index} pointed outside of the file".format(
# start=sectorStart, end=sectorStart + sectorCount, index=index, offset=offset)
#
# compressedData = self._readChunk(cx, cz)
# if compressedData is None:
        #                         raise RegionMalformed("Failed to read chunk data for {0}".format((cx, cz)))
#
# format, data = self.decompressSectors(compressedData)
# chunkTag = nbt.load(buf=data)
# lev = chunkTag["Level"]
# xPos = lev["xPos"].value
# zPos = lev["zPos"].value
# overlaps = False
#
# for i in xrange(sectorStart, sectorStart + sectorCount):
# if _freeSectors[i] is False:
# overlaps = True
# _freeSectors[i] = False
#
#
# if xPos != cx or zPos != cz or overlaps:
# lostAndFound[xPos, zPos] = (format, compressedData)
#
# if (xPos, zPos) != (cx, cz):
# raise RegionMalformed("Chunk {found} was found in the slot reserved for {expected}".format(found=(xPos, zPos), expected=(cx, cz)))
# else:
# raise RegionMalformed("Chunk {found} (in slot {expected}) has overlapping sectors with another chunk!".format(found=(xPos, zPos), expected=(cx, cz)))
#
#
#
# except Exception, e:
# logger.info("Unexpected chunk data at sector {sector} ({exc})".format(sector=sectorStart, exc=e))
# self.setOffset(cx, cz, 0)
# deleted += 1
#
# for cPos, (format, foundData) in lostAndFound.iteritems():
# cx, cz = cPos
# if self.getOffset(cx, cz) == 0:
# logger.info("Found chunk {found} and its slot is empty, recovering it".format(found=cPos))
# self._saveChunk(cx, cz, foundData[5:], format)
# recovered += 1
#
# logger.info("Repair complete. Removed {0} chunks, recovered {1} chunks, net {2}".format(deleted, recovered, recovered - deleted))
#
def _readChunk(self, cx, cz):
cx &= 0x1f
cz &= 0x1f
offset = self.getOffset(cx, cz)
if offset == 0:
return None
sectorStart = offset >> 8
numSectors = offset & 0xff
if numSectors == 0:
return None
if sectorStart + numSectors > len(self.freeSectors):
return None
with self.file as f:
f.seek(sectorStart * self.SECTOR_BYTES)
data = f.read(numSectors * self.SECTOR_BYTES)
assert (len(data) > 0)
logger.debug("REGION LOAD %s,%s sector %s", cx, cz, sectorStart)
return data
def loadChunk(self, cx, cz, world):
data = self._readChunk(cx, cz)
if data is None:
raise ChunkNotPresent((cx, cz, self))
chunk = PocketChunk(cx, cz, data[4:], world)
return chunk
def saveChunk(self, chunk):
cx, cz = chunk.chunkPosition
cx &= 0x1f
cz &= 0x1f
offset = self.getOffset(cx, cz)
sectorNumber = offset >> 8
sectorsAllocated = offset & 0xff
data = chunk._savedData()
sectorsNeeded = (len(data) + self.CHUNK_HEADER_SIZE) / self.SECTOR_BYTES + 1
if sectorsNeeded >= 256:
return
if sectorNumber != 0 and sectorsAllocated >= sectorsNeeded:
logger.debug("REGION SAVE {0},{1} rewriting {2}b".format(cx, cz, len(data)))
self.writeSector(sectorNumber, data, format)
else:
# we need to allocate new sectors
# mark the sectors previously used for this chunk as free
for i in xrange(sectorNumber, sectorNumber + sectorsAllocated):
self.freeSectors[i] = True
runLength = 0
try:
runStart = self.freeSectors.index(True)
for i in range(runStart, len(self.freeSectors)):
robmcmullen/peppy | peppy/hsi/plotters.py | Python | gpl-2.0 | 9,351 | 0.008662
# peppy Copyright (c) 2006-2010 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Plotting minor modes for HSI major mode
"""
import os, struct, mmap
from cStringIO import StringIO
import wx
from peppy.actions.minibuffer import *
from peppy.actions import *
from peppy.minor import *
from peppy.hsi.common import *
# hsi mode and the plotting utilities require numpy, the check for which is
# handled by the major mode wrapper
import numpy
import wx.lib.plot as plot
import peppy.lib.plotter as plotter
class HSIMinorModeMixin(MinorMode):
@classmethod
def worksWithMajorMode(self, modecls):
return modecls.keyword == "HSI"
def setCubeView(self, cubeview):
pass
def paneInfoHook(self, paneinfo):
# adjust the width of the minor mode windows if they are placed on the
# top or bottom -- the default width is generally too wide and the
# plots will overlap.
if self.classprefs.side in ['top', 'bottom']:
paneinfo.MinSize(wx.Size(self.classprefs.min_width/2,
self.classprefs.min_height))
class HSIPlotMinorMode(HSIMinorModeMixin, plotter.MultiPlotter,
plotter.PlotProxy):
"""Abstract base class for x-y plot of cube data.
This displays a plot using the plotter.MultiPlotter class. The
plot_proxy attribute specifies which type of plot proxy defined in
the MultiPlotter class to use. Also must specify a keyword that
uniquely identifies the minor mode.
"""
keyword = None
default_classprefs = (
IntParam('best_width', 400),
IntParam('best_height', 400),
IntParam('min_width', 300),
IntParam('min_height', 100),
)
plot_proxy = None
def __init__(self, parent, **kwargs):
MinorMode.__init__(self, parent, **kwargs)
plotter.MultiPlotter.__init__(self, parent, statusbarframe=self.mode.frame)
self.listeners=[]
self.rgblookup=['red','green','blue']
self.setProxy(self)
self.last_coords = (0,0)
def isPlottableView(self, cubeview):
return True
def setCubeView(self, cubeview):
if self.isPlottableView(cubeview):
self.getPaneInfo().Show(True)
else:
self.getPaneInfo().Show(False)
self.mode.updateAui()
self.setupTitle()
self.setupAxes(cubeview)
def setupTitle(self):
self.title=self.keyword
def setupAxes(self, cubeview):
dprint("Here instead of subclass")
pass
def updateYAxis(self, yaxis):
#dprint(yaxis)
if yaxis != self.yaxis:
# make copy of list. The list returned by
# getUpdatedExtrema is modified in place, so the compare
# will always return true if we don't copy it.
# Note: the plot widget expects the extrema values to be in
# floating point, which isn't necessarily the case with the
# computed extrema values. Cast them to float here to be sure.
self.yaxis=(float(yaxis[0]), float(yaxis[1]))
# dprint("yaxis=%s" % self.yaxis)
self.updateListenerExtrema()
def sizeYAxis(self, lines):
"""Calculate Y axis extrema based on the wx.lib.plot.Polyline objects
If the overall cube extrema is not used to set up the Y axis scale,
this routine can be used to calculate the extrema based on the lines
that will be shown.
"""
#dprint(lines[0].points[:,1])
lo = min([min(line.points[:,1]) for line in lines])
hi = max([max(line.points[:,1]) for line in lines])
if lo == hi:
if lo == 0.0:
hi = 1.0
else:
lo = lo / 2
hi = lo * 3
self.yaxis=(float(lo), float(hi))
#dprint("yaxis=%s" % str(self.yaxis))
self.updateListenerExtrema()
def updateProxies(self, *coords):
plotproxy = self.proxies[0]
plotproxy.updateLines(*coords)
try:
plotproxy.updateListeners()
except Exception, e:
            import traceback
dprint(traceback.format_exc())
self.last_coords = coords
def redisplayProxies(self):
self.updateProxies(*self.last_coords)
class SpectrumXLabelAction(HSIActionMixin, RadioAction):
"""Change the X axis label of the
|
spectrum plot"""
name = "X Axis Label"
def getIndex(self):
cubeview = self.mode.cubeview
labels = cubeview.getAvailableXAxisLabels()
minor = self.popup_options['minor_mode']
current = minor.xlabel
return labels.index(current)
def getItems(self):
cubeview = self.mode.cubeview
labels = cubeview.getAvailableXAxisLabels()
return labels
def action(self, index=-1, multiplier=1):
assert self.dprint("index=%d" % index)
cubeview = self.mode.cubeview
labels = cubeview.getAvailableXAxisLabels()
label = labels[index]
minor = self.popup_options['minor_mode']
minor.setupAxes(cubeview, label)
minor.redisplayProxies()
class HSISpectrumMinorMode(HSIPlotMinorMode):
"""Display a spectrum at the current crosshair point.
"""
keyword = "Depth Profile"
def getPopupActions(self, evt, x, y):
return [
SpectrumXLabelAction,
]
def isPlottableView(self, cubeview):
return cubeview.isDepthPlottable()
def setupAxes(self, cubeview, label=None):
labels = cubeview.getAvailableXAxisLabels()
if label:
self.xlabel = label
else:
self.xlabel = labels[0]
self.xaxis = cubeview.getDepthXAxisExtrema(self.xlabel)
# syncing over the whole cube takes too long, so we'll grow
# the axis as it gets bigger. Start with the extrema of the
# current band so we aren't too far off.
self.ylabel='value'
self.yaxis=(float(cubeview.extrema[0]), float(cubeview.extrema[1]))
def getLines(self, x, y):
cubeview = self.mode.cubeview
# dprint("SpectrumPlotProxy: (%d,%d)" % (x,y))
profile = cubeview.getDepthProfile(x, y)
num = len(profile)
data=numpy.zeros((num, 2))
data[:,0] = cubeview.getDepthXAxis(self.xlabel)
data[:,1] = profile
yaxis=cubeview.cube.getUpdatedExtrema()
self.updateYAxis(yaxis)
line = plot.PolyLine(data, legend= '%d, %d' % (x, y), colour='blue')
return [line]
class HSIXProfileMinorMode(HSIPlotMinorMode):
"""Display the X profile at the current crosshair line.
"""
keyword="Horizontal Profile"
def isPlottableView(self, cubeview):
return cubeview.isHorizontalProfilePlottable()
def setupAxes(self, cubeview):
self.xlabel = cubeview.xProfileXAxisLabel
self.xaxis=(0,cubeview.width)
self.ylabel='value'
self.yaxis=(float(cubeview.extrema[0]), float(cubeview.extrema[1]))
def getLines(self, x, y):
cubeview = self.mode.cubeview
profiles=cubeview.getHorizontalProfiles(y)
abscissas = numpy.arange(1,cubeview.width+1,1)
colorindex=0
lines=[]
for values in profiles:
data=numpy.zeros((cubeview.width,2))
data[:,0] = abscissas
data[:,1] = self.mode.filter.getXProfile(y, values)
#line=plot.PolyLine(data, legend= 'band #%d' % cubeview.bands[colorindex][0], colour=self.rgblookup[colorindex])
line=plot.PolyLine(data, legend=cubeview.getBandLegend(cubeview.bands[colorindex][0]), colour=self.rgblookup[colorindex])
lines.append(line)
colorindex+=1
if self.mode.classprefs.use_cube_min_max:
yaxis=cubeview.cube.getUpdatedExtrema()
self.updateYAxis(yaxis)
else:
self.sizeYAxis(lines)
return lines
class HSIYProfileMinorMode(HSIPlotMinorMode):
"""Display the Y profile at the current crosshair lin
histograph/aws | staging/scripts/register_staging.py | Python | mit | 947 | 0.026399
from boto.connection import AWSAuthConnection
import os
class ESConnection(AWSAuthConnection):
def __init__(self, region, **kwargs):
super(ESConnection, self).__init__(**kwargs)
self._set_auth_region_name(region)
self._set_auth_service_name("es")
def _required_auth_capability(self):
return ['hmac-v4']
if __name__ == "__main__":
client = ESConnection(
region='eu-central-1',
host='search-histograph-staging-fsuaepsiqkaydkv2w6bxhxmiji.eu-central-1.es.amazonaws.com',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
        aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
is_secure=False)
print('Registering Snapshot Repository')
resp = client.make_request(method='POST',
path='/_snapshot/histograph-dump',
data='{"type": "s3","settings": { "bucket": "histograph-es-dump","region": "eu-central-1","role_arn": "arn:aws:iam::441915505712:role/elasticsearch-s3-dump"}}')
body = resp.read()
    print(body)
jaredkoontz/leetcode | Python/random-pick-index.py | Python | mit | 1,340 | 0.001493
# Time: O(n)
# Space: O(1)
# Given an array of integers with possible duplicates,
# randomly output the index of a given target number.
# You can assume that the given target number must exist in the array.
#
# Note:
# The array size can be very large.
# Solution that uses too much extra space will not pass the judge.
#
# Example:
#
# int[] nums = new int[] {1,2,3,3,3};
# Solution solution = new Solution(nums);
#
# // pick(3) should return either index 2, 3, or 4 randomly.
# Each index should have equal probability of returning.
# solution.pick(3);
#
# // pick(1) should return 0. Since in the array only nums[0] is equal to 1.
# solution.pick(1);
from random import randint
class Solution(object):
def __init__(self, nums):
"""
:type nums: List[int]
:type numsSize: int
"""
self.__nums = nums
def pick(self, target):
"""
:type target: int
:rtype: int
"""
reservoir = -1
n = 0
for i in xrange(len(self.__nums)):
if self.__nums[i] != target:
continue
reservoir = i if n == 0 or randint(1, n+1) == 1 else reservoir
n += 1
return reservoir
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.pick(target)
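# A quick self-check sketch (hypothetical driver, not part of the solution above):
# every index holding the target should be returned with roughly equal frequency.
if __name__ == "__main__":
    sol = Solution([1, 2, 3, 3, 3])
    counts = {}
    for _ in range(10000):
        i = sol.pick(3)
        counts[i] = counts.get(i, 0) + 1
    print(counts)  # indices 2, 3 and 4 should each appear about a third of the time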
tboyce021/home-assistant | tests/components/bayesian/test_binary_sensor.py | Python | apache-2.0 | 24,735 | 0.000323
"""The test for the bayesian sensor platform."""
import json
from os import path
from homeassistant import config as hass_config
from homeassistant.components.bayesian import DOMAIN, binary_sensor as bayesian
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_RELOAD,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_load_values_when_added_to_hass(hass):
"""Test that sensor initializes with observations of relevant entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
async def test_unknown_state_does_not_influence_probability(hass):
"""Test that an unknown state does not change the output probability."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
hass.states.async_set("sensor.test_monitored", STATE_UNKNOWN)
await hass.async_block_till_done()
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations") == []
async def test_sensor_numeric_state(hass):
"""Test sensor on numeric state platform observations."""
config = {
"binary_sensor": {
"platform": "bayesian",
"name": "Test_Binary",
"observations": [
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored",
"below": 10,
"above": 5,
"prob_given_true": 0.6,
},
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored1",
"below": 7,
"above": 5,
"prob_given_true": 0.9,
"prob_given_false": 0.1,
},
],
"prior": 0.2,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 6)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 6)
hass.states.async_set("sensor.test_monitored1", 6)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", 6)
hass.states.async_set("sensor.test_monitored1", 0)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", 4)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", 15)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.state == "off"
async def test_sensor_state(hass):
"""Test sensor on state platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
async def test_sensor_value_template(hass):
"""Test sensor on template platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states('sensor.test_monitored') == 'off'}}",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_block_till_done()
hass.states.async_set("sensor.test_monitored", "on")
state = hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("p
|
robability")
assert state.state == "off"
hass.states.async_set("sensor.test_monitored", "off")
await hass.async_block_till_done()
|
bcrochet/eve
|
eve/tests/io/mongo.py
|
Python
|
bsd-3-clause
| 14,202
| 0
|
# -*- coding: utf-8 -*-
from datetime import datetime
import simplejson as json
from bson import ObjectId
from bson.dbref import DBRef
from cerberus import SchemaError
from unittest import TestCase
from eve.io.mongo import Validator, Mongo, MongoJSONEncoder
from eve.io.mongo.parser import parse, ParseError
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
class TestPythonParser(TestCase):
def test_Eq(self):
r = parse('a == "whatever"')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': 'whatever'})
def test_Gt(self):
r = parse('a > 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': {'$gt': 1}})
def test_GtE(self):
r = parse('a >= 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': {'$gte': 1}})
def test_Lt(self):
r = parse('a < 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': {'$lt': 1}})
def test_LtE(self):
r = parse('a <= 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': {'$lte': 1}})
def test_NotEq(self):
r = parse('a != 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'a': {'$ne': 1}})
def test_And_BoolOp(self):
r = parse('a == 1 and b == 2')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'$and': [{'a': 1}, {'b': 2}]})
def test_Or_BoolOp(self):
r = parse('a == 1 or b == 2')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'$or': [{'a': 1}, {'b': 2}]})
def test_nested_BoolOp(self):
r = parse('a == 1 or (b == 2 and c == 3)')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'$or': [{'a': 1},
{'$and': [{'b': 2}, {'c': 3}]}]})
def test_ObjectId_Call(self):
r = parse('_id == ObjectId("4f4644fbc88e20212c000000")')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'_id': ObjectId("4f4644fbc88e20212c000000")})
def test_datetime_Call(self):
r = parse('born == datetime(2012, 11, 9)')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'born': datetime(2012, 11, 9)})
def test_Attribute(self):
r = parse('Invoice.number == 1')
self.assertEqual(type(r), dict)
self.assertEqual(r, {'Invoice.number': 1})
def test_unparsed_statement(self):
self.assertRaises(ParseError, parse, 'print ("hello")')
def test_bad_Expr(self):
self.assertRaises(ParseError, parse, 'a | 2')
class TestMongoValidator(TestCase):
def test_unique_fail(self):
""" relying on POST and PATCH tests since we don't have an active
app_context running here """
pass
def test_unique_success(self):
""" relying on POST and PATCH tests since we don't have an active
app_context running here """
pass
def test_objectid_fail(self):
schema = {'id': {'type': 'objectid'}}
doc = {'id': 'not_an_object_id'}
v = Validator(schema, None)
self.assertFalse(v.validate(doc))
self.assertTrue('id' in v.errors)
self.assertTrue('ObjectId' in v.errors['id'])
def test_objectid_success(self):
schema = {'id': {'type': 'objectid'}}
doc = {'id': ObjectId('50656e4538345b39dd0414f0')}
v = Validator(schema, None)
self.assertTrue(v.validate(doc))
def test_dbref_fail(self):
schema = {'id': {'type': 'dbref'}}
doc = {'id': 'not_an_object_id'}
v = Validator(schema, None)
self.assertFalse(v.validate(doc))
self.assertTrue('id' in v.errors)
self.assertTrue('DBRef' in v.errors['id'])
def test_dbref_success(self):
schema = {'id': {'type': 'dbref'}}
doc = {'id': DBRef("SomeCollection",
ObjectId("50656e4538345b39dd0414f0"))}
v = Validator(schema, None)
self.assertTrue(v.validate(doc))
def test_transparent_rules(self):
schema = {'a_field': {'type': 'string'}}
v = Validator(schema)
self.assertFalse(v.transparent_schema_rules)
def test_reject_invalid_schema(self):
schema = {'a_field': {'foo': 'bar'}}
self.assertRaises(SchemaError, lambda: Validator(schema))
def test_enable_transparent_rules(self):
schema = {'a_field': {'type': 'string'}}
v = Validator(schema, transparent_schema_rules=True)
self.assertTrue(v.transparent_schema_rules)
def test_transparent_rules_accept_invalid_schema(self):
schema = {'a_field': {'foo': 'bar'}}
Validator(schema, transparent_schema_rules=True)
def test_geojson_not_compilant(self):
schema = {'location': {'type': 'point'}}
doc = {'location': [10.0, 123.0]}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
self.assertTrue('Point' in v.errors['location'])
def test_geometry_not_compilant(self):
schema = {'location': {'type': 'point'}}
doc = {'location': {"type": "Point", "geometries": [10.0, 123.0]}}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
self.assertTrue('Point' in v.errors['location'])
def test_geometrycollection_not_compilant(self):
schema = {'location': {'type': 'geometrycollection'}}
doc = {'location': {"type": "GeometryCollection",
"coordinates": [10.0, 123.0]}}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
self.assertTrue('GeometryCollection' in v.errors['location'])
def test_point_success(self):
schema = {'location': {'type': 'point'}}
doc = {'location': {"type": "Point", "coordinates": [100.0, 0.0]}}
v = Validator(schema)
self.assertTrue(v.validate(doc))
def test_point_fail(self):
schema = {'location': {'type': 'point'}}
doc = {'location': {'type': "Point", 'coordinates': ["asdasd", 123.0]}}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
self.assertTrue('Point' in v.errors['location'])
def test_point_integer_success(self):
schema = {'location': {'type': 'point'}}
doc = {'location': {'type': "Point", 'coordinates': [10, 123.0]}}
v = Validator(schema)
self.assertTrue(v.validate(doc))
def test_linestring_success(self):
schema = {'location': {'type': 'linestring'}}
doc = {'location': {"type": "LineString",
"coordinates": [[100.0, 0.0], [101.0, 1.0]]
}}
v = Validator(schema)
self.assertTrue(v.validate(doc))
def test_linestring_fail(self):
schema = {'location': {'type': 'linestring'}}
doc = {'location': {'type': "LineString",
'coordinates': [[12.0, 123.0], [12, 'eve']]}}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
self.assertTrue('LineString' in v.errors['location'])
def test_polygon_success(self):
schema = {'location': {'type': 'polygon'}}
doc = {'location': {"type": "Polygon",
"coordinates": [[[100.0, 0.0], [101.0, 0.0],
[101.0, 1.0], [100.0, 1.0],
[100.0, 0.0]]
]
}
}
v = Validator(schema)
self.assertTrue(v.validate(doc))
def test_polygon_fail(self):
schema = {'location': {'type': 'polygon'}}
doc = {'location': {'type': "Polygon",
'coordinates': [[[12.0, 23.0], [12.3, 12.5]],
["eve"]]}}
v = Validator(schema)
self.assertFalse(v.validate(doc))
self.assertTrue('location' in v.errors)
|
tgbugs/pyontutils
|
neurondm/neurondm/models/phenotype_direct.py
|
Python
|
mit
| 4,341
| 0.008754
|
#!/usr/bin/env python3
from pathlib import Path
import rdflib
from pyontutils.core import makeGraph
from pyontutils.utils import relative_path
from pyontutils.namespaces import makePrefixes, TEMP
from pyontutils.namespaces import rdf, rdfs, owl
from neurondm import *
from neurondm.lang import *
from neurondm.core import auth, MeasuredNeuron, PHENO_ROOT, MOD_ROOT
def main():
# load in our existing graph
# note: while it would be nice to allow specification of phenotypes to be decoupled
# from insertion into the graph... maybe we could enable this, but it definitely seems
# to break a number of nice features... and we would need the phenotype graph anyway
Config('temporary-graph')
EXISTING_GRAPH = graphBase.in_graph
#EXISTING_GRAPH = rdflib.Graph()
#graphBase.in_graph = EXISTING_GRAPH
#graphBase.core_graph = EXISTING_GRAPH
local_prefix = auth.get_path('ontology-local-repo') / 'ttl'
sources = (f'{local_prefix}/NIF-Neuron-Defined.ttl',
f'{local_prefix}/NIF-Neuron.ttl',
f'{local_prefix}/NIF-Neuron-Phenotype.ttl',
f'{local_prefix}/phenotype-core.ttl',
f'{local_prefix}/phenotypes.ttl',
f'{local_prefix}/hbp-special.ttl')
for file in sources:
EXISTING_GRAPH.parse(file, format='turtle')
#EXISTING_GRAPH.namespace_manager.bind('PR', makePrefixes('PR')['PR'])
#graphBase.core_graph = EXISTING_GRAPH
#graphBase.out_graph = rdflib.Graph()
graphBase.__import_name__ = 'neurondm.lang'
proot = graphBase.core_graph.qname(PHENO_ROOT)
mroot = graphBase.core_graph.qname(MOD_ROOT)
graphBase._predicates, _psupers = getPhenotypePredicates(EXISTING_GRAPH, proot, mroot)
g = makeGraph('merged', prefixes={k:str(v) for k, v in EXISTING_GRAPH.namespaces()}, graph=EXISTING_GRAPH)
reg_neurons = list(g.g.subjects(rdfs.subClassOf, _NEURON_CLASS))
tc_neurons = [_ for (_,) in g.g.query('SELECT DISTINCT ?match WHERE {?match rdfs:subClassOf+ %s}' % g.g.qname(_NEURON_CLASS))]
def_neurons = g.get_equiv_inter(_NEURON_CLASS)
nodef = sorted(set(tc_neurons) - set(def_neurons))
og1 = MeasuredNeuron.out_graph = rdflib.Graph() # there is only 1 out_graph at a time, load and switch
mns = [MeasuredNeuron(id_=n) for n in nodef]
mnsp = [n for n in mns if n.pes]
graphBase.out_graph = rdflib.Graph() # XXX NEVER DO THIS IT IS EVIL ZALGO WILL EAT YOUR FACE
graphBase.ng.g = graphBase.out_graph
# and he did, had to switch to graphBase for exactly this reason >_<
dns = [Neuron(id_=n) for n in sorted(def_neurons)]
#dns += [Neuron(*m.pes) if m.pes else m.id_ for m in mns]
dns += [Neuron(*m.pes) for m in mns if m.pes]
# reset everything for export
config = Config('phenotype-direct', source_file=relative_path(__file__))
#Neuron.out_graph = graphBase.out_graph # each subclass of graphBase has a distinct out graph IF it was set manually
#Neuron.out_graph = rdflib.Graph()
#ng = makeGraph('', prefixes={}, graph=Neuron.out_graph)
#ng.filename = Neuron.ng.filename
Neuron.mro()[1].existing_pes = {} # wow, new adventures in evil python patterns mro()[1]
dns = [Neuron(*d.pes) for d in set(dns)] # TODO remove the set and use this to test existing bags?
#from neurons.lang import WRITEPYTHON
#WRITEPYTHON(sorted(dns))
#ng.add_ont(TEMP['defined-neurons'], 'Defined Neurons', 'NIFDEFNEU',
#'VERY EXPERIMENTAL', '0.0.0.1a')
#ng.add_trip(TEMP['defined-neurons'], owl.imports, rdflib.URIRef('file:///home/tom/git/NIF-Ontology/ttl/phenotype-core.ttl'))
#ng.add_trip(TEMP['defined-neurons'], owl.imports, rdflib.URIRef('file:///home/tom/git/NIF-Ontology/ttl/phenotypes.ttl'))
#ng.write()
ontinfo = (
(Neuron.ng.ontid, rdf.type, owl.Ontology),
(Neuron.ng.ontid, rdfs.label, rdflib.Literal('phenotype direct neurons')),
(Neuron.ng.ontid, rdfs.comment, rdflib.Literal('Neurons derived directly from phenotype definitions')),
)
[Neuron.out_graph.add(t) for t in ontinfo]
Neuron.write()
Neuron.write_python()
bads = [n for n in Neuron.ng.g.subjects(rdf.type, owl.Class)
if len(list(Neuron.ng.g.predicate_objects(n))) == 1]
if __name__ == '__main__':
breakpoint()
return config
config = main()
|
undertherain/benchmarker
|
benchmarker/kernels/dimenet/pytorch.py
|
Python
|
mpl-2.0
| 528
| 0
|
from benchmarker.kernels.helpers_torch import Regression
from torch_geometric.nn import DimeNet
def get_kernel(params):
# TODO: make these parameters
net = DimeNet(
hidden_channels=params["problem"]["hidden_channels"],
out_channels=1,
num_blocks=6,
num_bilinear=8,
num_spherical=7,
num_radial=6,
cutoff=5.0,
envelope_exponent=5,
num_before_skip=1,
num_after_skip=2,
num_output_layers=3)
return Regression(params["mode"], net)
|
bearicc/python-wavelet-transform
|
cwt_demo.py
|
Python
|
agpl-3.0
| 380
| 0
|
import scipy as sp
from mycwt import cwt
pi = sp.pi
mu = [100.0, 500.0, 900.0]
sigma = [5.0, 10.0, 20.0]
a = [3.0, 1.0, 0.5]
t = sp.arange(0, 1000)*1.0
x = sp.zeros(t.shape)
for i in range(0, len(mu)):
x += 1/sp.sqrt(2*pi)/sigma[i]*sp.exp(-0.5*((t-mu[i])/sigma[i])**2)
smax = 128
wname = 'bior2.6'
scales = sp.arange(1, smax+1)*1.0
coefs = cwt(x, scales, wname, bplot=True)
|
nekohayo/snowy
|
lib/django_openid_auth/models.py
|
Python
|
agpl-3.0
| 2,355
| 0.000425
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2007 Simon Willison
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from django.contrib.auth.models import User
from django.db import models
class Nonce(models.Model):
server_url = models.CharField(max_length=2047)
timestamp = models.IntegerField()
salt = models.CharField(max_length=40)
def __unicode__(self):
return u"Nonce: %s, %s" % (self.server_url, self.salt)
class Association(models.Model):
server_url = models.TextField(max_length=2047)
handle = models.CharField(max_length=255)
secret = models.TextField(max_length=255) # Stored base64 encoded
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField(max_length=64)
def __unicode__(self):
return u"Association: %s, %s" % (self.server_url, self.handle)
class UserOpenID(models.Model):
user = models.ForeignKey(User)
claimed_id = models.TextField(max_length=255, unique=True)
display_id = models.TextField(max_length=2047)
|
PeachstoneIO/peachbox
|
tutorials/tutorial_movie_reviews/tasks/importer.py
|
Python
|
apache-2.0
| 1,848
| 0.010823
|
# general
import time
# peachbox imports
from peachbox.task import Task
from peachbox.connector import sink, source
from peachbox.pipeline import Chain, Validator
# tutorial
from pipelines.importer import UserReviewEdge, ProductReviewEdge, ReviewProperties
import model.master
class ImportMovieReviews(ScheduledTask):
def __init__(self):
super(ImportMovieReviews, self).__init__()
self.source = source.KafkaJSON(topic='movie_reviews')
self.sink = sink.MasterData()
def execute(self):
input = self.source.emit()['data']
# Import 'review by user edges'
user_review_validator = Validator(['time', 'user_id', 'product_id'])
user_review_chain = Chain([user_review_validator, UserReviewEdge()])
user_review_edges = user_review_chain.execute(input)
# Import 'product review edges'
product_review_validator = Validator(['time', 'user_id', 'product_id'])
product_review_chain = Chain([product_review_validator, ProductReviewEdge()])
product_review_edges = product_review_chain.execute(input)
# Import 'review properties'
required_fields = ['time', 'user_id', 'product_id', 'helpfulness', 'score', 'summary', 'review']
review_property_validator = Validator(required_fields)
review_properties = Chain([review_property_validator, ReviewProperties()]).execute(input)
self.sink.absorb([{'data':user_review_edges, 'model':model.master.UserReviewEdge},
{'data':product_review_edges, 'model':model.master.ProductReviewEdge},
{'data':review_properties, 'model':model.master.ReviewProperties}])
# Payload is sent with 'Finished Event'
self.payload = {'import_finished':int(time.time()), 'latest_kafka_offset':self.source.latest_offset}
|
ThomasBrouwer/BNMTF
|
data_toy/bnmf/generate_bnmf.py
|
Python
|
apache-2.0
| 3,430
| 0.032362
|
"""
Generate a toy dataset for the matrix factorisation case, and store it.
We use dimensions 100 by 50 for the dataset, and 10 latent factors.
As the prior for U and V we take value 1 for all entries (so exp 1).
As a result, each value in R has a value of around 20, and a variance of 100-120.
For contrast, the Sanger dataset of 705 by 140 shifted to nonnegative has mean
31.522999753779082 and variance 243.2427345740027.
We add Gaussian noise of precision tau = 1 (prior for gamma: alpha=1,beta=1).
(Simply using the expectation of our Gamma distribution over tau)
"""
import sys, os
project_location = os.path.dirname(__file__)+"/../../../"
sys.path.append(project_location)
from BNMTF.code.models.distributions.exponential import exponential_draw
from BNMTF.code.models.distributions.normal import normal_draw
from BNMTF.code.cross_validation.mask import generate_M
import numpy, itertools, matplotlib.pyplot as plt
def generate_dataset(I,J,K,lambdaU,lambdaV,tau):
# Generate U, V
U = numpy.zeros((I,K))
for i,k in itertools.product(xrange(0,I),xrange(0,K)):
U[i,k] = exponential_draw(lambdaU[i,k])
V = numpy.zeros((J,K))
for j,k in itertools.product(xrange(0,J),xrange(0,K)):
V[j,k] = exponential_draw(lambdaV[j,k])
# Generate R
true_R = numpy.dot(U,V.T)
R = add_noise(true_R,tau)
return (U,V,tau,true_R,R)
def add_noise(true_R,tau):
if numpy.isinf(tau):
return numpy.copy(true_R)
(I,J) = true_R.shape
R = numpy.zeros((I,J))
for i,j in itertools.product(xrange(0,I),xrange(0,J)):
R[i,j] = normal_draw(true_R[i,j],tau)
return R
def try_generate_M(I,J,fraction_unknown,attempts):
for attempt in range(1,attempts+1):
try:
M = generate_M(I,J,fraction_unknown)
sums_columns = M.sum(axis=0)
sums_rows = M.sum(axis=1)
for i,c in enumerate(sums_rows):
assert c != 0, "Fully unobserved row in M, row %s. Fraction %s." % (i,fraction_unknown)
for j,c in enumerate(sums_columns):
assert c != 0, "Fully unobserved column in M, column %s. Fraction %s." % (j,fraction_unknown)
print "Took %s attempts to generate M." % attempt
return M
except AssertionError:
pass
raise Exception("Tried to generate M %s times, with I=%s, J=%s, fraction=%s, but failed." % (attempts,I,J,fraction_unknown))
##########
if __name__ == "__main__":
output_folder = project_location+"BNMTF/data_toy/bnmf/"
I,J,K = 100, 80, 10 #20, 10, 5 #
fraction_unknown = 0.1
alpha, beta = 1., 1.
lambdaU = numpy.ones((I,K))
lambdaV = numpy.ones((J,K))
tau = alpha / beta
(U,V,tau,true_R,R) = generate_dataset(I,J,K,lambdaU,lambdaV,tau)
# Try to generate M
M = try_generate_M(I,J,fraction_unknown,attempts=1000)
# Store all matrices in text files
numpy.savetxt(open(output_folder+"U.txt",'w'),U)
numpy.savetxt(open(output_folder+"V.txt",'w'),V)
numpy.savetxt(open(output_folder+"R_true.txt",'w'),true_R)
numpy.savetxt(open(output_folder+"R.txt",'w'),R)
numpy.savetxt(open(output_folder+"M.txt",'w'),M)
print "Mean R: %s. Variance R: %s.
|
Min R: %s. Max R: %s." % (numpy.mean(R),numpy.var(R),R.min(),R.max())
fig = plt.figure()
plt.hist(R.flatten(),bins=range(0,int(R.max())+1))
plt.show()
|
portfors-lab/sparkle
|
test/tests/gui/plotting/test_protocol_display.py
|
Python
|
gpl-3.0
| 3,063
| 0.001306
|
import sys
import time
import numpy as np
from sparkle.QtWrapper.QtGui import QApplication
from sparkle.gui.plotting.protocoldisplay import ProtocolDisplay
from test.sample import samplewav
PAUSE = 0
class TestProtocolDisplay():
def setUp(self):
self.t = np.arange(200)
def data_func(self, f):
return 2*np.sin(2*np.pi*f*self.t/len(self.t))
def test_display(self):
display = ProtocolDisplay()
display.show()
display.setNreps(5)
display.updateSpec(samplewav())
display.specAutoRange()
QApplication.processEvents()
assert display.specPlot.hasImg()
time.sleep(PAUSE)
display.updateSpec(None)
QApplication.processEvents()
assert not display.specPlot.hasImg()
time.sleep(PAUSE)
display.showSpec(samplewav())
QApplication.processEvents()
assert display.specPlot.hasImg()
data = self.data_func(3)
nbins = 50
bin_centers = np.linspace(0, self.t[-1], nbins)
points = np.ones(nbins)
display.updateSignal(self.t, data)
display.updateSpiketrace(self.t, data)
display.updateFft(self.t, data)
display.addRasterPoints(bin_centers, points)
display.addRasterPoints(bin_centers, points*2)
display.setXlimits((self.t[0], self.t[-1]))
QApplication.processEvents()
display.clearRaster()
time.sleep(PAUSE)
display.close()
def test_multiple_recordings_display(self):
display = ProtocolDisplay('chan0')
display.show()
display.addResponsePlot('chan1', 'chan2')
display.setNreps(5)
data = self.data_func(3)
display.updateSpiketrace(self.t, data, 'chan0')
display.updateSpiketrace(self.t, data, 'chan1')
display.updateSpiketrace(self.t, data, 'chan2')
# check range matching
lims = [0.11, 0.66]
display.responsePlots['chan1'].setXlim(lims)
# print 'lims', lims, display.responsePlots['chan0'].viewRange()[0], display.responsePlots['chan2'].viewRange()[0], display.specPlot.viewRange()[0]
assert lims == display.responsePlots['chan0'].viewRange()[0] \
== display.responsePlots['chan2'].viewRange()[0] \
== display.specPlot.viewRange()[0]
def test_add_remove_plots(self):
display = ProtocolDisplay('chan0')
display.show()
display.addResponsePlot('chan1', 'chan2')
assert display.responsePlotCount() == 3
display.removeResponsePlot('chan1', 'chan2')
assert display.responsePlotCount() == 1
display.addResponsePlot('chan1')
assert display.responsePlotCount() == 2
display.removeResponsePlot('chan0', 'chan1')
assert display.responsePlotCount() == 0
def test_remove_non_existant_plot(self):
display = ProtocolDisplay('chan0')
display.show()
display.removeResponsePlot('chan1')
assert display.responseNameList() == ['chan0']
|
Ninad998/FinalYearProject
|
django_app/migrations/0001_initial.py
|
Python
|
mit
| 1,051
| 0.003806
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-21 07:28
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
Timdawson264/acd_cli
|
acdcli/cache/__init__.py
|
Python
|
gpl-2.0
| 43
| 0.023256
|
__all__ = ('db', 'format', 'query', 'sync')
|
Elhodred/python-digitaloceanmanager
|
doma.py
|
Python
|
lgpl-3.0
| 4,258
| 0.005402
|
#!/usr/bin/env python
import argparse
import ConfigParser
import os.path
import digitaloceanmanager
import getpass
from passlib.hash import sha512_crypt
token = None
user = None
passwd = None
ssh_port = None
if __name__ == "__main__":
config = ConfigParser.RawConfigParser()
if (os.path.isfile('doma.cfg')):
config.read('doma.cfg')
token = config.get('General', 'token')
user = config.get('General', 'user')
passwd = config.get('General', 'password')
ssh_port = config.get('General', 'ssh_port')
else :
token = raw_input('Please insert your Digital Ocean API Token: ')
user = raw_input('Please insert the user to create in the droplets: ')
passwd = sha512_crypt.encrypt(getpass.getpass('Please insert the password for the user: '))
ssh_port = raw_input('Please insert port where you want sshd to run: ')
config.add_section('General')
config.set('General', 'token', token)
config.set('General', 'user', user)
config.set('General', 'password', passwd)
config.set('General', 'ssh_port', ssh_port)
with open('doma.cfg','wb') as configfile:
config.write(configfile)
manager = digitaloceanmanager.DigitalOceanManager(token=token, user=user, passwd=passwd, ssh_port=ssh_port)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='commands', description='list of available actions', help='commands help')
# Create a Droplet command
parser_droplet = subparsers.add_parser('droplet', help='manage droplets')
droplet_subparsers = parser_droplet.add_subparsers(title='droplet commands', description='list of available actions',
help='droplet commands help')
parser_create = droplet_subparsers.add_parser('create', help='create a droplet')
parser_create.add_argument('--name', required=True, help='droplet name')
parser_create.add_argument('--region', required=True, help='region where to create droplet')
parser_create.add_argument('--image', required=True, help='image to use for the droplet')
parser_create.add_argument('--size', required=True, help='size of the droplet, tied to cost')
parser_create.add_argument('--ssh_key', help='id of the ssh key to add to droplet')
parser_create.add_argument('--backups', default=False, help='activate backups')
parser_create.add_argument('--user_data', help='fills droplet metadata with file content')
parser_create.set_defaults(func=manager.create_droplet)
# Destroy a Droplet command
parser_destroy = droplet_subparsers.add_parser('destroy', help='destroy a droplet')
parser_destroy.add_argument('id', help='id of the droplet to destroy')
parser_destroy.set_defaults(func=manager.destroy_droplet)
# List all Droplets command
parser_list = droplet_subparsers.add_parser('list', help='list all droplets')
parser_list.set_defaults(func=manager.list_droplets)
# Reboot a Droplet
parser_reboot = droplet_subparsers.add_parser('reboot', help='reboot a droplet')
parser_reboot.add_argument('id', help='id of the droplet to reboot')
parser_reboot.set_defaults(func=manager.reboot_droplet)
#Start a Droplet
parser_start = droplet_subparsers.add_parser('start', help='start a droplet')
parser_start.add_argument('id', help='id of the droplet to start')
parser_start.set_defaults(func=manager.power_on_droplet)
# Shutdown a Droplet
parser_shutdown = droplet_subparsers.add_parser('shutdown', help='shutdown a droplet')
parser_shutdown.add_argument('id', help='id of the droplet to shutdown')
parser_shutdown.set_defaults(func=manager.shutdown_droplet)
# List available images
parser_images = subparsers.add_parser('images', help='list all images')
parser_images.set_defaults(func=manager.list_images)
# List available Regions
parser_regions = subparsers.add_parser('regions', help='list all regions available')
parser_regions.set_defaults(func=manager.list_regions)
# List SSH keys
parser_ssh_keys = subparsers.add_parser('ssh_keys', help='list all ssh keys')
parser_ssh_keys.set_defaults(func=manager.list_ssh_keys)
args = parser.parse_args()
args.func(args)
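# Example (illustrative only, not part of the original script): the doma.cfg file
# that the configuration block above reads and writes has a single [General]
# section. The values below are placeholders, not real credentials; the password
# is stored as the sha512_crypt hash produced on first run, never plain text.
#
#   [General]
#   token = <your Digital Ocean API token>
#   user = deployuser
#   password = $6$rounds=656000$...
#   ssh_port = 2222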
|
jeongyoonlee/Kaggler
|
tests/conftest.py
|
Python
|
mit
| 1,121
| 0.000892
|
import numpy as np
import pandas as pd
import pytest
from .const import RANDOM_SEED, TARGET_COL
N_CATEGORY = 50
N_OBS = 10000
N_CAT_FEATURE = 10
N_NUM_FEATURE = 5
@pytest.fixture(scope="module")
def generate_data():
generated = False
def _generate_data():
if not generated:
assert N_CAT_FEATURE > 1
assert N_NUM_FEATURE > 3
np.random.seed(RANDOM_SEED)
X_num = np.random.normal(size=(N_OBS, N_NUM_FEATURE))
X_cat = np.random.randint(0, N_CATEGORY, size=(N_OBS, N_CAT_FEATURE))
df = pd.DataFrame(
np.hstack((X_num, X_cat)),
columns=["num_{}".format(x) for x in range(N_NUM_FEATURE)]
+ ["cat_{}".format(x) for x in range(N_CAT_FEATURE)],
)
df[TARGET_COL] = (
1
+ X_num[:, 0] * X_num[:, 1]
- np.log1p(np.exp(X_num[:, 1] + X_num[:, 2]))
+ 10 * (X_cat[:, 0] == 0).astype(int)
+ np.random.normal(scale=0.01, size=N_OBS)
)
return df
yield _generate_data
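# Illustrative sketch (not part of the original conftest): how a test module in
# this suite could consume the generate_data fixture above. The assertions only
# restate the shapes implied by the constants; TARGET_COL comes from .const.
def test_generated_data_shape(generate_data):
    df = generate_data()
    assert len(df) == N_OBS
    assert df.shape[1] == N_NUM_FEATURE + N_CAT_FEATURE + 1  # features + target column
    assert TARGET_COL in df.columns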
|
BackupTheBerlios/tops
|
totalopenstation/output/tops_sql.py
|
Python
|
gpl-3.0
| 2,442
| 0.00041
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# filename: tops_sql.py
# Copyright 2008-2010 Stefano Costa <steko@iosa.it>
#
# This file is part of Total Open Station.
#
# Total Open Station is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Total Open Station is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Total Open Station. If not, see
# <http://www.gnu.org/licenses/>.
def to_sql(point, tablename):
'''Generate SQL line corresponding to the input point.
At this moment the column names are fixed, but they could change in the
future. The default names are reasonable.'''
params = {
'wkt': to_wkt(point),
'tablename': tablename,
'pid': point[0],
'text': point[4]}
sql_string = "INSERT INTO %(tablename)s" % params
sql_string += "(point_id, point_geom, point_text) VALUES"
sql_string += "(%(pid)s,GeomFromText('%(wkt)s'),'%(text)s');\n" % params
return sql_string
def to_wkt(point):
pid, x, y, z, text = point
wkt_representation = 'POINT(%s %s)' % (x, y)
return wkt_representation
class OutputFormat:
"""
Exports points data in SQL format suitable for use with PostGIS & friends.
http://postgis.refractions.net/documentation/manual-1.3/ch04.html#id2986280
has an example of loading an SQL file into a PostgreSQL database.
``data`` should be an iterable (e.g. list) containing one iterable (e.g.
tuple) for each point. The default order is PID, x, y, z, TEXT.
This is consistent with our current standard.
"""
def __init__(self, data, tablename='topsdata'):
self.data = data
self.tablename = tablename
def process(self):
lines = [to_sql(e, self.tablename) for e in self.data]
lines.insert(0, 'BEGIN;\n')
lines.append('COMMIT;\n')
output = "".join(lines)
return output
if __name__ == "__main__":
OutputFormat(
[(1, 2, 3, 4, 'qwerty'),
("2.3", 42, 45, 12, 'asdfg')],
'prova')
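# Illustrative example (not in the original module): for the point tuple
# (1, 2, 3, 4, 'qwerty') and table name 'topsdata', to_sql() above produces:
#
#   INSERT INTO topsdata(point_id, point_geom, point_text) VALUES(1,GeomFromText('POINT(2 3)'),'qwerty');
#
# OutputFormat.process() simply wraps one such line per point in BEGIN;/COMMIT;.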
|
wizardofozzie/simpybtc
|
btc/mnemonic.py
|
Python
|
mit
| 7,793
| 0.011549
|
#!/usr/bin/python
from btc.main import *
#from btc.pyspecials import *
# get wordlists
def open_wordlist(wordlist):
try:
if wordlist in ('Electrum1', 'electrum1', 'electrum'):
from btc._electrum1wordlist import ELECTRUM1_WORDLIST
assert len(ELECTRUM1_WORDLIST) == 1626
return ELECTRUM1_WORDLIST
elif wordlist in ('bip39', 'BIP39'):
from btc._bip39wordlist import BIP0039_WORDLIST
assert len(BIP0039_WORDLIST) == 2048
return BIP0039_WORDLIST
except: return None
def download_wordlist(wordlist):
try:
from btc.bci import make_request
if wordlist in ('Electrum1', 'electrum1', 'electrum'):
ELECTRUM1WORDS = make_request("https://gist.githubusercontent.com/anonymous" \
+ "/f58f57780245db3cafc4/raw/1b5a9e81c0a356373e9e13aa720baef89d8fa856" \
+ "/electrum1_english_words").decode('utf-8').strip().split()
assert len(ELECTRUM1WORDS) == 1626
return ELECTRUM1WORDS
elif wordlist in ('bip39', 'BIP39'):
BIP0039_WORDLIST = make_request("https://raw.githubusercontent.com" \
+ "/btc/bips/master/bip-0039/english.txt"
).decode('utf-8').strip().split('\n')
assert len(BIP0039_WORDLIST) == 2048
return BIP0039_WORDLIST
except: return None
def bip39_hex_to_mnemonic(hexvalue):
"""
Convert hex seed to BIP39 mnemonic
https://github.com/btc/bips/blob/master/bip-0039.mediawiki#generating-the-mnemonic
Essentially converts hex value to binary (with appended checksum),
and splits into 11-bit binary chunks, each indexing a 2048 (=2**11)
word list (in BIP39WORDS)
hexseed: hexadecimal bytes or bytearray object
>>> bip39_hex_to_mnemonic('eaebabb2383351fd31d703840b32e9e2')
'turtle front uncle idea crush write shrug there lottery flower risk shell'
"""
try: BIP39WORDS = open_wordlist('bip39')
except: BIP39WORDS = download_wordlist('bip39')
if isinstance(hexvalue, string_or_bytes_types) and re.match('^[0-9a-fA-F]*$', from_bytestring_to_string(hexvalue)):
hexvalue = from_string_to_bytes(hexvalue)
else:
raise TypeError("Enter a hex seed!")
if len(hexvalue) % 4 != 0:
raise Exception("Value not a multiple of 4 bytes!")
elif len(hexvalue) not in range(4, 125, 4):
raise Exception("32 < entropy < 992 bits only!")
hexvalue = safe_unhexlify(hexvalue)
cs = hashlib.sha256(hexvalue).hexdigest() # sha256 hexdigest
bstr = (changebase( safe_hexlify(hexvalue), 16, 2, len(hexvalue)*8) +
changebase( cs, 16, 2, 256)[ : len(hexvalue) * 8 // 32])
return " ".join( [BIP39WORDS[int(x, 2)] for x in
[bstr[i:i+11] for i in range(0, len(bstr), 11)] ] )
def bip39_mnemonic_to_hex(mnemonic, saltpass=''):
"""
Convert BIP39 mnemonic to hex seed
https://github.com/btc/bips/blob/master/bip-0039.mediawiki#generating-the-mnemonic
mnemonic: single spaced, lower-case words
>>>bip39_mnemonic_to_hex("board flee heavy tunnel powder denial science ski answer betray cargo cat")
'18ab19a9f54a9274f03e5209a2ac8a91'
"""
if isinstance(mnemonic, string_or_bytes_types):
try:
mn_string = st(mnemonic)
mn_array = mnemonic.lower().strip().split(" ")
except:
raise TypeError("Bad mnemonic input. Enter lower case, string of words")
elif isinstance(mnemonic, list):
mn_array = map(st, mnemonic)
else: raise TypeError("Enter a lower case, single-spaced mnemonic!!")
if len(mn_array) not in range(3, 124, 3):
raise TypeError("32 < entropy < 992 bits; Input too big or too small")
if len(mn_array) % 3 != 0:
raise TypeError("Too many or too few words")
#assert all(map(lambda x: x in BIP39WORDS, mnemonic_array)) # check all words are in list
mnem = ' '.join(mn_array)
try:
assert bip39_check_mnemonic(mnem)
seed = pbkdf2(mnem, 'mnemonic'+saltpass)
return safe_hexlify(seed)
except:
raise IOError("Mnemonic checksum is bad!")
def bip39_check_mnemonic(mnemonic):
"""
Assert mnemonic is BIP39 standard
"""
try: BIP39WORDS = open_wordlist('bip39')
except: BIP39WORDS = download_wordlist('bip39')
if isinstance(mnemonic, string_types):
try:
mn_array = from_string_to_bytes(mnemonic).lower().strip().split(" ")
except:
raise TypeError("Enter a lower case, single-spaced mnemonic!")
else: raise TypeError("Enter a lower case, single-spaced mnemonic!!")
if len(mn_array) not in range(3, 124, 3):
raise TypeError("32 < entropy < 992 bits; Input too big or too small")
# if len(mn_array) % 3 != 0:
# raise TypeError("Too many or too few words")
assert all(map(lambda x: x in BIP39WORDS, mn_array)) # check all words are in list
try: binstr = ''.join([ changebase(str(BIP39WORDS.index(x)), 10, 2, 11) for x in mn_array])
except: raise IOError("Are the words in the right order?")
L = len(binstr)
bd = binstr[:L // 33 * 32]
cs = binstr[-L // 33:]
hexd = safe_unhexlify(changebase(bd, 2, 16, L // 33 * 8))
hexd_cs = changebase(hashlib.sha256(hexd).hexdigest(), 16, 2, 256)[:L // 33]
return cs == hexd_cs
def bip39_generate(bits=128):
"""Generates a tuple of (hex seed, mnemonic)"""
if bits % 32 != 0:
raise Exception('Should be divisible by 32, but is .. %d' % bits)
seed = safe_hexlify(by(random_string(bits // 8)))
return (seed, bip39_hex_to_mnemonic(seed))
def random_bip39_seed(bits=128):
return bip39_generate(bits=bits)[0]
def random_bip39_mnemonic(bits=128):
return bip39_generate(bits=bits)[1]
def electrum1_mnemonic_decode(mnemonic):
"""Decodes Electrum 1.x mnemonic phrase to hex seed"""
try: ELECTRUM1WORDS = open_wordlist('electrum1')
except: ELECTRUM1WORDS = download_wordlist('electrum1')
if isinstance(mnemonic, string_or_bytes_types):
try: mn_array = from_string_to_bytes(mnemonic).lower().strip().split(" ")
except: raise TypeError("Enter the Electrum 1.x mnemonic as a string")
elif isinstance(mnemonic, list):
mn_array = mnemonic[:]
else: raise TypeError("Bad input type")
wlist, words, n = mn_array, ELECTRUM1WORDS, len(ELECTRUM1WORDS)
# https://github.com/spesmilo/electrum/blob/1b6abf6e028cbabd5e125784cff6d4ada665e722/lib/old_mnemonic.py#L1672
output = ''
for i in range(len(wlist)/3):
word1, word2, word3 = wlist[3*i:3*i+3]
w1 = words.index(word1)
w2 = (words.index(word2))%n
w3 = (words.index(word3))%n
x = w1 +n*((w2-w1)%n) +n*n*((w3-w2)%n)
output += '%08x'%x
return output
def electrum1_mnemonic_encode(hexvalue):
"""Encodes a hex seed as Electrum 1.x mnemonic phrase"""
try: ELECTRUM1WORDS = open_wordlist('electrum1')
except: ELECTRUM1WORDS = download_wordlist('electrum1')
if isinstance(hexvalue, string_or_bytes_types) and re.match('^[0-9a-fA-F]*$', from_bytes_to_string(hexvalue)):
hexvalue = from_string_to_bytes(hexvalue)
else: raise TypeError("Enter a hex value!")
message, words, n = hexvalue, ELECTRUM1WORDS, len(ELECTRUM1WORDS)
# https://github.com/spesmilo/electrum/blob/1b6abf6e028cbabd5e125784cff6d4ada665e722/lib/old_mnemonic.py#L1660
assert len(message) % 8 == 0
out = []
for i in range(len(message)/8):
word = message[8*i:8*i+8]
x = int(word, 16)
w1 = (x%n)
w2 = ((x/n) + w1)%n
w3 = ((x/n/n) + w2)%n
out += [ words[w1], words[w2], words[w3] ]
return out
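# Illustrative usage sketch (not part of the original module): chaining the BIP39
# helpers defined above. Output values are not reproduced because they depend on
# the random entropy; this only shows the intended call order.
if __name__ == "__main__":
    seed, mnemonic = bip39_generate(128)       # 128-bit entropy -> 12-word mnemonic
    assert bip39_check_mnemonic(mnemonic)      # checksum round-trip on the word list
    hexseed = bip39_mnemonic_to_hex(mnemonic)  # PBKDF2-stretched seed (empty passphrase)
    print(seed, mnemonic, hexseed)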
|
savoirfairelinux/num2words
|
tests/test_ko.py
|
Python
|
lgpl-2.1
| 4,547
| 0
|
# -*- coding: utf-8 -*-
# Copyright (c) 2003, Taro Ogawa. All Rights Reserved.
# Copyright (c) 2013, Savoir-faire Linux inc. All Rights Reserved.
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
from __future__ import division, print_function, unicode_literals
from unittest import TestCase
from num2words import num2words
def n2k(*args, **kwargs):
return num2words(*args, lang='ko', **kwargs)
class Num2WordsKOTest(TestCase):
def test_low(self):
cases = [(0, "영"), (1, "일"), (2, "이"), (3, "삼"), (4, "사"), (5, "오"),
(6, "육"), (7, "칠"), (8, "팔"), (9, "구"), (10, "십"),
(11, "십일"), (12, "십이"), (13, "십삼"), (14, "십사"),
(15, "십오"), (16, "십육"), (17, "십칠"),
(18, "십팔"), (19, "십구"), (20, "이십"), (25, "이십오"),
(31, "삼십일"), (42, "사십이"), (54, "오십사"), (63, "육십삼"),
(76, "칠십육"), (89, "팔십구"), (98, "구십팔")]
for num, out in cases:
self.assertEqual(n2k(num), out)
def test_mid(self):
cases = [(100, "백"), (121, "백이십일"), (160, "백육십"), (256, "이백오십육"),
(285, "이백팔십오"), (486, "사백팔십육"), (627, "육백이십칠"),
(808, "팔백팔"), (999, "구백구십구"), (1004, "천사"),
(2018, "이천십팔"), (7063, "칠천육십삼")]
for num, out in cases:
self.assertEqual(n2k(num), out)
def test_high(self):
cases = [(10000, "만"), (11020, "만 천이십"), (25891, "이만 오천팔백구십일"),
(64237, "육만 사천이백삼십칠"), (241572, "이십사만 천오백칠십이"),
(100000000, "일억"), (5000500000000, "오조 오억")]
for num, out in cases:
self.assertEqual(n2k(num), out)
def test_negative(self):
cases = [(-11, "마이너스 십일"), (-15, "마이너스 십오"),
(-18, "마이너스 십팔"), (-241572, "마이너스 이십사만 천오백칠십이")]
for num, out in cases:
self.assertEqual(n2k(num), out)
def test_year(self):
cases = [(2000, "이천년"), (2002, "이천이년"), (2018, "이천십팔년"),
(1954, "천구백오십사년"), (1910, "천구백십년"), (-1000, "기원전 천년")]
for num, out in cases:
self.assertEqual(n2k(num, to="year"), out)
def test_currency(self):
cases_krw = [(8350, "팔천삼백오십원"), (14980, "만사천구백팔십원"),
(250004000, "이억오천만사천원")]
cases_usd = [(4, "사달러 영센트"), (19.55, "십구달러 오십오센트")]
cases_jpy = [(15, "십오엔"), (50, "오십엔")]
for num, out in cases_krw:
self.assertEqual(n2k(num, to="currency"), out)
for num, out in cases_usd:
self.assertEqual(n2k(num, to="currency", currency="USD"), out)
for num, out in cases_jpy:
self.assertEqual(n2k(num, to="currency", currency="JPY"), out)
with self.assertRaises(ValueError):
n2k(190.55, to="currency")
with self.assertRaises(NotImplementedError):
n2k(4, to="currency", currency="EUR")
def test_ordinal(self):
cases = [(1, "첫 번째"), (101, "백 한 번째"), (2, "두 번째"), (5, "다섯 번째"),
(10, "열 번째"), (25, "스물다섯 번째"), (137, "백 서른일곱 번째")]
for num, out in cases:
self.assertEqual(n2k(num, to="ordinal"), out)
def test_ordinal_num(self):
cases = [(1, "1 번째"), (101, "101 번째"), (25, "25 번째")]
for num, out in cases:
self.assertEqual(n2k(num, to="ordinal_num"), out)
|
rtts/qqq
|
qqq/collections/urls.py
|
Python
|
gpl-3.0
| 730
| 0.005479
|
from django.conf.urls.defaults import *
from django.utils.translation import ugettext as _
urlpatterns = patterns('qqq.collections.views',
(r'^%s/$' % _('collections'), 'collections'),
(r'^%s/(\d+)/$' % _('collection'), 'saved_collection'),
(r'^%s/(\d+)/([^/]+)/$' % _('collection'), 'saved_collection'),
(r'^%s/$' % _('vote-for-collection'), 'vote_for_collection'),
(r'^%s/$' % _('add-collection'), 'add_collection'),
(r'^%s/$' % _('collection'), 'collection'),
(r'^%s/%s/$' % (_('collection'), _('add-questions')) , 'add_to_saved_collection'),
(r'^%s/%s\.(\w{3})$' % (_('collection'), _('download')), 'download'),
(r'^%s/(\d+)/%s\.(\w{3})$' % (_('collection'), _('download')), 'download_saved_collection'),
)
|
fugwenna/bunkbot
|
src/core/event_hook.py
|
Python
|
mit
| 537
| 0.001862
|
class EventHook(object):
"""
Basic "event system" from:
http://www.voidspace.org.uk/python/weblog/arch_d7_2007_02_03.shtml#e616
"""
def __init__(self):
self.__handlers = []
def __iadd__(self, handler):
self.__handlers.append(handler)
return self
def __isub__(self, handler):
self.__handlers.remove(handler)
return self
async def emit(self, *args, **keywargs):
for handler in self.__handlers:
await handler(*args, **keywargs)
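# Illustrative usage sketch (not part of the original file): registering a
# coroutine handler on an EventHook and emitting it. The handler name and the
# payload string are examples only.
if __name__ == "__main__":
    import asyncio

    async def on_message(text):
        print("got:", text)

    hook = EventHook()
    hook += on_message               # __iadd__ registers the handler
    asyncio.run(hook.emit("hello"))  # emit awaits each registered handler in order
    hook -= on_message               # __isub__ removes it again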
|
nburn42/tensorflow
|
tensorflow/contrib/boosted_trees/examples/boston.py
|
Python
|
apache-2.0
| 6,159
| 0.006495
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Demonstrates a regression on Boston housing data.
This example demonstrates how to run experiments with TF Boosted Trees on
a regression dataset. We split all the data into 20% test and 80% train,
and are using l2 loss and l2 regularization.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/boston.py \
--batch_size=404 --output_dir="/tmp/boston" --depth=4 --learning_rate=0.1 \
--num_eval_steps=1 --num_trees=500 --l2=0.001 \
--vmodule=training_ops=1
When training is done, mean squared error on eval data is reported.
Point tensorboard to the directory for the run to see how the training
progresses:
tensorboard --logdir=/tmp/boston
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch import custom_export_strategy
from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientBoostedDecisionTreeRegressor
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn import learn_runner
_BOSTON_NUM_FEATURES = 13
# Main config - creates a TF Boosted Trees Estimator based on flags.
def _get_tfbt(output_dir, feature_cols):
"""Configures TF Boosted Trees estimator based on flags."""
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = FLAGS.learning_rate
learner_config.regularization.l1 = 0.0
learner_config.regularization.l2 = FLAGS.l2
learner_config.constraints.max_tree_depth = FLAGS.depth
run_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=300)
# Create a TF Boosted trees regression estimator.
estimator = GradientBoostedDecisionTreeRegressor(
learner_config=learner_config,
# This should be the number of examples. For large datasets it can be
# larger than the batch_size.
examples_per_layer=FLAGS.batch_size,
feature_columns=feature_cols,
label_dimension=1,
model_dir=output_dir,
num_trees=FLAGS.num_trees,
center_bias=False,
config=run_config)
return estimator
def _convert_fn(dtec, sorted_feature_names, num_dense, num_sparse_float,
num_sparse_int, export_dir, unused_eval_result):
universal_format = custom_export_strategy.convert_to_universal_format(
dtec, sorted_feature_names, num_dense, num_sparse_float, num_sparse_int)
with tf.gfile.GFile(os.path.join(export_dir, "tree_proto"), "w") as f:
f.write(str(universal_format))
def _make_experiment_fn(output_dir):
"""Creates experiment for gradient boosted decision trees."""
(x_train, y_train), (x_test,
y_test) = tf.keras.datasets.boston_housing.load_data()
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_train},
y=y_train,
batch_size=FLAGS.batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)
feature_columns = [
feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
]
feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = tf.contrib.learn.utils.build_parsing_serving_input_fn(
feature_spec)
# An export strategy that outputs the feature importance and also exports
# the internal tree representation in another format.
export_strategy = custom_export_strategy.make_custom_export_strategy(
"exports",
convert_fn=_convert_fn,
feature_columns=feature_columns,
export_input_fn=serving_input_fn)
return tf.contrib.learn.Experiment(
estimator=_get_tfbt(output_dir, feature_columns),
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=None,
eval_steps=FLAGS.num_eval_steps,
eval_metrics=None,
export_strategies=[export_strategy])
def main(unused_argv):
learn_runner.run(
experiment_fn=_make_experiment_fn,
output_dir=FLAGS.output_dir,
schedule="train_and_evaluate")
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser()
# Define the list of flags that users can change.
parser.add_argument(
"--batch_size",
type=int,
default=1000,
help="The batch size for reading data.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Choose the dir for the output.")
parser.add_argument(
"--num_eval_steps",
type=int,
default=1,
help="The number of steps to run evaluation for.")
# Flags for gradient boosted trees config.
parser.add_argument(
"--depth", type=int, default=4, help="Maximum depth of weak learners.")
parser.add_argument(
"--l2", type=float, default=1.0, help="l2 regularization per batch.")
parser.add_argument(
"--learning_rate",
type=float,
default=0.1,
help="Learning rate (shrinkage weight) with which each new tree is added."
)
parser.add_argument(
"--num_trees",
type=int,
default=None,
required=True,
help="Number of trees to grow before stopping.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
Arcanemagus/SickRage
|
sickbeard/providers/speedcd.py
|
Python
|
gpl-3.0
| 7,229
| 0.003182
|
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# URL: https://sick-rage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
import string
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class SpeedCDProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, "Speedcd")
# Credentials
self.username = None
self.password = None
# Torrent Stats
self.minseed = None
self.minleech = None
self.freeleech = False
# URLs
self.url = 'https://speed.cd'
self.urls = {
'login': urljoin(self.url, 'takeElogin.php'),
'search': urljoin(self.url, 'browse.php'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK']
# Cache
self.cache = tvcache.TVCache(self)
def login(self):
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {
'username': self.username,
'password': self.password,
}
# Yay lets add another request to the process since they are unreasonable.
response = self.get_url(self.url, returns='text')
with BS4Parser(response, 'html5lib') as html:
form = html.find('form', id='loginform')
if form:
self.urls['login'] = urljoin(self.url, form['action'])
response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Incorrect username or Password. Please try again.', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
if not self.login():
return results
# http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
# Search Params
search_params = {
'c30': 1, # Anime
'c41': 1, # TV/Packs
'c49': 1, # TV/HD
'c50': 1, # TV/Sports
'c52': 1, # TV/B-Ray
'c55': 1, # TV/Kids
'search': '',
}
# Units
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def process_column_header(td):
result = ''
img = td.find('img')
if img:
result = img.get('alt')
if not result:
result = td.get_text(strip=True)
return result
if self.freeleech:
search_params['freeleech'] = 'on'
for mode in search_strings:
items = []
logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log("Search string: {0}".format
(search_string.decode("utf-8")), logger.DEBUG)
search_params['search'] = search_string.translate(None, string.punctuation)
data = self.get_url(self.urls['search'], params=search_params, returns='text')
if not data:
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='boxContent')
torrent_table = torrent_table.find('table') if torrent_table else []
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one Release is found
if len(torrent_rows) < 2:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
labels = [process_column_header(label) for label in torrent_rows[0]('th')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result('td')
title = cells[labels.index('Title')].find('a', class_='torrent').get_text()
download_url = urljoin(self.url, cells[labels.index('Download') - 1].a['href'])
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('Seeders') - 1].get_text(strip=True))
leechers = try_int(cells[labels.index('Leechers') - 1].get_text(strip=True))
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
torrent_size = cells[labels.index('Size') - 1].get_text()
torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
size = convert_size(torrent_size, units=units) or -1
item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
except StandardError:
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
results += items
return results
provider = SpeedCDProvider()
|
tadgh/ArgoRevisit
|
third_party/nltk/sourcedstring.py
|
Python
|
apache-2.0
| 54,572
| 0.00317
|
# Natural Language Toolkit: Sourced Strings
#
# Copyright (C) 2001-2009 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
X{Sourced strings} are strings that are annotated with information
about the location in a document where they were originally found.
Sourced strings are subclassed from Python strings. As a result, they
can usually be used anywhere a normal Python string can be used.
>>> newt_contents = '''\
... She turned me into a newt!
... I got better.'''
>>> newt_doc = SourcedString(newt_contents, 'newt.txt')
>>> print repr(newt_doc)
'She turned me into a newt!\nI got better.'@[0:40]
>>> newt = newt_doc.split()[5] # Find the sixth word.
>>> print repr(newt)
'newt!'@[21:26]
"""
import re, sys
from nltk.internals import slice_bounds, abstract
__all__ = [
'StringSource',
'ConsecutiveCharStringSource', 'ContiguousCharStringSource',
'SourcedString', 'SourcedStringStream', 'SourcedStringRegexp',
'SimpleSourcedString', 'CompoundSourcedString',
'SimpleSourcedByteString', 'SimpleSourcedUnicodeString',
'CompoundSourcedByteString', 'CompoundSourcedUnicodeString',
]
#//////////////////////////////////////////////////////////////////////
# String Sources
#//////////////////////////////////////////////////////////////////////
class StringSource(object):
"""
A description of the location of a string in a document. Each
C{StringSource} consists of a document identifier, along with
information about the begin and end offsets of each character in
the string. These offsets are typically either byte offsets or
character offsets. (Note that for unicode strings, byte offsets
and character offsets are not the same thing.)
C{StringSource} is an abstract base class. Two concrete
subclasses are used depending on the properties of the string
whose source is being described:
- L{ConsecutiveCharStringSource} describes the source of strings
whose characters have consecutive offsets (in particular, byte
strings w/ byte offsets; and unicode strings with character
offsets).
- L{ContiguousCharStringSource} describes the source of strings
whose characters are contiguous, but do not necessarily have
consecutive offsets (in particular, unicode strings with byte
offsets).
@ivar docid: An identifier (such as a filename) that specifies
which document contains the string.
@ivar offsets: A list of offsets specifying the location of each
character in the document. The C{i}th character of the string
begins at offset C{offsets[i]} and ends at offset
C{offsets[i+1]}. The length of the C{offsets} list is one
greater than the length of the string described by this
C{StringSource}.
@ivar begin: The document offset where the string begins. (I.e.,
the offset of the first character in the string.)
C{source.begin} is always equal to C{source.offsets[0]}.
@ivar end: The document offset where the string ends. (For
character offsets, one plus the offset of the last character;
for byte offsets, one plus the offset of the last byte that
encodes the last character). C{source.end} is always equal
to C{source.offsets[-1]}.
"""
def __new__(cls, docid, *args, **kwargs):
# If the StringSource constructor is called directly, then
# choose one of its subclasses to delegate to.
if cls is StringSource:
if args:
raise TypeError("Specifcy either begin and end, or "
"offsets, using keyword arguments")
if 'begin' in kwargs and 'end' in kwargs and 'offsets' not in kwargs:
cls = ConsecutiveCharStringSource
elif ('begin' not in kwargs and 'end' not in kwargs and
'offsets' in kwargs):
cls = ContiguousCharStringSource
else:
raise TypeError("Specify either begin and end, or offsets "
"(but not both)")
# Construct the object.
return object.__new__(cls)
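# Illustrative dispatch examples (the docid value is hypothetical, not from this module):
#
#     StringSource('newt.txt', begin=0, end=5)     # -> ConsecutiveCharStringSource
#     StringSource('newt.txt', offsets=[0, 2, 5])  # -> ContiguousCharStringSource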
def __init__(self, docid, **kwargs):
"""
Create a new C{StringSource}. When the C{StringSource}
constructor is called directly, it automatically delegates to
one of its two subclasses:
- If C{begin} and C{end} are specified, then a
L{ConsecutiveCharStringSource} is returned.
- If C{offsets} is specified, then a
L{ContiguousCharStringSource} is returned.
In both cases, the arguments must be specified as keyword
arguments (not positional arguments).
"""
def __getitem__(self, index):
"""
Return a L{StringSource} describing the location where the
specified character was found. In particular, if C{s} is the
string that this source describes, then return a
L{StringSource} describing the location of C{s[index]}.
@raise IndexError: If index is out of range.
"""
if isinstance(index, slice):
start, stop = slice_bounds(self, index)
return self.__getslice__(start, stop)
else:
if index < 0: index += len(self)
if index < 0 or index >= len(self):
raise IndexError('StringSource index out of range')
return self.__getslice__(index, index+1)
@abstract
def __getslice__(self, start, stop):
"""
Return a L{StringSource} describing the location where the
specified substring was found. In particular, if C{s} is the
string that this source describes, then return a
L{StringSource} describing the location of C{s[start:stop]}.
"""
@abstract
def __len__(self):
"""
Return the length of the string described by this
C{StringSource}. Note that this may not be equal to
C{self.end-self.begin} for unicode strings described using
byte offsets.
"""
def __str__(self):
if self.end == self.begin+1:
return '@%s[%s]' % (self.docid, self.begin,)
else:
return '@%s[%s:%s]' % (self.docid, self.begin, self.end)
def __cmp__(self, other):
return (cmp(self.docid, other.docid) or
cmp([(charloc.begin, charloc.end) for charloc in self],
[(charloc.begin, charloc.end) for charloc in other]))
def __hash__(self):
# Cache hash values.
if not hasattr(self, '_hash'):
self._hash = hash( (self.docid,
tuple((charloc.begin, charloc.end)
for charloc in self)) )
return self._hash
class ConsecutiveCharStringSource(StringSource):
"""
A L{StringSource} that specifies the source of strings whose
characters have consecutive offsets. In particular, the following
two properties must hold for all valid indices:
- source[i].end == source[i].begin + 1
- source[i].end == source[i+1].begin
These properties allow the source to be stored using just a start
offset and an end offset (along with a docid).
This C{StringSource} can be used to describe byte strings that are
indexed using byte offsets or character offsets; or unicode
strings that are indexed using character offsets.
"""
def __init__(self, docid, begin, end):
if not isinstance(begin, (int, long)):
raise TypeError("begin attribute expected an integer")
if not isinstance(end, (int, long)):
raise TypeError("end attribute expected an integer")
if not end >= begin:
raise ValueError("begin must be less than or equal to end")
self.docid = docid
self.begin = begin
self.end = end
@property
def offsets(self):
return tuple(range(self.begin, self.end+1))
def __len__(self):
|
hongzhouye/frankenstein
|
sgscf/sgopt.py
|
Python
|
bsd-3-clause
| 6,925
| 0.002455
|
"""Gradient descent
"""
import numpy as np
from frankenstein.tools.perf_utils import TIMER
from pyscf.lib import logger
# NB: scalar_search_armijo (used via `scopt_linsrc` in NR.next_step below) was
# never imported in this file; assuming scipy's line-search module is the
# intended source.
from scipy.optimize import linesearch as scopt_linsrc
""" Helper functions
"""
def get_gHp_fd(get_grad, p, order=1, eps=1.E-4):
""" Compute gradient-Hessian product using finite difference
Inps:
get_grad (callable):
grad(p) --> gradient given a direction p
p (np.ndarray):
initial gradient
order (int, default=1):
order 1 --> forward FD (err ~ O(eps))
order 2 --> central FD (err ~ O(eps^2))
eps (float, default=1.E-4):
strength of perturbation
"""
p_f = get_grad(eps*p)
if order == 1:
return 2. * (p_f-p) / eps
elif order == 2:
p_b = get_grad(-eps*p)
return (p_f-p_b) / eps
else:
raise ValueError("Invalid order (must be 1 or 2)!")
# Newton-raphson (for debug)
class NR:
def __init__(self, mf, eps=1.E-3, fd=2):
self.verbose = mf.verbose
self.stdout = mf.stdout
self.comment = ""
self.eps = eps
self.fd = fd
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(4, stdout=stdout)
self.iteration = 0
def next_step(self, mf):
f = mf.get_value_gdm()
g = mf.get_grad_gdm()
# build fd hessian
def dphi(i, eps):
mf.back_to_origin()
mf.ov = np.zeros([mf.ov_size])
mf.ov[i] = eps
mf.update_all()
mf.ov[i] = 0.
return mf.get_grad_gdm()
self.timer.start(0)
mf.save_new_origin()
H = np.zeros([mf.ov_size]*2)
for i in range(mf.ov_size):
if self.fd == 1:
H[i] = (dphi(i,self.eps) - g) / self.eps
elif self.fd == 2:
H[i] = (dphi(i,self.eps) - dphi(i,-self.eps)) / (2.*self.eps)
else:
raise ValueError("fd must be 1 or 2.")
mf.back_to_origin()
self.timer.stop(0)
# get raw NR step
self.timer.start(1)
lbd = 1.E-5
du = -np.linalg.solve(H+lbd*np.eye(H.shape[1]), g)
self.timer.stop(1)
# line search
fc = [0]
def phi(alp):
fc[0] += 1
mf.back_to_origin()
mf.ov = alp * mf.regularize_step_gdm(du)
mf.update_all(skip_grad=True)
return mf.get_value_gdm()
self.timer.start(2)
mf.save_new_origin()
fold = f
dphi0 = g @ du
alp, fnew = scopt_linsrc.scalar_search_armijo(
phi, fold, dphi0, c1=1.E-4, alpha0=1.)
self.timer.stop(2)
fc = fc[0]
if alp is None:
raise RuntimeError("Line search failed.")
if fc == 1:
self.comment = "NR"
else:
self.comment = "LnSr (%d,%.2f)"%(fc,alp)
self.timer.start(3)
mf.update_gdm()
self.timer.stop(3)
self.iteration += 1
def report_timing(self):
self.timer.report(tnames=["hess", "linsolve", "linsrch", "grad"])
# Direct minimization (for debug)
class DM:
def __init__(self, mf, bounds=[-1,0], method="bf", plot=False):
if method == "bf":
self.alps = np.arange(*bounds, 0.05)
elif method == "interpolate":
self.amin = min(bounds)
self.amax = max(bounds)
self.ninter = 5
self.neval = 100
else:
raise ValueError("Unknown method '%s'." % method)
self.method = method
self.plot = plot
self.verbose = mf.verbose
self.stdout = mf.stdout
self.comment = ""
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(2, stdout=stdout)
self.iteration = 0
def next_step(self, mf):
from scipy import interpolate as itplt
from matplotlib import pyplot as plt
g = mf.get_grad_gdm()
def phi(alp):
mf.back_to_origin()
mf.ov = alp * g
mf.update_all(skip_grad=True)
mf.ov = np.zeros(mf.ov_size)
return mf.get_value_gdm()
mf.save_new_origin()
E0 = mf.get_value_gdm()
self.timer.start(0)
if self.method == "bf":
alps = self.alps
Es = np.asarray([phi(alp) for alp in alps]) - E0
elif self.method == "interpolate":
amin = self.amin
amax = self.amax
err_g = np.mean(g**2)**0.5
if err_g > 1.E-3:
xs = np.linspace(amin, amax, self.ninter)
ys = np.asarray([phi(x) for x in xs])
xyrep = itplt.splrep(xs, ys)
fp = lambda x: itplt.splev(x, xyrep)
else:
xs = np.linspace(amin, amax, 3)
ys = np.asarray([phi(x) for x in xs])
p = np.polyfit(xs, ys, 2)
fp = np.poly1d(p)
alps = np.linspace(amin, amax, self.neval)
Es = fp(alps)
idmin = np.argmin(Es)
alp = alps[idmin]
E = Es[idmin]
self.timer.stop(0)
if self.plot:
plt.plot(alps, Es, "-")
if self.method == "interpolate": plt.plot(xs, ys, "o")
plt.plot(alp, E, "rx")
plt.show()
self.comment = "alp = % .2f" % alp
self.timer.start(1)
mf.back_to_origin()
mf.ov = alp * g
mf.update_all()
self.timer.stop(1)
self.iteration += 1
def report_timing(self):
self.timer.report(["lnsrch", "update me"])
# Direct inversion of iterative subspace (DIIS)
from pyscf.lib.diis import DIIS as pyDIIS
class DIIS:
def __init__(self, mf, ndiis=50, diis_start=1):
self.adiis = pyDIIS()
self.adiis.space = ndiis
self.adiis.min_space = diis_start
self.iteration = 0
self.comment = ""
try:
stdout = mf.stdout
except:
stdout = None
self.timer = TIMER(4, stdout=stdout)
def next_step(self, mf):
self.iteration += 1
self.timer.start(0)
f = mf.get_fock_diis()
ferr = mf.get_err_diis()
self.timer.stop(0)
self.timer.start(1)
f = self.adiis.update(f, ferr)
self.timer.stop(1)
self.timer.start(2)
if hasattr(mf, "mom_start"):
mom = self.iteration >= mf.mom_start
else:
mom = False
comment = mf.update_diis(f, mom=mom)
self.timer.stop(2)
self.timer.start(3)
mf.update_all()
self.timer.stop(3)
self.comment = "DIIS" if self.iteration > 0 else "Roothaan"
self.comment += " %s" % comment
def report_timing(self):
self.timer.report(tnames=["diis prep", "diis extrap", "roothaan",
"fock build"])
|
mercycorps/TolaActivity
|
tola/forms.py
|
Python
|
apache-2.0
| 3,887
| 0.004116
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import *
from crispy_forms.bootstrap import *
from crispy_forms.layout import Layout, Submit, Reset, Div
from django import forms
from django.contrib.auth.forms import UserCreationForm
from workflow.models import TolaUser
from django.contrib.auth.models import User
from django.utils.translation import (
ugettext_lazy as _,
activate as set_language
)
class ProfileUpdateForm(forms.ModelForm):
"""
Form for registering a new account.
"""
def __init__(self, *args, **kwargs):
user = kwargs.pop('user')
# moving helper button description to init so translations will re-init on reload:
self.helper.layout = Layout(
Field( 'language' ),
Div(
FormActions(
Submit('submit', _('Save changes'), css_class=''),
Reset('reset', _('Cancel'), css_class='')
),
),
)
super(ProfileUpdateForm, self).__init__(*args, **kwargs)
self.fields['language'].label = _('Language')
class Meta:
model = TolaUser
fields = [ 'language', ]
helper = FormHelper()
helper.form_method = 'post'
helper.form_class = 'hide-askerisks'
helper.label_class = ''
helper.field_class = ''
helper.form_error_title = _('Form Errors')
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
def save(self, *args, **kwargs):
model = super(ProfileUpdateForm, self).save(*args, **kwargs)
# explicitly update the language on form save so success messages are in the correct lang:
set_language(model.language)
return model
class NewUserRegistrationForm(UserCreationForm):
"""
Form for registering a new account.
"""
class Meta:
model = User
fields = ['first_name',
'last_name','email','username']
def __init__(self, *args, **kwargs):
super(NewUserRegistrationForm, self).__init__(*args, **kwargs)
helper = FormHelper()
helper.form_method = 'post'
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-6'
helper.form_error_title = 'Form Errors'
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
helper.form_tag = False
class NewTolaUserRegistrationForm(forms.ModelForm):
"""
Form for registering a new account.
"""
class Meta:
model = TolaUser
fields = ['title', 'country', 'privacy_disclaimer_accepted']
def __init__(self, *args, **kwargs):
super(NewTolaUserRegistrationForm, self).__init__(*args, **kwargs)
helper = FormHelper()
helper.form_method = 'post'
helper.form_class = 'form-horizontal'
helper.label_class = 'col-sm-2'
helper.field_class = 'col-sm-6'
helper.form_error_title = 'Form Errors'
helper.error_text_inline = True
helper.help_text_inline = True
helper.html5_required = True
helper.form_tag = False
helper.layout = Layout(
Fieldset('Information','title', 'country'),
Fieldset('Privacy Statement','privacy_disclaimer_accepted',),
)
class NonLocalizedDecimalField(forms.DecimalField):
"""Decimal Field which accepts "," as floating-point separator regardless of locale
Tola users are from a variety of cultures, and use of English does not guarantee preference for "." as
floating-point separator, as such we accept "," for floating-point separator for any language/locale.
This input displays values according to locale settings ("," for ES/FR, "." for EN)
"""
def to_python(self, value):
if ',' in str(value):
value = str(value).replace(',', '.')
return super().to_python(value)
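# A minimal usage sketch (field arguments are illustrative, not from this module):
#
#     price = NonLocalizedDecimalField(max_digits=6, decimal_places=2)
#     price.to_python('3,50')   # -> Decimal('3.50')
#     price.to_python('3.50')   # -> Decimal('3.50')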
|
joergdietrich/astropy
|
astropy/vo/validator/tests/test_validate.py
|
Python
|
bsd-3-clause
| 3,067
| 0.000652
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for `astropy.vo.validator.validate`.
.. note::
This test will fail if external URL query status
changes. This is beyond the control of AstroPy.
When this happens, rerun or update the test.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# STDLIB
import os
import shutil
import tempfile
import warnings
# LOCAL
from .. import conf
from .. import validate
from ...client.vos_catalog import VOSDatabase
from ....tests.helper import pytest, remote_data
from ....utils.data import get_pkg_data_filename
from ....utils.exceptions import AstropyDeprecationWarning
from ....utils import data
__doctest_skip__ = ['*']
def setup_module():
"""Ignore all deprecation warnings here."""
warnings.simplefilter('ignore', AstropyDeprecationWarning)
def teardown_module():
warnings.resetwarnings()
@remote_data
class TestConeSearchValidation(object):
"""Validation on a small subset of Cone Search sites."""
def setup_class(self):
self.datadir = 'data'
self.out_dir = tempfile.mkdtemp()
self.filenames = {
'good': 'conesearch_good.json',
'warn': 'conesearch_warn.json',
'excp': 'conesearch_exception.json',
'nerr': 'conesearch_error.json'}
conf.conesearch_master_list = get_pkg_data_filename(os.path.join(
self.datadir, 'vao_conesearch_sites_121107_subset.xml'))
data.conf.remote_timeout = 30
@staticmethod
def _compare_catnames(fname1, fname2):
db1 = VOSDatabase.from_json(fname1)
db2 = VOSDatabase.from_json(fname2)
assert db1.list_catalogs() == db2.list_catalogs()
@pytest.mark.parametrize(('parallel'), [True, False])
def test_validation(self, parallel):
if os.path.exists(self.out_dir):
shutil.rmtree(self.out_dir)
validate.check_conesearch_sites(
destdir=self.out_dir, parallel=parallel, url_list=None)
for val in self.filenames.values():
self._compare_catnames(get_pkg_data_filename(
os.path.join(self.datadir, val)),
os.path.join(self.out_dir, val))
@pytest.mark.parametrize(('parallel'), [True, False])
def test_url_list(self, parallel):
local_outdir = os.path.join(self.out_dir, 'subtmp1')
local_list = [
'http://www.google.com/foo&',
'http://vizier.u-strasbg.fr/viz-bin/votable/-A?-out.all&-source=I/252/out&']
validate.check_conesearch_sites(destdir=local_outdir,
parallel=parallel,
url_list=local_list)
self._compare_catnames(get_pkg_data_filename(
os.path.join(self.datadir, self.filenames['good'])),
os.path.join(local_outdir, self.filenames['good']))
def teardown_class(self):
conf.reset('conesearch_master_list')
data.conf.reset('remote_timeout')
shutil.rmtree(self.out_dir)
|
yantrabuddhi/FreeCAD
|
src/Mod/OpenSCAD/OpenSCADUtils.py
|
Python
|
lgpl-2.1
| 24,151
| 0.02385
|
#***************************************************************************
#* *
#* Copyright (c) 2012 Sebastian Hoogen <github@sebastianhoogen.de> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD OpenSCAD Workbench - Utility Functions"
__author__ = "Sebastian Hoogen"
__url__ = ["http://www.freecadweb.org"]
'''
This script includes various Python helper functions that are shared across
the module
'''
def translate(context,text):
"convenience function for Qt translator"
from PySide import QtGui
return QtGui.QApplication.translate(context, text, None, \
QtGui.QApplication.UnicodeUTF8)
try:
import FreeCAD
BaseError = FreeCAD.Base.FreeCADError
except (ImportError, AttributeError):
BaseError = RuntimeError
class OpenSCADError(BaseError):
def __init__(self,value):
self.value= value
#def __repr__(self):
# return self.msg
def __str__(self):
return repr(self.value)
def searchforopenscadexe():
import os,sys,subprocess
if sys.platform == 'win32':
testpaths = [os.path.join(os.environ.get('Programfiles(x86)','C:'),\
'OpenSCAD\\openscad.exe')]
if 'ProgramW6432' in os.environ:
testpaths.append(os.path.join(os.environ.get('ProgramW6432','C:')\
,'OpenSCAD\\openscad.exe'))
for testpath in testpaths:
if os.path.isfile(testpath):
return testpath
elif sys.platform == 'darwin':
ascript = ('tell application "Finder"\n'
'POSIX path of (application file id "org.openscad.OpenSCAD"'
'as alias)\n'
'end tell')
p1=subprocess.Popen(['osascript','-'],stdin=subprocess.PIPE,\
stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout,stderr = p1.communicate(ascript)
if p1.returncode == 0:
opathl=stdout.split('\n')
if len(opathl) >=1:
return opathl[0]+'Contents/MacOS/OpenSCAD'
#test the default path
testpath="/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
if os.path.isfile(testpath):
return testpath
else: #unix
p1=subprocess.Popen(['which','openscad'],stdout=subprocess.PIPE)
if p1.wait() == 0:
opath=p1.stdout.read().split('\n')[0]
return opath
def workaroundforissue128needed():
'''sets the import path depending on the OpenSCAD version
for versions <= 2012.06.23 to the current working dir
for versions above to the inputfile dir
see https://github.com/openscad/openscad/issues/128'''
vdate=getopenscadversion().split(' ')[2].split('.')
year,mon=int(vdate[0]),int(vdate[1])
return (year<2012 or (year==2012 and (mon <6 or (mon == 6 and \
(len(vdate)<3 or int(vdate[2]) <=23)))))
#ifdate=int(vdate[0])+(int(vdate[1])-1)/12.0
#if len(vdate)>2:
# fdate+=int((vdate[2])-1)/12.0/31.0
#return fdate < 2012.4759
def getopenscadversion(osfilename=None):
import os,subprocess,time
if not osfilename:
import FreeCAD
osfilename = FreeCAD.ParamGet(\
"User parameter:BaseApp/Preferences/Mod/OpenSCAD").\
GetString('openscadexecutable')
if osfilename and os.path.isfile(osfilename):
p=subprocess.Popen([osfilename,'-v'],\
stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)
p.wait()
stdout=p.stdout.read().strip()
stderr=p.stderr.read().strip()
return (stdout or stderr)
def newtempfilename():
import os,time
formatstr='fc-%05d-%06d-%06d'
count = 0
while True:
count+=1
yield formatstr % (os.getpid(),int(time.time()*100) % 1000000,count)
tempfilenamegen=newtempfilename()
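# Each next() call yields a name unique to this process and call, e.g.
# (illustrative) 'fc-01234-567890-000003', combining the pid, a timestamp
# slice and a running counter.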
def callopenscad(inputfilename,outputfilename=None,outputext='csg',keepname=False):
'''call the open scad binary
returns the filename of the result (or None),
please delete the file afterwards'''
import FreeCAD,os,subprocess,tempfile,time
def check_output2(*args,**kwargs):
kwargs.update({'stdout':subprocess.PIPE,'stderr':subprocess.PIPE})
p=subprocess.Popen(*args,**kwargs)
stdoutd,stderrd = p.communicate()
if p.returncode != 0:
raise OpenSCADError('%s %s\n' % (stdoutd.strip(),stderrd.strip()))
#raise Exception,'stdout %s\n stderr%s' %(stdoutd,stderrd)
if stderrd.strip():
FreeCAD.Console.PrintWarning(stderrd+u'\n')
if stdoutd.strip():
FreeCAD.Console.PrintMessage(stdoutd+u'\n')
return stdoutd
osfilename = FreeCAD.ParamGet(\
"User parameter:BaseApp/Preferences/Mod/OpenSCAD").\
GetString('openscadexecutable')
if osfilename and os.path.isfile(osfilename):
if not outputfilename:
dir1=tempfile.gettempdir()
if keepname:
outputfilename=os.path.join(dir1,'%s.%s' % (os.path.split(\
inputfilename)[1].rsplit('.',1)[0],outputext))
else:
outputfilename=os.path.join(dir1,'%s.%s' % \
(tempfilenamegen.next(),outputext))
check_output2([osfilename,'-o',outputfilename, inputfilename])
return outputfilename
else:
raise OpenSCADError('OpenSCAD executable unavailable')
def callopenscadstring(scadstr,outputext='csg'):
'''create a tempfile and call the open scad binary
returns the filename of the result (or None),
please delete the file afterwards'''
import os,tempfile,time
dir1=tempfile.gettempdir()
inputfilename=os.path.join(dir1,'%s.scad' % tempfilenamegen.next())
inputfile = open(inputfilename,'w')
inputfile.write(scadstr)
inputfile.close()
outputfilename = callopenscad(inputfilename,outputext=outputext,\
keepname=True)
os.unlink(inputfilename)
return outputfilename
def reverseimporttypes():
'''allows to search for supported filetypes by module'''
def getsetfromdict(dict1,index):
if index in dict1:
return dict1[index]
else:
set1=set()
dict1[index]=set1
return set1
importtypes={}
import FreeCAD
for key,value in FreeCAD.getImportType().iteritems():
if type(value) is str:
getsetfromdict(importtypes,value).add(key)
else:
for vitem in value:
getsetfromdict(importtypes,vitem).add(key)
return importtypes
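# Hedged usage sketch: the returned dict maps an importer module name to the
# set of file extensions it handles, roughly the inverse of
# FreeCAD.getImportType(); e.g. (illustrative) {'ImportGui': {'step', 'stp'}, ...}.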
def fcsubmatrix(m):
"""Extracts the 3x3 Submatrix from a freecad Matrix Object
as a list of row vectors"""
return [[m.A11,m.A12,m.A13],[m.A21,m.A22,m.A23],[m.A31,m.A32,m.A33]]
def multiplymat(l,r):
"""multiply matrices given as lists of row vectors"""
rt=zip(*r)
|
kailIII/emaresa
|
trunk.pe.bk/l10n_pe_vat/__init__.py
|
Python
|
agpl-3.0
| 1,401
| 0
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import base_vat
|
Joergen/olympia
|
sites/identitystage/settings_base.py
|
Python
|
bsd-3-clause
| 5,553
| 0.00036
|
"""private_base will be populated from puppet and placed in this directory"""
import logging
import os
import dj_database_url
from lib.settings_base import (
CACHE_PREFIX, ES_INDEXES, KNOWN_PROXIES, LOGGING, CSP_SCRIPT_SRC,
CSP_FRAME_SRC)
from .. import splitstrip
import private_base as private
ENGAGE_ROBOTS = False
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = private.EMAIL_HOST
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = False
SESSION_COOKIE_SECURE = True
REDIRECT_SECRET_KEY = private.REDIRECT_SECRET_KEY
ADMINS = ()
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'mysql_pool'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DATABASES['slave'] = dj_database_url.parse(private.DATABASES_SLAVE_URL)
DATABASES['slave']['ENGINE'] = 'mysql_pool'
DATABASES['slave']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
SERVICES_DATABASE = dj_database_url.parse(private.SERVICES_DATABASE_URL)
DATABASE_POOL_ARGS = {
'max_overflow': 10,
'pool_size': 5,
'recycle': 30
}
SLAVE_DATABASES = ['slave']
CACHES = {
'default': {
'BACKEND': 'caching.backends.memcached.CacheClass',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'BACKEND': 'memcachepool.cache.UMemcacheCache',
'LOCATION': splitstrip(private.CACHES_DEFAULT_LOCATION),
'TIMEOUT': 500,
'KEY_PREFIX': CACHE_PREFIX,
},
}
SECRET_KEY = private.SECRET_KEY
LOG_LEVEL = logging.DEBUG
# Celery
BROKER_URL = private.BROKER_URL
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
NETAPP_STORAGE = private.NETAPP_STORAGE_ROOT + '/shared_storage'
MIRROR_STAGE_PATH = private.NETAPP_STORAGE_ROOT + '/public-staging'
GUARDED_ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/guarded-addons'
UPLOADS_PATH = NETAPP_STORAGE + '/uploads'
USERPICS_PATH = UPLOADS_PATH + '/userpics'
ADDON_ICONS_PATH = UPLOADS_PATH + '/addon_icons'
COLLECTION_ICONS_PATH = UPLOADS_PATH + '/collection_icons'
IMAGEASSETS_PATH = UPLOADS_PATH + '/imageassets'
REVIEWER_ATTACHMENTS_PATH = UPLOADS_PATH + '/reviewer_attachment'
PREVIEWS_PATH = UPLOADS_PATH + '/previews'
SIGNED_APPS_PATH = NETAPP_STORAGE + '/signed_apps'
SIGNED_APPS_REVIEWER_PATH = NETAPP_STORAGE + '/signed_apps_reviewer'
PREVIEW_THUMBNAIL_PATH = PREVIEWS_PATH + '/thumbs/%s/%d.png'
PREVIEW_FULL_PATH = PREVIEWS_PATH + '/full/%s/%d.%s'
HERA = []
LOGGING['loggers'].update({
'z.task': {'level': logging.DEBUG},
'z.hera': {'level': logging.INFO},
'z.redis': {'level': logging.DEBUG},
'z.pool': {'level': logging.ERROR},
})
REDIS_BACKEND = private.REDIS_BACKENDS_CACHE
REDIS_BACKENDS = {
'cache': private.REDIS_BACKENDS_CACHE,
'cache_slave': private.REDIS_BACKENDS_CACHE_SLAVE,
'master': private.REDIS_BACKENDS_MASTER,
'slave': private.REDIS_BACKENDS_SLAVE,
}
CACHE_MACHINE_USE_REDIS = True
RECAPTCHA_PUBLIC_KEY = private.RECAPTCHA_PUBLIC_KEY
RECAPTCHA_PRIVATE_KEY = private.RECAPTCHA_PRIVATE_KEY
RECAPTCHA_URL = (
'https://www.google.com/recaptcha/api/challenge?k=%s' %
RECAPTCHA_PUBLIC_KEY)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
ADDONS_PATH = private.NETAPP_STORAGE_ROOT + '/files'
PERF_THRESHOLD = 20
SPIDERMONKEY = '/usr/bin/tracemonkey'
# Remove DetectMobileMiddleware from middleware in production.
detect = 'mobility.middleware.DetectMobileMiddleware'
csp = 'csp.middleware.CSPMiddleware'
RESPONSYS_ID = private.RESPONSYS_ID
CRONJOB_LOCK_PREFIX = 'marketplace-identity-stage'
BUILDER_SECRET_KEY = private.BUILDER_SECRET_KEY
BUILDER_VERSIONS_URL = (
"https://builder-addons.allizom.org/repackage/sdk-versions/")
ES_HOSTS = splitstrip(private.ES_HOSTS)
ES_URLS = ['http://%s' % h for h in ES_HOSTS]
ES_INDEXES = dict((k, '%s_identity_stage' % v) for k, v in ES_INDEXES.items())
BUILDER_UPGRADE_URL = "https://builder-addons.allizom.org/repackage/rebuild/"
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
GRAPHITE_HOST = private.GRAPHITE_HOST
GRAPHITE_PORT = private.GRAPHITE_PORT
GRAPHITE_PREFIX = private.GRAPHITE_PREFIX
CEF_PRODUCT = STATSD_PREFIX
ES_TIMEOUT = 60
EXPOSE_VALIDATOR_TRACEBACKS = True
KNOWN_PROXIES += ['10.2.83.105',
'10.2.83.106',
'10.2.83.107',
'10.8.83.200',
'10.8.83.201',
'10.8.83.202',
'10.8.83.203',
'10.8.83.204',
'10.8.83.210',
'10.8.83.211',
'10.8.83.212',
'10.8.83.213',
'10.8.83.214',
'10.8.83.215',
'10.8.83.251',
'10.8.83.252',
'10.8.83.253',
]
NEW_FEATURES = True
PERF_TEST_URL = (
'http://talos-addon-master1.amotest.scl1.mozilla.com/trigger/trigger.cgi')
REDIRECT_URL = 'https://outgoing.allizom.org/v1/'
CLEANCSS_BIN = 'cleancss'
UGLIFY_BIN = 'uglifyjs'
CELERYD_TASK_SOFT_TIME_LIMIT = 240
LESS_PREPROCESS = True
XSENDFILE_HEADER = 'X-Accel-Redirect'
ALLOW_SELF_REVIEWS = True
GEOIP_URL = 'http://geo.marketplace.allizom.org'
API_THROTTLE = False
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + ("https://firefoxos.anosrep.org",)
CSP_FRAME_SRC = CSP_FRAME_SRC + ("https://firefoxos.anosrep.org",)
AES_KEYS = private.AES_KEYS
|
DXCanas/kolibri
|
kolibri/core/content/apps.py
|
Python
|
mit
| 410
| 0
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.apps import AppConfig
class KolibriContentConfig(AppConfig):
name = 'kolibri.core.content'
label = 'content'
verbose_name = 'Kolibri Content'
def ready(self):
from kolibri.core.content.utils.sqlalchemybridge import prepare_bases
prepare_bases()
|
mzdaniel/oh-mainline
|
vendor/packages/twisted/doc/core/howto/listings/TwistedQuotes/pbquote.py
|
Python
|
agpl-3.0
| 193
| 0.010363
|
from twisted.spread import pb
class QuoteReader(pb.Root):
def __init__(self, quoter):
self.quoter = quoter
def remote_nextQuote(self):
return self.quoter.getQuote()
| |
FedeMPouzols/Savu
|
doc/source/files_and_images/example_test.py
|
Python
|
gpl-3.0
| 1,241
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: r1
:platform: r2
:synopsis: r3
.. moduleauthor:: r4
"""
import unittest
import tempfile
import savu.test.test_utils as tu
from savu.test.plugin_runner_test import run_protected_plugin_runner
class "r5"(unittest.TestCase):
def "r6"(self):
options = {
"transport": "hdf5",
"process_names": "CPU0",
"data_file": tu.get_test_data_path("r7"),
"process_file": tu.get_test_process_path("r8"),
"out_path": tempfile.mkdtemp()
}
run_protected_plugin_runner(options)
if __name__ == "__main__":
unittest.main()
|
ops-org/sistema-ops-backend
|
parlamentar/models.py
|
Python
|
gpl-3.0
| 772
| 0.001295
|
from django.db import models
class Profissao(models.Model):
nome = models.CharField(max_length=128)
class Partido(models.Model):
nome = models.CharField(max_length=128)
sigla = models.CharField(max_length=32)
class Deputado(models.Model):
DEPUTADO_SEXO_CHOICES = (
('m', 'Masculino'),
('f', 'Feminino')
)
nome = models.CharField(max_length=1024)
nome_civil = models.CharField(max_length=254, null=True, blank=True)
email = models.EmailField()
profissao = models.ForeignKey(Profissao, related_name='deputados')
sexo = models.CharField(max_length=1, default='m', choices=DEPUTADO_SEXO_CHOICES)
nascimento = models.DateField(null=True, blank=True)
falecimento = models.DateField(null=True, blank=True)
|
mpihlak/skytools-dev
|
setup_skytools.py
|
Python
|
isc
| 1,526
| 0.017038
|
#! /usr/bin/env python
# this script does not perform full installation,
# it is meant for use from Makefile
import sys, os.path, re
from distutils.core import setup
from distutils.extension import Extension
# check if configure has run
if not os.path.isfile('config.mak'):
print "please run ./configure && make first"
print "Note: setup.py is supposed to be run from Makefile"
sys.exit(1)
# load version
buf = open("configure.ac","r").read(256)
m = re.search("AC_INIT[(][^,]*,\s+([^)]*)[)]", buf)
ac_ver = m.group(1)
def getvar(name):
cf = open('config.mak').read()
m = re.search(r'^%s\s*=\s*(.*)' % name, cf, re.M)
return m.group(1).strip()
sfx = getvar('SUFFIX')
share_dup_files = [
'sql/pgq/pgq.sql',
'sql/londiste/londiste.sql',
'sql/pgq_ext/pgq_ext.sql',
'sql/pgq_node/pgq_node.sql',
]
if os.path.isfile('sql/txid/txid.sql'):
share_dup_files.append('sql/txid/txid.sql')
# run actual setup
setup(
name = "skytools",
license = "BSD",
version = ac_ver,
maintainer = "Marko Kreen",
maintainer_email = "markokr@gmail.com",
url = "http://pgfoundry.org/projects/skytools/",
package_dir = {'': 'python'},
packages = ['skytools', 'londiste', 'pgq', 'pgq.cascade'],
data_files = [
('share/doc/skytools%s/conf' % sfx, [
'python/conf/wal-master.ini',
'python/conf/wal-slave.ini',
]),
('share/skytools' + sfx, share_dup_files)],
ext_modules=[Extension("skytools._cquoting", ['python/modules/cquoting.c'])],
)
|
dturner-tw/pants
|
src/python/pants/fs/archive.py
|
Python
|
apache-2.0
| 5,886
| 0.009004
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from abc import abstractmethod
from collections import OrderedDict
from zipfile import ZIP_DEFLATED, ZIP_STORED
from pants.util.contextutil import open_tar, open_zip
from pants.util.dirutil import safe_walk
from pants.util.meta import AbstractClass
from pants.util.strutil import ensure_text
"""Support for wholesale archive creation and extraction in a uniform API across archive types."""
class Archiver(AbstractClass):
@classmethod
def extract(cls, path, outdir):
"""Extracts an archive's contents to the specified outdir."""
raise NotImplementedError()
@abstractmethod
def create(self, basedir, outdir, name, prefix=None):
"""Creates an archive of all files found under basedir to a file at outdir of the given name.
If prefix is specified, it should be prepended to all archive paths.
"""
class TarArchiver(Archiver):
"""An archiver that stores files in a tar file with optional compression."""
@classmethod
def extract(cls, path, outdir):
with open_tar(path, errorlevel=1) as tar:
tar.extractall(outdir)
def __init__(self, mode, extension):
super(TarArchiver, self).__init__()
self.mode = mode
self.extension = extension
def create(self, basedir, outdir, name, prefix=None):
basedir = ensure_text(basedir)
tarpath = os.path.join(outdir, '{}.{}'.format(ensure_text(name), self.extension))
with open_tar(tarpath, self.mode, dereference=True, errorlevel=1) as tar:
tar.add(basedir, arcname=prefix or '.')
return tarpath
class ZipArchiver(Archiver):
"""An archiver that stores files in a zip file with optional compression."""
@classmethod
def extract(cls, path, outdir, filter_func=None):
"""Extract from a zip file, with an optional filter
:param string path: path to the zipfile to extract from
:param string outdir: directory to extract files into
:param function filter_func: optional filter with the filename as the parameter. Returns True if
the file should be extracted.
"""
with open_zip(path) as archive_file:
for name in archive_file.namelist():
# While we're at it, we also perform this safety test.
if name.startswith(b'/') or name.startswith(b'..'):
raise ValueError('Zip file contains unsafe path: {}'.format(name))
# Ignore directories. extract() will create parent dirs as needed.
# OS X's python 2.6.1 has a bug in zipfile that makes it unzip directories as regular files.
# This method should work on for python 2.6-3.x.
# TODO(Eric Ayers) Pants no longer builds with python 2.6. Can this be removed?
if not name.endswith(b'/'):
if (not filter_func or filter_func(name)):
archive_file.extract(name, outdir)
def __init__(self, compression, extension):
super(ZipArchiver, self).__init__()
self.compression = compression
self.extension = extension
def create(self, basedir, outdir, name, prefix=None):
zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension))
with open_zip(zippath, 'w', compression=self.compression) as zip:
# For symlinks, we want to archive the actual content of linked files but
# under the relpath derived from symlink.
for root, _, files in safe_walk(basedir, followlinks=True):
root = ensure_text(root)
for file in files:
file = ensure_text(file)
full_path = os.path.join(root, file)
relpath = os.path.relpath(full_path, basedir)
if prefix:
relpath = os.path.join(ensure_text(prefix), relpath)
zip.write(full_path, relpath)
return zippath
TAR = TarArchiver('w:', 'tar')
TGZ = TarArchiver('w:gz', 'tar.gz')
TBZ2 = TarArchiver('w:bz2', 'tar.bz2')
ZIP = ZipArchiver(ZIP_DEFLATED, 'zip')
JAR = ZipArchiver(ZIP_STORED, 'jar')
_ARCHIVER_BY_TYPE = OrderedDict(tar=TAR, tgz=TGZ, tbz2=TBZ2, zip=ZIP, jar=JAR)
TYPE_NAMES = frozenset(_ARCHIVER_BY_TYPE.keys())
def archiver(typename):
"""Returns Archivers in common configurations.
The typename must correspond to one of the following:
'tar' Returns a tar archiver that applies no compression and emits .tar files.
'tgz' Returns a tar archiver that applies gzip compression and emits .tar.gz files.
'tbz2' Returns a tar archiver that applies bzip2 compression and emits .tar.bz2 files.
'zip' Returns a zip archiver that applies standard compression and emits .zip files.
'jar' Returns a jar archiver that applies no compression and emits .jar files.
Note this is provided as a light way of zipping input files into a jar, without the
need to prepare Manifest etc. For more advanced usages, please refer to :class:
`pants.backend.jvm.subsystems.jar_tool.JarTool` or :class:
`pants.backend.jvm.tasks.jar_task.JarTask`.
"""
archiver = _ARCHIVER_BY_TYPE.get(typename)
if not archiver:
raise ValueError('No archiver registered for {!r}'.format(typename))
return archiver
def archiver_for_path(path_name):
"""Returns an Archiver for the given path name.
:param string path_name: The path name of the archive - need not exist.
:raises: :class:`ValueError` If the path name does not uniquely identify a supported archive type.
"""
if path_name.endswith('.tar.gz'):
return TGZ
elif path_name.endswith('.tar.bz2'):
return TBZ2
else:
_, ext = os.path.splitext(path_name)
if ext:
ext = ext[1:] # Trim leading '.'.
if not ext:
raise ValueError('Could not determine archive type of path {}'.format(path_name))
return archiver(ext)
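# A minimal usage sketch (paths are illustrative):
#
#     archiver_for_path('dist/out.tar.gz')   # -> TGZ
#     archiver_for_path('dist/out.jar')      # -> JAR
#     archiver('zip').create(basedir, outdir, 'out')   # writes <outdir>/out.zip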
|
BelgianBiodiversityPlatform/Astapor
|
website/specimens/management/commands/full_import.py
|
Python
|
bsd-2-clause
| 792
| 0.005051
|
from django.core import management
from ._utils import AstaporCommand
class Command(AstaporCommand):
help = 'Call other commands in sequence to perform the full data import and initial processing.'
def add_arguments(self, parser):
parser.add_argument('specimen_csv_file')
parser.add_argument('taxonomy_csv_file')
def handle(self, *args, **options):
self.w('1. Importing specimens')
management.call_command('import_specimens', '--truncate', '{f}'.format(f=options['specimen_csv_file']))
self.w('2. Importing taxonomy')
management.call_command('import_taxonomy', '--truncate', '{f}'.format(f=options['taxonomy_csv_file']))
self.w('3. Reconcile taxonomy')
management.call_command('reconcile_taxonomy', '--all')
|
praekelt/molo
|
molo/core/migrations/0004_configure_root_page.py
|
Python
|
bsd-2-clause
| 1,214
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def configure_root_page(apps, schema_editor):
# Get models
ContentType = apps.get_model('contenttypes.ContentType')
Site = apps.get_model('wagtailcore.Site')
Main = apps.get_model('core.Main')
HomePage = apps.get_model('core.HomePage')
# Delete the default homepage
HomePage.objects.all().delete()
# Create content type for main model
main_content_type, created = ContentType.objects.get_or_create(
model='main', app_label='core')
# Create a new homepage
main = Main.objects.create(
title="Main
|
",
slug='main',
content_type=main_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
# Create a site with the new homepage set as the root
Site.objects.all().delete()
Site.objects.create(
hostname='localhost', root_page=main, is_default_site=True)
class Migration(migrations.Migration):
dependencies = [
('core', '0003_articlepage_languagepage_main_sectionpage'),
]
operations = [
migrations.RunPython(configure_root_page),
]
|
jcurry/ZenPacks.community.PredictiveThreshold
|
ZenPacks/community/PredictiveThreshold/interfaces.py
|
Python
|
gpl-2.0
| 2,440
| 0.004098
|
##########################################################################
# Author: Jane Curry, jane.curry@skills-1st.co.uk
# Date: April 19th, 2011
# Revised:
#
# interfaces.py for Predictive Threshold ZenPack
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""interfaces.py
Representation of Predictive Threshold components.
$Id: info.py,v 1.2 2010/12/14 20:45:46 jc Exp $"""
__version__ = "$Revision: 1.4 $"[11:-2]
from Products.Zuul.interfaces import IInfo, IFacade
from Products.Zuul.interfaces.template import IThresholdInfo
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
class IPredThresholdInfo(IThresholdInfo):
"""
Interfaces for Predictive Threshold
"""
# pointval = schema.List(title=_t(u"Data Point"), xtype='datapointitemselector', order=6)
escalateCount = schema.Int(title=_t(u'Escalate Count'), order=9)
alpha = schema.Text(title=_t(u'Alpha'), order=10)
beta = schema.Text(title=_t(u'Beta'), order=11)
gamma = schema.Text(title=_t(u'Gamma'), order=12)
rows = schema.Text(title=_t(u'Rows'), order=13)
season = schema.Text(title=_t(u'Season'), order=14)
window = schema.Text(title=_t(u'Window'), order=15)
threshold = schema.Text(title=_t(u'Threshold'), order=16)
delta = schema.Text(title=_t(u'Delta'), order=17)
predcolor = schema.Text(title=_t(u'Prediction Color'), order=18)
cbcolor = schema.Text(title=_t(u'Confidence Band Color'), order=19)
tkcolor = schema.Text(title=_t(u'Tick Color'), order=20)
# pointval = schema.List(title=_t(u"Data Point"), xtype='datapointitemselector')
# escalateCount = schema.Int(title=_t(u'Escalate Count'))
# alpha = schema.Text(title=_t(u'Alpha'))
# beta = schema.Text(title=_t(u'Beta'))
# gamma = schema.Text(title=_t(u'Gamma'))
# rows = schema.Text(title=_t(u'Rows'))
# season = schema.Text(title=_t(u'Season'))
# window = schema.Text(title=_t(u'Window'))
# threshold = schema.Text(title=_t(u'Threshold'))
# delta = schema.Text(title=_t(u'Delta'))
# predcolor = schema.Text(title=_t(u'Prediction Color'))
# cbcolor = schema.Text(title=_t(u'Confidence Band Color'))
# tkcolor = schema.Text(title=_t(u'Tick Color'))
|
jai1/pulsar
|
pulsar-functions/instance/src/main/python/util.py
|
Python
|
apache-2.0
| 2,346
| 0.008951
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""util.py: Some misc utility functions
"""
import os
import inspect
import sys
import importlib
import log
Log = log.Log
PULSAR_API_ROOT = 'pulsar'
PULSAR_FUNCTIONS_API_ROOT = 'functions'
def import_class(from_path, full_class_name):
from_path = str(from_path)
full_class_name = str(full_class_name)
try:
return import_class_from_path(from_path, full_class_name)
except Exception as e:
our_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
api_dir = os.path.join(our_dir, PULSAR_API_ROOT, PULSAR_FUNCTIONS_API_ROOT)
try:
return import_class_from_path(api_dir, full_class_name)
except Exception as e:
Log.info("Failed to import class %s from path %s" % (full_class_name, from_path))
Log.info(e, exc_info=True)
return None
def import_class_from_path(from_path, full_class_name):
Log.debug('Trying to import %s from path %s' % (full_class_name, from_path))
split = full_class_name.split('.')
classname_path = '.'.join(split[:-1])
class_name = full_class_name.split('.')[-1]
if from_path not in sys.path:
Log.debug("Add a new dependency to the path: %s" % from_path)
sys.path.insert(0, from_path)
if not classname_path:
mod = importlib.import_module(class_name)
return mod
else:
mod = importlib.import_module(classname_path)
retval = getattr(mod, class_name)
return retval
def getFullyQualifiedFunctionName(tenant, namespace, name):
return "%s/%s/%s" % (tenant, namespace, name)
|
sergeyfarin/pyqt-fit
|
pyqt_fit/loader.py
|
Python
|
gpl-3.0
| 3,942
| 0.001776
|
from __future__ import print_function, absolute_import
import inspect
from path import path
import imp
import sys
import re
bad_chars = re.compile(u'\W')
python_version = sys.version_info
if python_version.major == 2 and python_version.minor == 7:
if sys.platform == 'win32' or sys.platform == 'cygwin':
module_exts = ['.dll']
elif sys.platform == 'darwin':
module_exts = ['.dylib']
else:
module_exts = ['.so']
module_exts += ['.pyx', '.pyc', '.py']
def load_module(pack_name, module_name, search_path):
""" Version for Python 2.7 """
mod_desc = imp.find_module(module_name, [search_path])
return imp.load_module(pack_name, *mod_desc)
elif python_version.major == 3 and python_version.minor >= 3:
from importlib import machinery as ilm
module_exts = ilm.all_suffixes()
module_exts.append('.pyx')
module_exts = module_exts[::-1]
def create_loader(pack_name, filepath):
ext = filepath.ext
if ext in ilm.SOURCE_SUFFIXES:
return ilm.SourceFileLoader(pack_name, str(filepath))
if ext in ilm.BYTECODE_SUFFIXES:
return ilm.SourcelessFileLoader(pack_name, str(filepath))
if ext in ilm.EXTENSION_SUFFIXES:
return ilm.ExtensionFileLoader(pack_name, str(filepath))
if python_version.minor == 3:
def create_module(loader):
" Version for Python 3.3 "
return loader.load_module()
else:
from types import ModuleType
def create_module(loader):
" Version for Python 3.4 or later "
mod = ModuleType(loader.name)
loader.exec_module(mod)
return mod
module_loaders = [ (ilm.EXTENSION_SUFFIXES, ilm.ExtensionFileLoader),
(ilm.SOURCE_SUFFIXES, ilm.SourceFileLoader),
(ilm.BYTECODE_SUFFIXES, ilm.SourcelessFileLoader) ]
def load_module(pack_name, module_name, search_path):
pth = path(search_path) / module_name
for exts, loader_cls in module_loaders:
for ext in exts:
filename = pth + ext
if filename.exists():
loader = loader_cls(pack_name, str(filename))
mod = create_module(loader)
if mod is not None:
return mod
else:
raise ImportError("This module can only be imported with python 2.7 and 3.x where x >= 3")
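# A minimal usage sketch (names and path are illustrative): load the module file
# named 'residuals' found under /path/to/plugins and register it as
# 'myapp.plugins_residuals', on either supported Python version:
#
#     mod = load_module('myapp.plugins_residuals', 'residuals', '/path/to/plugins')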
def load(find_functions, search_path=None):
"""
Load the modules in the search_path.
If search_path is None, then load modules in the same folder as the function looking for them.
"""
caller_module = inspect.getmodule(inspect.stack()[1][0])
system_files = [caller_module.__file__]
module_path = path(caller_module.__file__).abspath().dirname()
sys_files = set()
for f in system_files:
if f.endswith(".pyo") or f.endswith(".pyc"):
f = f[:-3] + "py"
sys_files.add(path(f).abspath())
if search_path is None:
search_path = module_path
else:
search_path = path(search_path).abspath()
fcts = {}
# Search for python, cython and modules
modules = set()
for ext in module_exts:
for f in search_path.files("*" + ext):
if f.basename()[:2] != '__':
module_name = f.namebase
modules.add(module_name)
for module_name in modules:
pack_name = '%s.%s_%s' % (caller_module.__name__,
bad_chars.sub('_', module_path),
module_name)
try:
mod = load_module(pack_name, module_name, search_path)
fcts.update(find_functions(mod))
except ImportError as ex:
print("Warning, cannot import module '{0}' from {1}: {2}"
.format(module_name, caller_module.__name__, ex), file=sys.stderr)
return fcts
|
PearsonIOKI/compose-forum
|
askbot/bin/rebuildlocales.py
|
Python
|
gpl-3.0
| 365
| 0.005479
|
import os
import subprocess
locales = os.listdir('locale')
def call_command(command):
print command
subprocess.call(command.split())
for locale in locales:
call_command(
'python ../manage.py jinja2_makemessages -l %s -e html,py,txt' % locale
)
call_command(
'python ../manage.py makemessages -l %s -d djangojs' % locale
)
|
saltastro/salt-data-quality-site
|
test_bokeh_model.py
|
Python
|
mit
| 2,995
| 0.004341
|
import argparse
import importlib
import inspect
import os
import sys
import traceback
from bokeh.plotting import output_file, show
from fabulous.color import bold, red
from app import create_app
def error(msg, stacktrace=None):
"""Print an error message and exit.
Params:
-------
msg: str
Error message.
stacktrace: str
Stacktrace.
"""
if stacktrace:
print(stacktrace)
print(bold(red(msg)))
sys.exit(1)
# get command line arguments
parser = argparse.ArgumentParser(description='Test a Bokeh model.')
parser.add_argument('module_file',
type=str,
help='Python file containing the Bokeh model')
parser.add_argument('model_function',
type=str,
help='Function returning the Bokeh model')
parser.add_argument('func_args',
type=str,
nargs='*',
help='Arguments to pass to the model function')
args = parser.parse_args()
# directory in which this script is located
# note: this must not be app or a subdirectory thereof
this_file = os.path.realpath(__file__)
base_dir = os.path.abspath(os.path.join(this_file, os.path.pardir))
# ensure the given module file is a Python file
module_file = os.path.abspath(args.module_file)
if not module_file.lower().endswith('.py'):
error('The module filename must end with ".py".')
# find the path of the module file relative to the base directory of the project
module_path = os.path.relpath(module_file, os.path.commonprefix([module_file, this_file]))
# convert the path into a module name (remove ".py" and replace separators with dots)
module = module_path[:-3].replace(os.path.sep, '.')
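# e.g. (illustrative) 'app/main/dashboard/plots.py' -> 'app.main.dashboard.plots'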
# import the module and find the requested function to test
try:
imported_module = importlib.import_module(module, __package__)
except:
error('The module {module} couldn\'t be imported. Does the model exist?'.format(module=module))
functions = [member for member in inspect.getmembers(imported_module) if member[0] == args.model_function]
if len(functions) == 0:
error('There is no function "{func}" defined in {module}'.format(func=args.model_function, module=module))
if len(functions) > 1:
error('The name "{func}" is ambiguous in the module {module}.'.format(func=args.model_function, module=module))
# set up Flask app context
# if we don't do this SQLAlchemy will fail
app = create_app(os.getenv('FLASK_CONFIG') or 'development')
app_context = app.app_context()
app_context.push()
# get the Bokeh model
func = functions[0][1]
try:
model = func(*args.func_args)
except:
error('The call to function "{func}" failed.'.format(func=args.model_function),
traceback.format_exc(1))
# output the model
output_file('/tmp/bokeh_test.html')
try:
show(model)
except:
error('The Bokeh model couldn\'t be output. (Is your function returning a Bokeh model?)',
traceback.format_exc())
# clean up
app_context.pop()
|
harshadyeola/easyengine
|
tests/cli/a_test_site_disable.py
|
Python
|
mit
| 394
| 0
|
from ee.utils import test
from ee.cli.main import get_test_app
class CliTestCaseSite(test.EETestCase):
def test_ee_cli(self):
self.app.setup()
self.app.run()
self.app.close()
def test_ee_cli_site_disable(self):
self.app = get_test_app(argv=['site', 'disable', 'example2.com'])
self.app.setup()
self.app.run()
self.app.close()
|
pyblish/pyblish-mindbender
|
run_maya_tests.py
|
Python
|
mit
| 1,052
| 0
|
"""Use Mayapy for testing
Usage:
$ mayapy run_maya_tests.py
"""
import sys
import nose
import warnings
from nose_exclude import NoseExclude
warnings.filterwarnings("ignore", category=DeprecationWarning)
if __name__ == "__main__":
from maya import standalone
standalone.initialize()
argv = sys.argv[:]
argv.extend([
# Sometimes, files from Windows accessed
# from Linux cause the executable flag to be
# set, and Nose has an aversion to these
# per default.
"--exe",
"--verbose",
"--with-doctest",
"--with-coverage",
"--cover-html",
"--cover-tests",
"--cover-erase",
"--exclude-dir=mindbender/nuke",
"--exclude-dir=mindbender/houdini",
"--exclude-dir=mindbender/schema",
"--exclude-dir=mindbender/plugins",
# We can expect any vendors to
# be well tested beforehand.
"--exclude-dir=mindbender/vendor",
])
nose.main(argv=argv,
addplugins=[NoseExclude()])
|
SANBI-SA/tools-iuc
|
tools/kraken_taxonomy_report/kraken_taxonomy_report.py
|
Python
|
mit
| 12,936
| 0.022727
|
#!/usr/bin/env python
# Reports a summary of Kraken's results
# and optionally creates a newick Tree
# Copyright (c) 2016 Daniel Blankenberg
# Licensed under the Academic Free License version 3.0
# https://github.com/blankenberg/Kraken-Taxonomy-Report
from __future__ import print_function
import optparse
import os
import re
import sys
__VERSION__ = '0.0.2'
__URL__ = "https://github.com/blankenberg/Kraken-Taxonomy-Report"
# Rank names were pulled from ncbi nodes.dmp on 02/02/2016
# cat nodes.dmp | cut -f 5 | sort | uniq
# "root" is added manually
NO_RANK_NAME = "no rank"
RANK_NAMES = [ NO_RANK_NAME,
"root",
"superkingdom",
"kingdom",
"subkingdom",
"superphylum",
"phylum",
"subphylum",
"superclass",
"class",
"subclass",
"infraclass",
"superorder",
"order",
"suborder",
"infraorder",
"parvorder",
"superfamily",
"family",
"subfamily",
"tribe",
"subtribe",
"genus",
"subgenus",
"species group",
"species subgroup",
"species",
"subspecies",
"varietas",
"forma" ]
# NB: We put 'no rank' at top of list for generating trees, due to e.g.
# root (root) -> cellular organisms (no rank) -> bacteria (superkingdom)
RANK_NAME_TO_INTS = dict( [ (y, x) for (x, y) in enumerate( RANK_NAMES ) ] )
RANK_NAMES_INTS = range( len( RANK_NAMES ) )
NO_RANK_INT = RANK_NAMES.index( NO_RANK_NAME )
NO_RANK_CODE = 'n'
PRIMARY_RANK_NAMES = [ 'species', 'genus', 'family', 'order', 'class', 'phylum', 'kingdom' ]
RANK_INT_TO_CODE = {}
for name in PRIMARY_RANK_NAMES:
RANK_INT_TO_CODE[ RANK_NAMES.index( name ) ] = name[0]
RANK_INT_TO_CODE[ RANK_NAMES.index( 'superkingdom' ) ] = 'd'
PRIMARY_RANK_NAMES.append( 'superkingdom' )
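# Resulting one-letter codes: d (superkingdom), k, p, c, o, f, g, s; every other
# rank falls back to NO_RANK_CODE ('n') when reporting.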
NAME_STUB = "%s__%s"
NAME_RE = re.compile( "(\t| |\||\.;)" )
NAME_REPL = "_"
def get_kraken_db_path( db ):
assert db, ValueError( "You must provide a kraken database" )
k_db_path = os.getenv('KRAKEN_DB_PATH', None )
if k_db_path:
db = os.path.join( k_db_path, db )
return db
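# Hedged example: with KRAKEN_DB_PATH=/data/kraken (illustrative), 'minikraken'
# resolves to '/data/kraken/minikraken'; without the variable the name is
# returned unchanged.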
def load_taxonomy( db_path, sanitize_names=False ):
child_lists = {}
name_map = {}
rank_map = {}
names = {} # Store names here to look for duplicates (id, True/False name fixed)
with open( os.path.join( db_path, "taxonomy/names.dmp" ) ) as fh:
for line in fh:
line = line.rstrip( "\n\r" )
if line.endswith( "\t|" ):
line = line[:-2]
fields = line.split( "\t|\t" )
node_id = fields[0]
name = fields[1]
if sanitize_names:
name = NAME_RE.sub( NAME_REPL, name )
name_type = fields[3]
if name_type == "scientific name":
if name in names:
print( 'Warning: name "%s" found at node "%s" but already exists originally for node "%s".' % ( name, node_id, names[name][0] ), file=sys.stderr )
new_name = "%s_%s" % ( name, node_id )
print( 'Transforming node "%s" named "%s" to "%s".' % ( node_id, name, new_name ), file=sys.stderr )
assert new_name not in names, 'Transformed Name "%s" already exists. Cannot recover at this time.' % new_name
if not names[name][1]:
orig_new_name = "%s_%s" % ( name, names[name][0] )
print( 'Transforming node "%s" named "%s" to "%s".' % ( names[name][0], name, orig_new_name ), file=sys.stderr )
assert orig_new_name not in names, 'Transformed Name "%s" already exists. Cannot recover at this time.' % orig_new_name
name_map[names[name][0]] = orig_new_name
names[name] = ( names[name][0], True )
name = new_name
else:
names[name] = ( node_id, False )
name_map[ node_id ] = name
with open( os.path.join( db_path, "taxonomy/nodes.dmp" ) ) as fh:
for line in fh:
line = line.rstrip( "\n\r" )
fields = line.split( "\t|\t" )
node_id = fields[0]
parent_id = fields[1]
rank = RANK_NAME_TO_INTS.get( fields[2].lower(), None )
if rank is None:
# This should never happen, unless new taxonomy ranks are created
                print( 'Unrecognized rank: Node "%s" is "%s", setting to "%s"' % ( node_id, fields[2], NO_RANK_NAME ), file=sys.stderr )
rank = NO_RANK_INT
if node_id == '1':
parent_id = '0'
if parent_id not in child_lists:
child_lists[ parent_id ] = []
child_lists[ parent_id ].append( node_id )
rank_map[node_id] = rank
return ( child_lists, name_map, rank_map )
def dfs_summation( node, counts, child_lists ):
children = child_lists.get( node, None )
if children:
for child in children:
dfs_summation( child, counts, child_lists )
counts[ node ] = counts.get( node, 0 ) + counts.get( child, 0 )
def dfs_report( node, file_data, hit_taxa, rank_map, name_map, child_lists, output_lines, options, name=None, tax=None ):
rank_int = rank_map[node]
code = RANK_INT_TO_CODE.get( rank_int, NO_RANK_CODE )
if ( code != NO_RANK_CODE or options.intermediate ) and ( options.show_zeros or node in hit_taxa):
if name is None:
name = ""
else:
name = "%s|" % name
if tax is None:
tax = ''
else:
tax = "%s;" % tax
sanitized_name = name_map[ node ]
name_stub = NAME_STUB % ( code, sanitized_name )
name = name + name_stub
tax = tax + name_stub
if options.name_id:
output = node
elif options.name_long:
output = name
else:
output = sanitized_name
for val in file_data:
output = "%s\t%i" % ( output, val.get( node, 0 ) )
if options.show_rank:
output = "%s\t%s" % ( output, RANK_NAMES[ rank_int ] )
if options.taxonomy:
output = "%s\t%s" % ( output, tax )
output_lines[ rank_int ].append( output )
children = child_lists.get( node )
if children:
for child in children:
dfs_report( child, file_data, hit_taxa, rank_map, name_map, child_lists, output_lines, options, name=name, tax=tax )
def write_tree( child_lists, name_map, rank_map, options, branch_length=1 ):
# Uses Biopython, only load if making tree
import Bio.Phylo
from Bio.Phylo import BaseTree
def _get_name( node_id ):
if options.name_id:
return node_id
return name_map[node_id]
nodes = {}
root_node_id = child_lists["0"][0]
nodes[root_node_id] = BaseTree.Clade( name=_get_name( root_node_id), branch_length=branch_length )
def recurse_children( parent_id ):
if options.cluster is not None and rank_map[parent_id] == options.cluster:
# Short circuit if we found our rank, prevents 'hanging' no ranks from being output
# e.g. clustering by "species" (Escherichia coli), but have "no rank" below (Escherichia coli K-12) in test_db
return
if parent_id not in nodes:
nodes[parent_id] = BaseTree.Clade( name=_get_name( parent_id ), branch_length=branch_length )
for child_id in child_lists.get( parent_id, [] ):
if options.cluster is None or ( rank_map[child_id] <= options.cluster ):
if child_id not in nodes:
nodes[child_id] = BaseTree.Clade(name=_get_name( child_id ), branch_length=branch_length)
nodes[parent_id].clades.append(nodes[child_id])
recurse_children( child_id )
recurse_children( root_node_id )
tree = BaseTree.Tree(root=nodes[r
|
probml/pyprobml
|
scripts/bayes_change_of_var.py
|
Python
|
mit
| 1,657
| 0.006035
|
# Based on https://github.com/probml/pmtk3/blob/master/demos/bayesChangeOfVar.m
# MC on change of variables and empirical distribution, highlighting that
# modes are not, in general, preserved.
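# For an invertible map y = g(x) the transformed density is
#   p_Y(y) = p_X(g^{-1}(y)) * |d g^{-1}(y) / dy|,
# and the Jacobian factor can move the maximum, so argmax p_Y != g(argmax p_X)
# in general; the histograms below illustrate exactly this.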
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import os
from pyprobml_utils import save_fig
# Ensure stochastic reproducibility.
np.random.seed(42)
# Define a mapping from x-space to y-space.
def ginv(x):
"""transform func"""
return 1 / (1 + np.exp(5 - x))
# Define a probability density on x-space, and sample from it.
mu = 6
sigma = 1
n = 10 ** 6
x_samples = norm.rvs(size=n, loc=mu, scale=sigma)
# Calculate a histogram for the samples in x-space and a histogram
# for their transformations to y-space.
hist_x, bin_edges_x = np.histogram(x_samples, bins=50, density=True)
hist_y, bin_edges_y = np.histogram(ginv(x_samples), bins=50, density=True)
# Plot the histograms, the mapping function, and an indication of how
# the x-distribution's mean maps to y-space.
linewidth = 5
plt.bar(bin_edges_x[:-1], hist_x, color='red', align='edge', width=bin_edges_x[1] - bin_edges_x[0])
plt.barh(bin_edges_y[:-1], hist_y, color='green', align='edge', height=bin_edges_y[1] - bin_edges_y[0])
x_range = np.arange(0, 10, 0.01)
plt.plot(x_range, ginv(x_range), 'blue', linewidth=linewidth)
plt.vlines(mu, ymin=0, ymax=ginv(mu), color='yellow', linewidth=linewidth)
plt.hlines(ginv(mu), xmin=0, xmax=mu, color='yellow', linewidth=linewidth)
plt.text(9, 1/10, r'$p_X$');
plt.text(2/3, 2/10, r'$p_Y$');
plt.text(9, ginv(9) - 1/10, r'$g$');
## Save the figure.
save_fig('bayesChangeOfVar.pdf')
plt.show()
|
hasadna/knesset-data-pipelines
|
votes/join_kmmbr_mk_individuals.py
|
Python
|
mit
| 4,076
| 0.002699
|
from datapackage_pipelines.wrapper import ingest, spew
# these members have a problem with their names
# we can match them directly
KMMBR_IDS_DIRECT_MATCH_TO_PERSON_ID = {'000000431': 431}
def get_mk_individuals(resource, data):
data['mks'] = []
for mk in resource:
yield mk
knesset_nums = set()
for position in mk['positions']:
if position.get('KnessetNum'):
knesset_nums.add(position['KnessetNum'])
data['mks'].append({'id': mk['mk_individual_id'],
'first_name': mk['mk_individual_first_name'].strip(),
'last_name': mk['mk_individual_name'].strip(),
'altnames': [name.strip() for name in mk['altnames']],
'knesset_nums': knesset_nums,
'person_id': mk['PersonID']})
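# Matching strategy: a voting record (kmmbr) is linked to an mk_individual by
# comparing its recorded name strings against "first last", "last first" and the
# known altnames, restricted to MKs who served in the same Knesset numbers; the
# match must be unique or an assertion fails.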
def get_mk_individual_id(knesset_nums, kmmbr, data):
if kmmbr['id'] in KMMBR_IDS_DIRECT_MATCH_TO_PERSON_ID:
person_id = KMMBR_IDS_DIRECT_MATCH_TO_PERSON_ID[kmmbr['id']]
mk_individual_ids = set((mk['id'] for mk in data['mks'] if mk['person_id'] == person_id))
else:
mk_individual_ids = set()
for mk in data['mks']:
if any([knesset_num in mk['knesset_nums'] for knesset_num in knesset_nums]):
if '{} {}'.format(mk['first_name'], mk['last_name']) in kmmbr['names']:
mk_individual_ids.add(mk['id'])
if '{} {}'.format(mk['last_name'], mk['first_name']) in kmmbr['names']:
mk_individual_ids.add(mk['id'])
if any([name in kmmbr['names'] for name in mk['altnames']]):
mk_individual_ids.add(mk['id'])
if len(mk_individual_ids) == 0:
return None
else:
assert len(mk_individual_ids) == 1, \
'num of mk ids is not 1 for kmmbr names {}: {}'.format(kmmbr['names'], mk_individual_ids)
return mk_individual_ids.pop()
def get_kmmbr_results(kmmbr, data):
knesset_nums = set()
for vote_rslt in kmmbr['vote_rslts']:
knesset_nums.add(vote_rslt['knesset_num'])
mk_individual_id = get_mk_individual_id(knesset_nums, kmmbr, data)
for vote_rslt in kmmbr['vote_rslts']:
vote_rslt['mk_individual_id'] = mk_individual_id if mk_individual_id is not None else -1
yield vote_rslt
def get_vote_rslts(resource, data):
kmmbr = None
for vote_rslt in resource:
if not kmmbr or kmmbr['id'] != vote_rslt['kmmbr_id']:
            if kmmbr:
yield from get_kmmbr_results(kmmbr, data)
kmmbr = {'id': vote_rslt['kmmbr_id'],
'names': set(),
'vote_rslts': []}
kmmbr['names'].add(vote_rslt['kmmbr_name'].strip())
kmmbr['names'].add(vote_rslt['kmmbr_name'].strip().replace('`', "'"))
kmmbr['vote_rslts'].append(vote_rslt)
yield from get_kmmbr_results(kmmbr, data)
def get_resources(datapackage, resources):
data = {}
for descriptor, resource in zip(datapackage['resources'], resources):
if descriptor['name'] == 'mk_individual_positions':
yield get_mk_individuals(resource, data)
elif descriptor['name'] == 'vote_rslts_kmmbr_shadow':
yield get_vote_rslts(resource, data)
else:
yield resource
def get_datapackage(datapackage):
for descriptor in datapackage['resources']:
if descriptor['name'] == 'vote_rslts_kmmbr_shadow':
new_fields = [{'name': 'mk_individual_id', 'type': 'integer'}]
descriptor['schema']['fields'] = [field for field in descriptor['schema']['fields']
if field['name'] not in [f['name'] for f in new_fields]]
descriptor['schema']['fields'] += new_fields
return datapackage
def main():
parameters, datapackage, resources, stats = ingest() + ({},)
spew(get_datapackage(datapackage),
get_resources(datapackage, resources),
stats)
if __name__ == '__main__':
main()
|
kmoocdev2/edx-platform
|
cms/djangoapps/contentstore/features/html-editor.py
|
Python
|
agpl-3.0
| 9,937
| 0.001409
|
# disable missing docstring
# pylint: disable=missing-docstring
from collections import OrderedDict
from lettuce import step, world
from nose.tools import assert_equal, assert_false, assert_in, assert_true
from common import get_codemirror_value, type_in_codemirror
CODEMIRROR_SELECTOR_PREFIX = "$('iframe').contents().find"
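# jQuery selector prefix for looking up elements inside an iframe (presumably the
# editor's plugin dialog); it is passed to type_in_codemirror and
# get_codemirror_value so they search the iframe contents rather than the page.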
@step('I have created a Blank HTML Page$')
def i_created_blank_html_page(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='html',
component_type='Text'
)
@step('I have created a raw HTML component')
def i_created_raw_html(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='html',
component_type='Raw HTML'
)
@step('I see the HTML component settings$')
def i_see_only_the_html_display_name(step):
world.verify_all_setting_entries(
[
['Display Name', "Text", False],
['Editor', "Visual", False]
]
)
@step('I have created an E-text Written in LaTeX$')
def i_created_etext_in_latex(step):
step.given('I am in Studio editing a new unit')
step.given('I have enabled latex compiler')
world.create_component_instance(
step=step,
category='html',
component_type='E-text Written in LaTeX'
)
@step('I edit the page$')
def i_click_on_edit_icon(step):
world.edit_component()
@step('I add a link with static link "(.*)" via the Link Plugin Icon$')
def i_click_on_link_plugin_icon(step, path):
def fill_in_link_fields():
world.css_fill('.mce-textbox', path, 0)
world.css_fill('.mce-textbox', 'picture', 1)
use_plugin('.mce-i-link', fill_in_link_fields)
@step('the link is shown as "(.*)" in the Link Plugin$')
def check_link_in_link_plugin(step, path):
# Ensure caret position is within the link just created.
script = """
var editor = tinyMCE.activeEditor;
editor.selection.select(editor.dom.select('a')[0]);"""
world.browser.driver.execute_script(script)
world.wait_for_ajax_complete()
use_plugin(
'.mce-i-link',
lambda: assert_equal(path, world.css_find('.mce-textbox')[0].value)
)
@step('type "(.*)" in the code editor and press OK$')
def type_in_codemirror_plugin(step, text):
# Verify that raw code editor is not visible.
assert_true(world.css_has_class('.CodeMirror', 'is-inactive'))
# Verify that TinyMCE editor is present
assert_true(world.is_css_present('.tiny-mce'))
use_code_editor(
lambda: type_in_codemirror(0, text, CODEMIRROR_SELECTOR_PREFIX)
)
@step('and the code editor displays "(.*)"$')
def verify_code_editor_text(step, text):
use_code_editor(
lambda: assert_equal(text, get_codemirror_value(0, CODEMIRROR_SELECTOR_PREFIX))
)
@step('I save the page$')
def i_click_on_save(step):
world.save_component()
@step('the page text contains:')
def check_page_text(step):
assert_in(step.multiline, world.css_find('.xmodule_HtmlModule').html)
@step('the Raw Editor contains exactly:')
def check_raw_editor_text(step):
assert_equal(step.multiline, get_codemirror_value(0))
@step('the src link is rewritten to the asset link "(.*)"$')
def image_static_link_is_rewritten(step, path):
# Find the TinyMCE iframe within the main window
with world.browser.get_iframe('mce_0_ifr') as tinymce:
image = tinymce.find_by_tag('img').first
assert_in(unicode(world.scenario_dict['COURSE'].id.make_asset_key('asset', path)), image['src'])
@step('the href link is rewritten to the asset link "(.*)"$')
def link_static_link_is_rewritten(step, path):
# Find the TinyMCE iframe within the main window
with world.browser.get_iframe('mce_0_ifr') as tinymce:
link = tinymce.find_by_tag('a').first
assert_in(unicode(world.scenario_dict['COURSE'].id.make_asset_key('asset', path)), link['href'])
@step('the expected toolbar buttons are displayed$')
def check_toolbar_buttons(step):
dropdowns = world.css_find('.mce-listbox')
assert_equal(2, len(dropdowns))
# Format dropdown
assert_equal('Paragraph', dropdowns[0].text)
# Font dropdown
assert_equal('Font Family', dropdowns[1].text)
buttons = world.css_find('.mce-ico')
# Note that the code editor icon is not present because we are now showing text instead of an icon.
    # However, other test points use the code editor, so we have already verified its presence.
expected_buttons = [
'bold',
'italic',
'underline',
'forecolor',
# This is our custom "code style" button, which uses an image instead of a class.
'none',
'alignleft',
'aligncenter',
'alignright',
'alignjustify',
'bullist',
'numlist',
'outdent',
'indent',
'blockquote',
'link',
'unlink',
'image'
]
assert_equal(len(expected_buttons), len(buttons))
for index, button in enumerate(expected_buttons):
class_names = buttons[index]._element.get_attribute('class')
assert_equal("mce-ico mce-i-" + button, class_names)
@step('I set the text to "(.*)" and I select the text$')
def set_text_and_select(step, text):
script = """
var editor = tinyMCE.activeEditor;
editor.setContent(arguments[0]);
editor.selection.select(editor.dom.select('p')[0]);"""
world.browser.driver.execute_script(script, str(text))
world.wait_for_ajax_complete()
@step('I select the code toolbar button$')
def select_code_button(step):
# This is our custom "code style" button. It uses an image instead of a class.
world.css_click(".mce-i-none")
@step('type "(.*)" into the Raw Editor$')
def type_in_raw_editor(step, text):
# Verify that CodeMirror editor is not hidden
assert_false(world.css_has_class('.CodeMirror', 'is-inactive'))
# Verify that TinyMCE Editor is not present
assert_true(world.is_css_not_present('.tiny-mce'))
type_in_codemirror(0, text)
@step('I edit the component and select the (Raw|Visual) Editor$')
def select_editor(step, editor):
world.edit_component_and_select_settings()
world.browser.select('Editor', editor)
@step('I click font selection dropdown')
def click_font_dropdown(step):
dropdowns = [drop for drop in world.css_find('.mce-listbox') if drop.text == 'Font Family']
assert_equal(len(dropdowns), 1)
dropdowns[0].click()
@step('I should see a list of available fonts')
def font_selector_dropdown_is_shown(step):
font_panel = get_fonts_list_panel(world)
expected_fonts = list(CUSTOM_FONTS.keys()) + list(TINYMCE_FONTS.keys())
actual_fonts = [font.strip() for font in font_panel.text.split('\n')]
assert_equal(actual_fonts, expected_fonts)
@step('"Default" option sets the
|
expected font family')
def default_options_sets_expected_font_family(step): # pylint: disable=unused-argument, redefined-outer-name
fonts = get_available_fonts(get_fonts_list_panel(world))
fonts_found = fonts.get("Default", None)
expected_font_family = CUSTOM_FONTS.get('Default')
for expected_font in expected_font_family:
assert_in(expected_font, fonts_found)
@step('all standard tinyMCE fonts should be available')
def check_standard_tinyMCE_fonts(step):
fonts = get_available_fonts(get_fonts_list_panel(world))
for label, expected_fonts in TINYMCE_FONTS.items():
for expected_font in expected_fonts:
assert_in(expected_font, fonts.get(label, None))
TINYMCE_FONTS = OrderedDict([
("Andale Mono", ['andale mono', 'times']),
("Arial", ['arial', 'helvetica', 'sans-serif']),
("Arial Black", ['arial black', 'avant garde']),
("Book Antiqua", ['book antiqua', 'palatino']),
("Comic Sans MS", ['comic sans ms', 'sans-serif']),
("Courier New", ['courier new', 'courier']),
("Georgia", ['georgia', 'palatino']),
("Helvetica", ['helvetica']),
("Impact", ['impact', 'chicago']),
("Symbol", ['symbol']),
("Tahoma", ['tahoma', 'arial', 'helvetica', 'sans-serif']),
("Terminal", ['terminal',
|
athoune/aiohttp_security
|
setup.py
|
Python
|
apache-2.0
| 2,105
| 0.00095
|
import codecs
from setuptools import setup, find_packages
import os
import re
import sys
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
with codecs.open(os.path.join(os.path.abspath(os.path.dirname(
__file__)), 'aiohttp_security', '__init__.py'), 'r', 'latin1') as fp:
try:
version = re.findall(r"^__version__ = '([^']+)'$", fp.read(), re.M)[0]
except IndexError:
raise RuntimeError('Unable to determine version.')
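# The version string is read straight from aiohttp_security/__init__.py so it is
# defined in exactly one place; the regex expects a line of the form
# __version__ = '<version>'.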
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
install_requires = ['aiohttp>=0.14']
tests_require = install_requires + ['pytest']
extras_require = {}
setup(name='aiohttp_security',
version=version,
description=("security for ai
|
ohttp.web"),
long_description='\n\n'.join((read('README.rst'), read('CHANGES.txt'))),
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP'],
author='Andrew Svetlov',
author_email='andrew.svetlov@gmail.com',
url='https://github.com/aio-libs/aiohttp_security/',
license='Apache 2',
packages=find_packages(),
install_requires=install_requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
include_package_data=True,
extras_require=extras_require)
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/scipy/ndimage/measurements.py
|
Python
|
gpl-3.0
| 47,436
| 0.000358
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from . import _ni_support
from . import _ni_label
from . import _nd_image
from . import morphology
__all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean',
'variance', 'standard_deviation', 'minimum', 'maximum', 'median',
'minimum_position', 'maximum_position', 'extrema', 'center_of_mass',
'histogram', 'watershed_ift']
def label(input, structure=None, output=None):
"""
Label features in an array.
Parameters
----------
input : array_like
An array-like object to be labeled. Any non-zero values in `input` are
counted as features and zero values are considered the background.
structure : array_like, optional
A structuring element that defines feature connections.
`structure` must be symmetric. If no structuring element is provided,
one is automatically generated with a squared connectivity equal to
one. That is, for a 2-D `input` array, the default structuring element
is::
[[0,1,0],
[1,1,1],
[0,1,0]]
output : (None, data-type, array_like), optional
If `output` is a data type, it specifies the type of the resulting
labeled feature array
If `output` is an array-like object, then `output` will be updated
with the labeled features from this function. This function can
operate in-place, by passing output=input.
Note that the output must be able to store the largest label, or this
function will raise an Exception.
Returns
-------
label : ndarray or int
An integer ndarray where each unique feature in `input` has a unique
label in the returned array.
num_features : int
How many objects were found.
If `output` is None, this function returns a tuple of
(`labeled_array`, `num_features`).
If `output` is a ndarray, then it will be updated with values in
`labeled_array` and only `num_features` will be returned by this
function.
See Also
--------
find_objects : generate a list of slices for the labeled features (or
objects); useful for finding features' position or
dimensions
Examples
--------
    Create an image with some features, then label it using the default
    (cross-shaped) structuring element:
>>> a = np.array([[0,0,1,1,0,0],
... [0,0,0,1,0,0],
... [1,1,0,0,1,0],
... [0,0,0,1,0,0]])
>>> labeled_array, num_features = label(a)
Each of the 4 features are labeled with a different integer:
>>> print(num_features)
4
>>> print(labeled_array)
array([[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[2, 2, 0, 0, 3, 0],
[0, 0, 0, 4, 0, 0]])
Generate a structuring element that will consider features connected even
if they touch diagonally:
>>> s = generate_binary_structure(2,2)
or,
>>> s = [[1,1,1],
[1,1,1],
[1,1,1]]
Label the image using the new structuring element:
>>> labeled_array, num_features = label(a, structure=s)
Show the 2 labeled features (note that features 1, 3, and 4 from above are
now considered a single feature):
>>> print(num_features)
2
>>> print(labeled_array)
array([[0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[2, 2, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0]])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if structure is None:
structure = morphology.generate_binary_structure(input.ndim, 1)
structure = numpy.asarray(structure, dtype=bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have equal rank')
for ii in structure.shape:
if ii != 3:
raise ValueError('structure dimensions must be equal to 3')
# Use 32 bits if it's large enough for this image.
# _ni_label.label() needs two entries for background and
# foreground tracking
need_64bits = input.size >= (2**31 - 2)
if isinstance(output, numpy.ndarray):
if output.shape != input.shape:
raise ValueError("output shape not correct")
caller_provided_output = True
else:
caller_provided_output = False
if output is None:
output = np.empty(input.shape, np.intp if need_64bits else np.int32)
else:
output = np.empty(input.shape, output)
# handle scalars, 0-dim arrays
if input.ndim == 0 or input.size == 0:
if input.ndim == 0:
# scalar
maxlabel = 1 if (input != 0) else 0
output[...] = maxlabel
else:
# 0-dim
maxlabel = 0
if caller_provided_output:
return maxlabel
else:
return output, maxlabel
try:
max_label = _ni_label._label(input, structure, output)
except _ni_label.NeedMoreBits:
# Make another attempt with enough bits, then try to cast to the
# new type.
tmp_output = np.empty(input.shape, np.intp if need_64bits else np.int32)
max_label = _ni_label._label(input, structure, tmp_output)
output[...] = tmp_output[...]
if not np.all(output == tmp_output):
# refuse to return bad results
raise RuntimeError("insufficient bit-depth in requested output type")
if caller_provided_output:
# result was written in-place
return max_label
else:
return output, max_label
def find_objects(input, max_label=0):
"""
Find objects in a labeled array.
Parameters
----------
input : ndarray of ints
Array containing objects defined by different labels.
max_label : int, optional
Maximum label to be searched for in `input`. If max_label is not
given, the positions of all objects are returned.
Returns
-------
object_slices : list of tuples
A list of tuples, with each tuple containing N slices (with N the
dimension of the input array). Slices correspond to the minimal
parallelepiped that contains the object. If a number is missing,
None is returned instead of a slice.
See Also
--------
label, center_of_mass
Notes
-----
This function is very useful for isolating a volume of interest inside
a 3-D array, that cannot be "seen through".
Examples
--------
>>> a = np.zeros((6,6), dtype=np.i
|
tidus747/Tutoriales_juegos_Python
|
Ataca a los orcos V0.0.5/Ataca_a_los_orcos_V0.0.5.py
|
Python
|
gpl-3.0
| 4,736
| 0.011467
|
# -*- coding: utf-8 -*-
import random
import textwrap
def print_bold(msg):
# Function to print a string on screen in bold
print("\033[1m"+msg+"\033[0m")
def print_linea_punteada(width=72):
print('-'*width)
def ocupar_chozas():
ocupantes = ['enemigo','amigo','no ocupada']
chozas = []
    while len(chozas) < 5:  # Define a number of settlements to mark as friend or enemy
eleccion_aleatoria = random.choice(ocupantes)
chozas.append(eleccion_aleatoria)
return chozas
def mostrar_mision():
print("\033[1m"+ "Ataca a los Orcos V0.0.5" + "\033[0m")
msg = ("La guerra entre los humanos y sus arqueros enemigos, los Orcos, estaba en el aire."
"Un enorme ejército de orcos se dirigía hacia los territos de los humanos. Destruían"
"prácticamente todo en su camino. Los grandes reyes de la raza humana, se unieron para"
" derrotar a su peor enemigo, era la gran batalla de su tiempo. Sir Gandorel, uno de los "
"valientes caballeros que guardan las llanuras meridionales, inició un largo viaje hacia el este"
", a través de un desconocido bosque espeso. Durante dos días y dos noches, se movió con cautela "
"a través del grueso bosque. En su camino, vio un pequeño asentamiento aislado. Cansado y con "
"la esperanza de reponer su stock de alimentos, decidió tomar un desvío. Cuando se acercó al pueblo,"
"vio cinco chozas. No había nadie alrededor. En ese instante, decidió entrar en un choza...")
print(textwrap.fill(msg, width = 72))
print("\033[1m"+"Misión:"+"\033[0m")
print("Elige una choza donde poder descansar...")
print("\033[1m"+"NOTA:"+"\033[0m")
print("¡Cuidado! Hay enemigos rondando la zona")
print_linea_punteada()
def mostrar_salud(medidor_salud, bold):
if bold:
print_bold("Salud Sir Gandorel:")
print_bold("%d"%(medidor_salud['jugador']))
print_bold("Salud Enemigo:")
print_bold("%d"%(medidor_salud['enemigo']))
else:
print("Salud Sir Gandorel:")
print("%d"%(medidor_salud['jugador']))
print("Salud Enemigo:")
print("%d"%(medidor_salud['enemigo']))
def procesar_decision_usuario():
msg = "\033[1m" + "Elige una choza, introduce un número entre 1 y 5: " + "\033[0m"
decision_usuario = input("\n"+msg)
idx = int(decision_usuario)
return idx
def reset_medidor_salud(medidor_salud):
medidor_salud['jugador']=40
medidor_salud['enemigo']=30
def atacar(medidor_salud):
lista_golpes = 4*['jugador']+6*['enemigo']
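    # 4 out of 10 draws wound the player, 6 out of 10 wound the enemy.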
unidad_herida = random.choice(lista_golpes)
puntos_vida = medidor_salud[unidad_herida]
    herida = random.randint(10,15)
medidor_salud[unidad_herida] = max(puntos_vida- herida,0)
print("¡Ataque!")
mostrar_salud(medidor_salud,bold=False)
def revelar_ocupantes(idx, chozas):
msg=""
print("Revelando los ocupantes...")
for i in range(len(chozas)):
ocupantes_info = "<%d:%s>"%(i+1, chozas[i])
if i+1 == idx:
ocupantes_info = "\033[1m" + ocupantes_info + "\033[0m"
msg += ocupantes_info + " "
print("\t" + msg)
print_linea_punteada()
# The following function sets up an iterative combat system
def play_game(medidor_salud):
chozas = ocupar_chozas()
idx = procesar_decision_usuario()
revelar_ocupantes(idx, chozas)
if chozas[idx-1] != 'enemigo':
print_bold("¡Enhorabuena! ¡Has GANADO!")
else:
print_bold('¡Enemigo encontrado!')
mostrar_salud(medidor_salud, bold=True)
continuar_ataque = True
while continuar_ataque:
            continuar_ataque = int(input("...continuar con el ataque? Si(1)/No(0)"))  # int() so the Si(1)/No(0) checks below work
if continuar_ataque == 0:
print_bold("Huyendo con el siguiente estado de salud...")
mostrar_salud(medidor_salud, bold=True)
print_bold("¡Game Over!")
break
atacar(medidor_salud)
if medidor_salud['enemigo'] <=0:
print_bold("¡Sir Gandorel ha derrotado a su enemigo!")
break
if medidor_salud['jugador'] <=0:
print_bold("Sir Gandorel ha muerto ...")
break
# Function that runs the main program we want to execute
def run_application():
seguir_jugando = 1
medidor_salud = {}
reset_medidor_salud(medidor_salud)
mostrar_mision()
while seguir_jugando == 1:
reset_medidor_salud(medidor_salud)
play_game(medidor_salud)
        seguir_jugando = int(input("¿Quieres jugar de nuevo? Si(1)/No(0):"))
if __name__ == '__main__':
run_application()
|
joopert/home-assistant
|
homeassistant/scripts/credstash.py
|
Python
|
apache-2.0
| 2,373
| 0.001686
|
"""Script to get, put and delete secrets stored in credstash."""
import argparse
import getpass
from homeassistant.util.yaml import _SECRET_NAMESPACE
# mypy: allow-untyped-defs
REQUIREMENTS = ["credstash==1.15.0"]
def run(args):
"""Handle credstash script."""
parser = argparse.ArgumentParser(
description=(
"Modify Home Assistant secrets in credstash."
"Use the secrets in configuration files with: "
"!secret <name>"
)
)
parser.add_argument("--script", choices=["credstash"])
parser.add_argument(
"action",
choices=["get", "put", "del", "list"],
help="Get, put or delete a secret, or list all available secrets",
)
parser.add_argument("name", help="Name of the secret", nargs="?", default=None)
parser.add_argument(
"value", help="The value to save when putting a secret", nargs="?", default=None
)
# pylint: disable=import-error, no-member
import credstash
args = parser.parse_args(args)
table = _SECRET_NAMESPACE
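    # Probe the table first; if it does not exist yet, credstash raises and we
    # create its DynamoDB table before continuing.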
try:
credstash.listSecrets(table=table)
except Exception: # pylint: disable=broad-except
credstash.createDdbTable(table=table)
if args.action == "list":
secrets = [i["name"] for i in credstash.listSecrets(table=table)]
deduped_secrets = sorted(set(secrets))
print("Saved secrets:")
for secret in deduped_secrets:
print(secret)
return 0
if args.name is None:
parser.print_help()
return 1
if args.action == "put":
if args.value:
the_secret = args.value
else:
the_secret = getpass.getpass(f"Please enter the secret for {args.name}: ")
current_version = credstash.getHighestVersion(args.name, table=table)
credstash.putSecret(
args.name, the_secret, version=int(current_version) + 1, table=table
)
print(f"Secret {args.name} put successfully")
elif args.action == "get":
the_secret = credstash.getSecret(args.name, table=table)
if the_secret is None:
print(f"Secret {args.name} not found")
else:
print(f"Secret {args.name}={the_secret}")
elif args.action == "del":
credstash.deleteSecrets(args.name, table=table)
print(f"Deleted secret {args.name}")
|
tripatheea/Riemann-Zeta
|
python/dirichlet.py
|
Python
|
mit
| 1,897
| 0.038482
|
from __future__ import division
import math
import numpy as np
from time import time
import sympy as sp
import mpmath as mp
from mpmath.ctx_mp_python import mpf
from scipy.misc import factorial
from scipy.special import gamma
precision = 53
mp.prec = precision
mp.pretty = True
def calculate_factorial_ratio(n, i):
# This function calculates (n + i - 1)! / (n - i)!
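    # e.g. n=5, i=3: (5+3-1)!/(5-3)! = 7!/2! = 2520, computed as 7*6*5*4*3
    # without forming either full factorial.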
mp.dps = 50
k = (n - i)
result = 1
for j in range(k + 2*i - 1, k, -1):
result = mp.fmul(result, j)
return result
def n_choose_k(n, k):
j = n - k
numerator = 1
for i in range(1, k + 1):
numerator *= (j + i)
denominator = factorial(k)
    return numerator / denominator
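# dirichlet_eta below uses a binomial (Euler-transform) acceleration of the
# alternating series eta(s) = sum_{k>=0} (-1)^k / (k+1)^s, and alternating_series
# recovers zeta(s) = eta(s) / (1 - 2^(1-s)) for s != 1.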
def dirichlet_eta(s, N):
def calculate_d_n(n):
total = 0.0
for k in range(n + 1):
if k % 2 == 0:
alternating_factor = 1
else:
alternating_factor = -1
total += alternating_factor * n_choose_k(n, k) / ( k + 1)**s
return total
eta = 0.0
for n in range(N + 1):
d_n = calculate_d_n(n)
eta += d_n / (2**(n + 1))
return eta
def alternating_series(s, N):
eta = dirichlet_eta(s, N)
denominator = 1 - 2**(1 - s)
zeta = eta / denominator
return zeta
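# Riemann-Siegel theta: theta(t) = arg Gamma((2it + 1)/4) - (t/2) * ln(pi).
# It is used below to form Z(t) = exp(i*theta(t)) * zeta(1/2 + it), which is
# real-valued on the critical line (the code takes the real part).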
def riemann_siegel_theta(t):
first_term = np.angle( gamma( (2.j*t + 1) / 4) )
second_term = t * np.log(np.pi) / 2
return first_term - second_term
def zeta_function(s, N):
z = alternating_series(s, N)
return z
def z_function(t, N=100000):
zeta = zeta_function(1/2 + (1.j)*t, N)
return mp.re( np.exp( 1.j * riemann_siegel_theta(t) ) * zeta )
def calculate_z(t): # Convenient wrapper to use for roots.py
return z_function(t, N=25)
if __name__ == '__main__':
# print zeta_function(s=1/2 + 25.j, N=1000)
# print z_function(t=18, N=100)
start = time()
eta = dirichlet_eta(1, N=25)
print eta
print abs(eta - np.log(2))
end = time()
print "Calculated using alternating series in {:.4f} seconds.".format(float(end - start))
|
stelfrich/bioformats
|
tools/bump_maven_version.py
|
Python
|
gpl-2.0
| 3,006
| 0.001331
|
#! /usr/bin/python
# Script for increasing versions numbers across the code
import sys
import glob
import re
import argparse
def check_version_format(version):
"""Check format of version number"""
pattern = '^[0-9]+[\.][0-9]+[\.][0-9]+(\-.+)*$'
return re.match(pattern, version) is not None
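# e.g. "5.1.2" and "5.1.2-SNAPSHOT" both pass, while "5.1" is rejected because
# three numeric fields are required.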
BIO_FORMATS_ARTIFACT = (
r"(<groupId>%s</groupId>\n"
".*<artifactId>pom-bio-formats</artifactId>\n"
".*<version>).*(</version>)")
class Replacer(object):
def __init__(self, old_group="ome", new_group="ome"):
self.old_group = old_group
self.new_group = new_group
self.group_pattern = \
r"(<groupId>)%s(</groupId>)" % \
old_group
self.artifact_pattern = BIO_FORMATS_ARTIFACT % old_group
self.release_version_pattern = \
r"(<release.version>).*(</release.version>)"
self.stableversion_pattern = \
r"(STABLE_VERSION = \").*(\";)"
self.upgradecheck = \
"components/formats-bsd/src/loci/formats/UpgradeChecker.java"
def replace_file(self, input_path, pattern, version):
"""Substitute a pattern with version in a file"""
with open(input_path, "r") as infile:
regexp = re.compile(pattern)
new_content = regexp.sub(r"\g<1>%s\g<2>" % version, infile.read())
with open(input_path, "w") as output:
output.write(new_content)
output.close()
infile.close()
def bump_pom_versions(self, version):
"""Replace versions in pom.xml files"""
# Replace versions in components pom.xml
for pomfile in (glob.glob("*/*/pom.xml") + glob.glob("*/*/*/pom.xml")):
self.replace_file(pomfile, self.artifact_pattern, version)
self.replace_file(pomfile, self.group_pattern, self.new_group)
# Replace versions in top-level pom.xml
toplevelpomfile = "pom.xml"
self.replace_file(
toplevelpomfile, self.artifact_pattern, version)
self.replace_file(
toplevelpomfile, self.release_version_pattern, version)
self.replace_file(
toplevelpomfile, self.group_pattern, self.new_group)
def bump_stable_version(self, version):
"""Replace UpgradeChecker stable version"""
        self.replace_file(
self.upgradecheck, self.stableversion_pattern, version)
if __name__ == "__main__":
# Input check
parser = argparse.ArgumentParser()
parser.add_argument("--old-group", type=str, default="ome")
parser.add_argument("--new-group", type=str, default="ome")
parser.add_argument("version", type=str)
ns = parser.parse_args()
if not check_version_format(ns.version):
print "Invalid version format"
sys.exit(1)
    replacer = Replacer(old_group=ns.old_group, new_group=ns.new_group)
replacer.bump_pom_versions(ns.version)
if not ns.version.endswith('SNAPSHOT'):
replacer.bump_stable_version(ns.version)
|
kuraha4/roguelike-tutorial-python
|
src/data/status_effect.py
|
Python
|
mit
| 1,577
| 0.000634
|
# -*- coding: utf-8 -*-
"""Status effect data."""
from components.status_effect import StatusEffect
from status_effect_functions import damage_of_time
# TODO: generate a new object instead of copying
STATUS_EFFECT_CATALOG = {
'POISONED':
{
'name': 'poisoned',
'tile_path': 'status_effect/poisoned.png',
'color': 'green',
'tick_function': damage_of_time,
'duration': 6,
'function_kwargs': {'init_dmg': 2} # specify as dictionary
},
'OFF_BALANCED':
{
'name': 'off-balanced',
'tile_path': 'status_effect/off_balanced.png',
'color': 'gray',
'duration': 4,
'stats': {'phys_pow': -1,
'defense': -2}
},
'VIGILANT':
{
'name': 'vigilant',
'tile_path': 'status_effect/vigilant.png',
'color': 'blue',
'duration': 6,
'stats': {'defense': 5}
},
}
def generate_status_effect(statfx_id):
"""Return a status effect generated from catalog."""
statfx_data = STATUS_EFFECT_CATALOG.get(statfx_id)
    # char and color are set as placeholders; ASCII graphics features will be removed in the future.
return StatusEffect(statfx_data.get('name'),
statfx_data.get('tile_path'),
statfx_data.get('color'),
function_kwargs=statfx_data.get('function_kwargs'),
tick_function=statfx_data.get('tick_function'),
duration=statfx_data.get('duration'),
stats=statfx_data.get('stats'))
|
bmihelac/django-import-export
|
import_export/templatetags/import_export_tags.py
|
Python
|
bsd-2-clause
| 323
| 0
|
from diff_match_patch import diff_match_patch
from django import template
register = template.Library()
@register.simple_tag
def compare_values(value1, value2):
dmp = diff_match_patch()
diff = dmp.diff_main(value1, value2)
dmp.diff_cleanupSemantic(diff)
|
html = dmp.diff_prettyHtml(diff)
r
|
eturn html
|
eunchong/build
|
scripts/slave/recipe_modules/auto_bisect/resources/fetch_revision_info.py
|
Python
|
bsd-3-clause
| 1,529
| 0.011772
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Gets information about one commit from gitiles.
Example usage:
./fetch_revision_info.py 343b531d31 chromium
./fetch_revision_info.py 17b4e7450d v8
"""
import argparse
import json
import urllib2
import depot_map # pylint: disable=relative-import
_GITILES_PADDING = ')]}\'\n'
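# Gitiles prefixes its JSON responses with ")]}'" as an anti-XSSI guard, so that
# padding must be stripped before json.loads().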
_URL_TEMPLATE = 'https://chromium.googlesource.com/%s/+/%s?format=json'
def fetch_revision_info(commit_hash, depot_name):
"""Gets information about a chromium revision."""
path = depot_map.DEPOT_PATH_MAP[depot_name]
url = _URL_TEMPLATE % (path, commit_hash)
response = urllib2.urlopen(url).read()
response_json = response[len(_GITILES_PADDING):]
response_dict = json.loads(response_json)
message = response_dict['message'].splitlines()
subject = message[0]
body = '\n'.join(message[1:])
result = {
'author': response_dict['author']['name'],
'email': response_dict['author']['email'],
'subject': subject,
'body': body,
'date': response_dict['committer']['time'],
}
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument('commit_hash')
parser.add_argument('depot', choices=list(depot_map.DEPOT_PATH_MAP))
args = parser.parse_args()
revision_info = fetch_revision_info(args.commit_hash, args.depot)
print json.dumps(revision_info)
if __name__ == '__main__':
main()
|
mozilla/ChangeDetector
|
pyLibrary/queries/containers/__init__.py
|
Python
|
mpl-2.0
| 4,498
| 0.001556
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import Mapping
from copy import copy
from types import GeneratorType
from pyLibrary.debugs.logs import Log
from pyLibrary.dot import set_default, split_field, wrap, DictList
from pyLibrary.dot.dicts import Dict
type2container = Dict()
config = Dict() # config.default IS EXPECTED TO BE SET BEFORE CALLS ARE MADE
_ListContainer = None
_Cube = None
_run = None
_Query = None
_Normal = None
def _delayed_imports():
global type2container
global _ListContainer
global _Cube
global _run
global _Query
global _Normal
try:
from pyLibrary.queries.qb_usingMySQL import MySQL as _MySQL
except Exception:
_MySQL = None
from pyLibrary.queries.qb_usingES import FromES as _FromES
from pyLibrary.queries.containers.lists import ListContainer as _ListContainer
from pyLibrary.queries.containers.cube import Cube as _Cube
from pyLibrary.queries.qb import run as _run
from pyLibrary.queries.query import Query as _Query
set_default(type2container, {
"elasticsearch": _FromES,
"mysql": _MySQL,
"memory": None
})
_ = _run
_ = _Query
_ = _Normal
class Container(object):
__slots__ = ["data", "schema", "namespaces"]
@classmethod
def new_instance(type, frum, schema=None):
"""
Factory!
"""
if not type2container:
_delayed_imports()
if isinstance(frum, Container):
return frum
elif isinstance(frum, _Cube):
return frum
elif isinstance(frum, _Query):
return _run(frum)
elif isinstance(frum, (list, set, GeneratorType)):
return _ListContainer(frum)
elif isinstance(frum, basestring):
# USE DEFAULT STORAGE TO FIND Container
if not config.default.settings:
Log.error("expecting pyLibrary.queries.query.config.default.settings to contain default elasticsearch connection info")
settings = set_default(
{
"index": split_field(frum)[0],
"name": frum,
},
config.default.settings
)
settings.type = None # WE DO NOT WANT TO INFLUENCE THE TYPE BECAUSE NONE IS IN THE frum STRING ANYWAY
return type2container["elasticsearch"](settings)
elif isinstance(frum, Mapping):
frum = wrap(frum)
if frum.type and type2container[frum.type]:
return type2container[frum.type](frum.settings)
elif frum["from"]:
frum = copy(frum)
frum["from"] = Container(frum["from"])
return _Query(frum)
else:
Log.error("Do not know how to handle {{frum|json}}", frum=frum)
else:
Log.error("Do not know how to handle {{type}}", type=frum.__class__.__name__)
def __init__(self, frum, schema=None):
object.__init__(self)
if not type2container:
_delayed_imports()
self.data = frum
if isinstance(schema, list):
Log.error("expecting map from abs_name to column object")
self.schema = schema
# self.namespaces = wrap([_Normal()])
def query(self, query):
if query.frum != self:
Log.error("not expected")
Log.error("Not implemented")
def filter(self, where):
return self.where(where)
def where(self, where):
_ = where
Log.error("not implemented")
def sort(self, sort):
_ = sort
Log.error("not implemented")
def select(self, select):
_ = select
Log.error("not implemented")
def window(self, window):
_ = window
Log.error("not implemented")
    def having(self, having):
_ = having
Log.error("not implemented")
def format(self, format):
_ = format
Log.error("not implemented")
def get_columns(self, table):
"""
        USE THE frum TO DETERMINE THE COLUMNS
"""
Log.error("Not implemented")
|
PicoCentauri/GromacsWrapper
|
gromacs/fileformats/ndx.py
|
Python
|
gpl-3.0
| 7,868
| 0.003177
|
# GromacsWrapper: formats.py
# Copyright (c) 2009-2011 Oliver Beckstein <orbeckst@gmail.com>
# Released under the GNU Public License 3 (or higher, your choice)
# See the file COPYING for details.
"""
Gromacs NDX index file format
=============================
The `.ndx file`_ contains lists of atom indices that are grouped in
sections by *group names*. The classes :class:`NDX` and
:class:`uniqueNDX` can parse such ndx files and provide convenient
access to the individual groups.
.. _`.ndx file`: http://www.gromacs.org/Documentation/File_Formats/.ndx_File
.. autoclass:: NDX
:members:
.. autoclass:: uniqueNDX
:members:
.. autoclass:: IndexSet
"""
from __future__ import absolute_import, with_statement
import os, errno
import re
import warnings
import operator
import numpy
from ..exceptions import ParseError, AutoCorrectionWarning
from .. import utilities
from collections import OrderedDict as odict
import logging
class NDX(odict, utilities.FileUtils):
"""Gromacs index file.
Represented as a ordered dict where the keys are index group names and
values are numpy arrays of atom numbers.
Use the :meth:`NDX.read` and :meth:`NDX.write` methods for
I/O. Access groups by name via the :meth:`NDX.get` and
:meth:`NDX.set` methods.
Alternatively, simply treat the :class:`NDX` instance as a
dictionary. Setting a key automatically transforms the new value
into a integer 1D numpy array (*not* a set, as would be the
:program:`make_ndx` behaviour).
.. Note::
The index entries themselves are ordered and can contain
duplicates so that output from NDX can be easily used for
:program:`g_dih` and friends. If you need set-like behaviour
you will have do use :class:`gromacs.formats.uniqueNDX` or
:class:`gromacs.cbook.IndexBuilder` (which uses
:program:`make_ndx` throughout).
**Example**
Read index file, make new group and write to disk::
ndx = NDX()
ndx.read('system.ndx')
print ndx['Protein']
ndx['my_group'] = [2, 4, 1, 5] # add new group
ndx.write('new.ndx')
    Or quicker (replacing the input file ``system.ndx``)::
        ndx = NDX('system')  # suffix .ndx is automatically added
ndx['chi1'] = [2, 7, 8, 10]
ndx.write()
"""
default_extension = "ndx"
# match: [ index_groupname ]
SECTION = re.compile("""\s*\[\s*(?P<name>\S.*\S)\s*\]\s*""")
#: standard ndx file format: 15 columns
ncol = 15
#: standard ndx file format: '%6d'
format = '%6d'
def __init__(self, filename=None, **kwargs):
super(NDX, self).__init__(**kwargs) # can use kwargs to set dict! (but no sanity checks!)
if filename is not None:
self._init_filename(filename)
self.read(filename)
def read(self, filename=None):
"""Read and parse index file *filename*."""
self._init_filename(filename)
data = odict()
with open(self.real_filename) as ndx:
current_section = None
for line in ndx:
line = line.strip()
if len(line) == 0:
continue
m = self.SECTION.match(line)
if m:
current_section = m.group('name')
data[current_section] = [] # can fail if name not legal python key
continue
if current_section is not None:
data[current_section].extend(map(int, line.split()))
super(NDX,self).update(odict([(name, self._transform(atomnumbers))
for name, atomnumbers in data.items()]))
def write(self, filename=None, ncol=ncol, format=format):
"""Write index file to *filename* (or overwrite the file that the index was read from)"""
with open(self.filename(filename, ext='ndx'), 'w') as ndx:
for name in self:
atomnumbers = self._getarray(name) # allows overriding
ndx.write('[ {0!s} ]\n'.format(name))
for k in xrange(0, len(atomnumbers), ncol):
line = atomnumbers[k:k+ncol].astype(int) # nice formatting in ncol-blocks
n = len(line)
ndx.write((" ".join(n*[format])+'\n') % tuple(line))
ndx.write('\n')
def get(self, name):
"""Return index array for index group *name*."""
return self[name]
def set(self, name, value):
"""Set or add group *name* as a 1D numpy array."""
self[name] = value
def size(self, name):
"""Return number of entries for group *name*."""
return len(self[name])
@property
def groups(self):
"""Return a list of all groups."""
return self.keys()
@property
def sizes(self):
"""Return a dict with group names and number of entries,"""
return {name: len(atomnumbers) for name, atomnumbers in self.items()}
@property
def ndxlist(self):
"""Return a list of groups in the same format as :func:`gromacs.cbook.get_ndx_groups`.
Format:
[ {'name': group_name, 'natoms': number_atoms, 'nr': # group_number}, ....]
"""
return [{'name': name, 'natoms': len(atomnumbers), 'nr': nr+1} for
nr,(name,atomnumbers) in enumerate(self.items())]
def _getarray(self, name):
"""Helper getter that is used in write().
Override when using a _transform that stores something that
cannot be indexed, e.g. when using set()s.
"""
return self[name]
def _transform(self, v):
"""Transform input to the stored representation.
Override eg with ``return set(v)`` for index lists as sets.
"""
return numpy.ravel(v).astype(int)
def __setitem__(self, k, v):
super(NDX, self).__setitem__(k, self._transform(v))
def setdefault(*args,**kwargs):
raise NotImplementedError
class IndexSet(set):
"""set which defines '+' as union (OR) and '-' as intersection (AND)."""
def __add__(self, x):
return self.union(x)
def __sub__(self, x):
return self.intersection(x)
class uniqueNDX(NDX):
"""Index that behaves like make_ndx, i.e. entries behaves as sets,
not lists.
The index lists behave like sets:
- adding sets with '+' is equivalent to a logical OR: x + y == "x | y"
- subtraction '-' is AND: x - y == "x & y"
- see :meth:`~gromacs.formats.join` for ORing multiple groups (x+y+z+...)
**Example** ::
I = uniqueNDX('system.ndx')
I['SOLVENT'] = I['SOL'] + I['NA+'] + I['CL-']
"""
def join(self, *groupnames):
"""Return an index group that contains atoms from all *groupnames*.
The method will silently ignore any groups that are not in the
index.
**Example**
Always make a solvent group from water and ions, even if not
all ions are present in all simulations::
I['SOLVENT'] = I.join('SOL', 'NA+', 'K+', 'CL-')
"""
return self._sum([self[k] for k in groupnames if k in self])
def _sum(self, sequence):
return reduce(operator.add, sequence)
def _transform(self, v):
return IndexSet(v)
def _getarray(self, k):
return numpy.sort(numpy.fromiter(self[k],dtype=int,count=len(self[k])))
# or use list of these?
# class IndexGroup(dict):
# def __init__(self, groupnumber=None, name="empty", atomnumbers=None, **kwargs):
# atomnumbers = atomnumbers or []
# _atomnumbers = numpy.asarray(atomnumbers).astype(int)
# super(IndexGroup, self).__init__(name=str(name),
# atomnumbers=_atomnumbers,
# nr=groupnumber)
|
sbg/sevenbridges-python
|
sevenbridges/meta/comp_mutable_dict.py
|
Python
|
apache-2.0
| 1,715
| 0
|
# noinspection PyProtectedMember,PyUnresolvedReferences
class CompoundMutableDict(dict):
"""
Resource used for mutable compound dictionaries.
"""
# noinspection PyMissingConstructor
def __init__(self, **kwargs):
self._parent = kwargs.pop('_parent')
self._api = kwargs.pop('api')
for k, v in kwargs.items():
super().__setitem__(k, v)
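    # Writes are mirrored into the parent resource: every changed key is copied
    # into parent._data[self._name] and recorded in parent._dirty[self._name] so
    # the parent knows which nested fields still need to be saved.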
def __setitem__(self, key, value):
super().__setitem__(key, value)
if self._name not in self._parent._dirty:
self._parent._dirty.update({self._name: {}})
        if key in self._parent._data[self._name]:
if self._parent._data[self._name][key] != value:
self._parent._dirty[self._name][key] = value
self._parent._data[self._name][key] = value
else:
self._parent._data[self._name][key] = value
self._parent._dirty[self._name][key] = value
def __repr__(self):
values = {}
for k, _ in self.items():
values[k] = self[k]
return str(values)
__str__ = __repr__
def update(self, e=None, **f):
other = {}
if e:
other.update(e, **f)
else:
other.update(**f)
for k, v in other.items():
if other[k] != self[k]:
self[k] = other[k]
def items(self):
values = []
for k in self.keys():
values.append((k, self[k]))
return values
def equals(self, other):
if not type(other) == type(self):
return False
return (
self is other or
self._parent._data[self._name] == other._parent._data[self._name]
)
|
hurricanerix/swift
|
test/unit/common/middleware/test_staticweb.py
|
Python
|
apache-2.0
| 41,893
| 0
|
# Copyright (c) 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
from six.moves.urllib.parse import urlparse
from swift.common.swob import Request, Response, HTTPUnauthorized
from swift.common.middleware import staticweb
meta_map = {
'c1': {'status': 401},
'c2': {},
'c3': {'meta': {'web-index': 'index.html',
'web-listings': 't'}},
'c3b': {'meta': {'web-index': 'index.html',
'web-listings': 't'}},
'c4': {'meta': {'web-index': 'index.html',
'web-error': 'error.html',
'web-listings': 't',
'web-listings-css': 'listing.css',
'web-directory-type': 'text/dir'}},
'c5': {'meta': {'web-index': 'index.html',
'web-error': 'error.html',
'web-listings': 't',
'web-listings-css': 'listing.css'}},
'c6': {'meta': {'web-listings': 't',
'web-error': 'error.html'}},
'c6b': {'meta': {'web-listings': 't',
'web-listings-label': 'foo'}},
'c7': {'meta': {'web-listings': 'f',
'web-error': 'error.html'}},
'c8': {'meta': {'web-error': 'error.html',
'web-listings': 't',
'web-listings-css':
'http://localhost/stylesheets/listing.css'}},
'c9': {'meta': {'web-error': 'error.html',
'web-listings': 't',
'web-listings-css':
'/absolute/listing.css'}},
'c10': {'meta': {'web-listings': 't'}},
'c11': {'meta': {'web-index': 'index.html'}},
'c11a': {'meta': {'web-index': 'index.html',
'web-directory-type': 'text/directory'}},
'c12': {'meta': {'web-index': 'index.html',
'web-error': 'error.html'}},
'c13': {'meta': {'web-listings': 'f',
'web-listings-css': 'listing.css'}},
}
def mock_get_container_info(env, app, swift_source='SW'):
container = env['PATH_INFO'].rstrip('/').split('/')[3]
container_info = meta_map[container]
container_info.setdefault('status', 200)
container_info.setdefault('read_acl', '.r:*')
return container_info
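# Each cN entry in meta_map mimics a container whose metadata turns on a
# different staticweb feature (web-index, web-listings, listing CSS, error
# pages); FakeApp below serves canned responses for the matching object paths.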
class FakeApp(object):
def __init__(self, status_headers_body_iter=None):
self.calls = 0
self.get_c4_called = False
def __call__(self, env, start_response):
self.calls += 1
if 'swift.authorize' in env:
resp = env['swift.authorize'](Request(env))
if resp:
return resp(env, start_response)
if env['PATH_INFO'] == '/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1':
return Response(
status='412 Precondition Failed')(env, start_response)
elif env['PATH_INFO'] == '/v1/a':
return Response(status='401 Unauthorized')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c1':
return Response(status='401 Unauthorized')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c2':
return self.listing(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c2/one.txt':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3':
            return self.listing(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/index.html':
return Response(status='200 Ok', body='''
<html>
<body>
<h1>Test main index.html file.</h1>
<p>Visit <a href="subdir">subdir</a>.</p>
<p>Don't visit <a href="subdir2/">subdir2</a> because it doesn't really
exist.</p>
<p>Visit <a href="subdir3">subdir3</a>.</p>
<p>Visit <a href="subdir3/subsubdir">subdir3/subsubdir</a>.</p>
</body>
</html>
''')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3b':
return self.listing(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3b/index.html':
resp = Response(status='204 No Content')
resp.app_iter = iter([])
return resp(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdir3/subsubdir/index.html':
return Response(status='200 Ok', body='index file')(env,
start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdirx/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdirx/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdiry/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdiry/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdirz':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/subdirz/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/unknown':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c3/unknown/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4':
self.get_c4_called = True
return self.listing(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/one.txt':
return Response(
status='200 Ok',
headers={'x-object-meta-test': 'value'},
body='1')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/two.txt':
return Response(status='503 Service Unavailable')(env,
start_response)
elif env['PATH_INFO'] == '/v1/a/c4/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/subdir/':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/subdir/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/unknown':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/unknown/index.html':
return Response(status='404 Not Found')(env, start_response)
elif env['PATH_INFO'] == '/v1/a/c4/404error.html':
return Response(status='200 Ok', body='''
<html>
<body style="background: #000000; color: #ffaaaa">
<p>Chrome's 404 fancy-page sucks.</p>
</body>
</html>
                         '''.strip())(env, start_response)
|
oliviamillard/CS141
|
ChaosSierpinskiTriangle.py
|
Python
|
mit
| 2,806
| 0.015324
|
#Olivia Millard - Homework 3
#This program will generate a Sierpinski Triangle.
import pygame, random, math
## PRE- size is a (width, height) tuple giving the dimensions of the image
## POST- returns a width x height array of pixels; each pixel holds 3 components
## Pixels are addressed as image[x][y]: [0] = red, [1] = green, [2] = blue
def newImage(size):
return pygame.surfarray.array3d(pygame.Surface(size))
## PRE- image is a list of list of 3-tuples; 3 tuple: (R, G, B)
## POST- the image is displayed
def showImage(image):
width, height, depth = image.shape
pygame.display.set_mode((width, height))
surface = pygame.display.get_surface()
pygame.surfarray.blit_array(surface, image)
pygame.display.flip()
## This function generates a random point inside a window of the given width/height.
## It returns the point as an (x, y) tuple.
def random_point(width, height):
x_one = (random.random() * width)
y_one = (random.random() * height)
return(x_one, y_one)
## This function finds the midpoint between the current point and the chosen corner.
## It returns the midpoint as an (x, y) tuple.
def midpoint(x_0, y_0, x_1, y_1):
x_two = ((x_0 + x_1)/2)
y_two = ((y_0 + y_1)/2)
return x_two, y_two
## This function maps a point's (x, y) position to an RGB colour that fades across the window.
def color_point (x, y, width, height):
w = (255 / width)
h = (255 / height)
color_x = x * w
color_y = y * h
r = math.fabs(255 - color_y)
g = math.fabs(255 - color_x)
b = math.fabs(255 - color_x - color_y)
return(r, g, b)
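## Illustrative worked example (not part of the original assignment): with this mapping,
## color_point(0, 0, width, height) returns (255.0, 255.0, 255.0) (white) for the
## top-left corner, while a point near (width, height) maps to roughly (0, 0, 255)
## (blue), so the colours fade smoothly across the window.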
## Main script: get user input for the width and height of the window, then run the
## chaos game, tracking the current point (p), a random corner (c) and their midpoint (m).
pygame.init()
width = int(input("How wide would you like your window to be? "))
height = int(input("How tall would you like your window to be? "))
window = newImage((width, height))
for X in range(width):
    for Y in range(height):
        window[X][Y] = (255, 255, 255)
# Start from a single random point, then repeatedly jump halfway towards a random corner.
p = random_point(width, height)
img_corners = [(width, height), (0, height), (width // 2, 0)]
for i in range(4444444):
    c = random.choice(img_corners)
    m = midpoint(p[0], p[1], c[0], c[1])
    color = color_point(m[0], m[1], width, height)
    # Skip the first few jumps so only points that have settled onto the
    # triangle are drawn; pixel indices must be integers.
    if i > 20:
        window[int(m[0])][int(m[1])] = color
    p = m
    # Refresh the display every 1000 points.
    if i % 1000 == 0:
        showImage(window)
#To end the game.
print ('Done!')
input ("ENTER to quit")
pygame.quit()
|
pombreda/omnipy
|
omnipy/reader/_reader.py
|
Python
|
gpl-3.0
| 1,755
| 0.034758
|
"""
The basic module about log readers
"""
import os
import re
from ..utils.gzip2 import GzipFile
__author__ = 'chenxm'
__all__ = ["FileReader"]
class FileReader(object):
@staticmethod
def open_file(filename, mode='rb'):
""" open plain or compressed file
@return file handler
"""
parts = os.path.basename(filename).split('.')
try:
assert parts[-1] == 'gz'
fh = GzipFile(mode=mode, filename = filename)
except:
fh = open(filename, mode)
return fh
@staticmethod
def list_files(folder, regex_str=r'.', match=True):
""" find all files under 'folder' with names matching
        some regular expression
"""
assert os.path.isdir(folder)
all_files_path = []
for root, dirs, files in os.walk(folder):
for filename in files:
if match and re.match(regex_str, filename, re.IGNORECASE):
all_files_path.append(os.path.join(root, filename))
elif not match and re.search(regex_str, filename, re.IGNORECASE):
all_files_path.append(os.path.join(root, filename))
return all_files_path
class LogEntry(object):
def __init__(self):
self.data = {}
def get(self, property):
try:
return self[property]
except KeyError:
return None
def set(self, property, value):
self[property] = value
def __getitem__(self, property):
return self.data[property]
def __setitem__(self, property, value):
self.data[property] = value
def __str__(self):
return str(self.data)
class LogReader(object):
    def __init__(self, filename):
self.filename = filename
self.filehandler = FileReader.open_file(filename)
def __iter__(self):
return self
def next(self):
try:
new_line = self.filehandler.next()
return new_line
except StopIteration:
self.filehandler.close()
raise StopIteration
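# A minimal usage sketch of the readers above (illustrative only: the "./logs"
# directory and the filename pattern are hypothetical). FileReader transparently
# handles gzip-compressed files via the ".gz" extension, and LogReader yields raw
# lines using the Python 2 iteration protocol, matching the module above.
if __name__ == "__main__":
    log_files = FileReader.list_files("./logs", regex_str=r".*\.log(\.gz)?$")
    for path in log_files:
        for line in LogReader(path):
            print(line.strip())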
|
patricklaw/pants
|
src/python/pants/backend/python/register.py
|
Python
|
apache-2.0
| 2,773
| 0.001082
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Support for Python.
See https://www.pantsbuild.org/docs/python-backend.
"""
from pants.backend.python import target_types_rules
from pants.backend.python.dependency_inference import rules as dependency_inference_rules
from pants.backend.python.goals import (
coverage_py,
lockfile,
package_pex_binary,
pytest_runner,
repl,
run_pex_binary,
setup_py,
tailor,
)
from pants.backend.python.macros.pants_requirement import PantsRequirement
from pants.backend.python.macros.pipenv_requirements import PipenvRequirements
from pants.backend.python.macros.poetry_requirements import PoetryRequirements
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.macros.python_requirements import PythonRequirements
from pants.backend.python.subsystems import ipython, pytest, python_native_code, setuptools
from pants.backend.python.target_types import (
PexBinary,
PythonDistribution,
PythonRequirementsFile,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
PythonTestsGeneratorTarget,
)
from pants.backend.python.util_rules import (
ancestor_files,
local_dists,
pex,
pex_cli,
pex_environment,
pex_from_targets,
python_sources,
)
from pants.build_graph.build_file_aliases import BuildFileAliases
def build_file_aliases():
return BuildFileAliases(
objects={"python_artifact": PythonArtifact, "setup_py": PythonArtifact},
context_aware_object_factories={
"python_requirements": PythonRequirements,
"poetry_requirements": PoetryRequirements,
"pipenv_requirements": PipenvRequirements,
PantsRequirement.alias: PantsRequirement,
},
)
def rules():
return (
*coverage_py.rules(),
*lockfile.rules(),
*tailor.rules(),
*ancestor_files.rules(),
*local_dists.rules(),
*python_sources.rules(),
*dependency_inference_rules.rules(),
*pex.rules(),
*pex_cli.rules(),
*pex_environment.rules(),
*pex_from_targets.rules(),
*pytest_runner.rules(),
*package_pex_binary.rules(),
*python_native_code.rules(),
*repl.rules(),
*run_pex_binary.rules(),
*target_types_rules.rules(),
*setup_py.rules(),
*setuptools.rules(),
*ipython.rules(),
*pytest.rules(),
)
def target_types():
return [
PexBinary,
PythonDistribution,
PythonSourcesGeneratorTarget,
PythonRequirementTarget,
PythonRequirementsFile,
PythonTestsGeneratorTarget,
]
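# Illustrative note (not part of the original file): Pants loads these entry points
# (build_file_aliases, rules, target_types) once the backend is enabled, typically by
# listing it under backend_packages in pants.toml, e.g.
#
#   [GLOBAL]
#   backend_packages = ["pants.backend.python"]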
|
nabla-c0d3/nassl
|
nassl/cert_chain_verifier.py
|
Python
|
agpl-3.0
| 3,020
| 0.00298
|
from pathlib import Path
from typing import List
from nassl._nassl import X509, X509_STORE_CTX
class CertificateChainVerificationFailed(Exception):
    def __init__(self, openssl_error_code: int) -> None:
self.openssl_error_code = openssl_error_code
        self.openssl_error_string = X509.verify_cert_error_string(self.openssl_error_code)
super().__init__(
f'Verification failed with OpenSSL error code {self.openssl_error_code}: "{self.openssl_error_string}"'
)
class CertificateChainVerifier:
def __init__(self, trusted_certificates: List[X509]) -> None:
if not trusted_certificates:
raise ValueError("Supplied an empty list of trusted certificates")
self._trusted_certificates = trusted_certificates
@classmethod
def from_pem(cls, trusted_certificates_as_pem: List[str]) -> "CertificateChainVerifier":
if not trusted_certificates_as_pem:
raise ValueError("Supplied an empty list of trusted certificates")
return cls([X509(cert_pem) for cert_pem in trusted_certificates_as_pem])
@classmethod
def from_file(cls, trusted_certificates_path: Path) -> "CertificateChainVerifier":
parsed_certificates: List[str] = []
with trusted_certificates_path.open() as file_content:
for pem_segment in file_content.read().split("-----BEGIN CERTIFICATE-----")[1::]:
pem_content = pem_segment.split("-----END CERTIFICATE-----")[0]
pem_cert = f"-----BEGIN CERTIFICATE-----{pem_content}-----END CERTIFICATE-----"
parsed_certificates.append(pem_cert)
return cls.from_pem(parsed_certificates)
def verify(self, certificate_chain: List[X509]) -> List[X509]:
"""Validate a certificate chain and if successful, return the verified chain.
The leaf certificate must be at index 0 of the certificate chain.
WARNING: the validation logic does not perform hostname validation.
"""
if not certificate_chain:
raise ValueError("Supplied an empty certificate chain")
# Setup the context object for cert verification
store_ctx = X509_STORE_CTX()
store_ctx.set0_trusted_stack(self._trusted_certificates)
store_ctx.set0_untrusted(certificate_chain)
leaf_cert = certificate_chain[0]
store_ctx.set_cert(leaf_cert)
# Run the verification
result: int = X509.verify_cert(store_ctx)
if result == 1:
# Validation succeeded
verified_chain = store_ctx.get1_chain()
return verified_chain
elif result == 0:
# Validation failed
verify_result = store_ctx.get_error()
raise CertificateChainVerificationFailed(verify_result)
elif result < 0:
raise RuntimeError("X509_verify_cert() was invoked incorrectly")
else:
raise RuntimeError(f"Result {result}; should never happen according to the OpenSSL documentation")
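# A minimal usage sketch (illustrative only: the CA bundle path and the certificate
# chain are hypothetical and must be supplied by the caller; the leaf goes first):
#
#   verifier = CertificateChainVerifier.from_file(Path("/etc/ssl/certs/ca-bundle.pem"))
#   try:
#       verified_chain = verifier.verify(certificate_chain)
#   except CertificateChainVerificationFailed as error:
#       print(f"Verification failed: {error}")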
|
iulian787/spack
|
var/spack/repos/builtin/packages/prokka/package.py
|
Python
|
lgpl-2.1
| 1,155
| 0.001732
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prokka(Package):
"""Prokka is a software tool to annotate bacterial, archaeal and viral
genomes quickly and produce standards-compliant output files."""
    homepage = "https://github.com/tseemann/prokka"
url = "https://github.com/tseemann/prokka/archive/v1.14.5.tar.gz"
version('1.14.6', sha256='f730b5400ea9e507bfe6c5f3d22ce61960a897195c11571c2e1308ce2533faf8')
depends_on('perl', type='run')
depends_on('perl-bioperl', type='run')
depends_on('perl-xml-simple', type='run')
depends_on('perl-bio-searchio-hmmer', type='run')
depends_on('hmmer', type='run')
depends_on('blast-plus', type='run')
depends_on('prodigal', type='run')
depends_on('tbl2asn', type='run')
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('binaries', prefix.binaries)
install_tree('db', prefix.db)
install_tree('doc', prefix.doc)
|
BLuu13/ISSAMemberManager
|
tests/test_delete.py
|
Python
|
mpl-2.0
| 278
| 0.02518
|
import unittest
from ISSA.MemberMailer.Objects.database import Database
class TestDelete(unittest.TestCase):
def test_delete_mem(self):
test_member = Database()
test = test_member.delete(8)
self.assertEqual(test, "Success!")
if __name__ == '__main__':
unittest.main()
|
michellab/SireUnitTests
|
unittests/SireIO/test_mol2.py
|
Python
|
gpl-3.0
| 5,051
| 0.008513
|
from Sire.Base import *
from Sire.IO import *
from Sire.Mol import *
from glob import glob
from nose.tools import assert_equal, assert_almost_equal
# Check that we have Mol2 support in this version of Sire.
has_mol2 = True
try:
p = Mol2()
except:
# No Mol2 support.
has_mol2 = False
# General test of ability to read and write Mol2 files.
# All Mol2 files in the "../io/" directory are parsed.
# Once the input file is parsed we then check that the parser constructs a
# Sire Molecule from the parsed data. Following this, we then check that the
# parser can convert the molecule back into the correct data format, ready to
# be written to file.
def test_read_write(verbose=False):
if not has_mol2:
return
# Glob all of the Mol2 files in the example file directory.
mol2files = glob('../io/*mol2')
# Loop over all test files.
for file in mol2files:
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: %s" % file)
print("Parallel = %s" % use_par)
# Parse the file into a Mol2 object.
p = Mol2(file, {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Construct a Sire molecular system.
s = p.toSystem()
if verbose:
print("Reconstructing Mol2 data from molecular system...")
# Now re-parse the molecular system.
p = Mol2(s, {"parallel" : wrap(use_par)})
if verbose:
print("Passed!\n")
# Specific atom coordinate data validation test for file "../io/complex.mol2".
def test_atom_coords(verbose=False):
if not has_mol2:
return
# Test atoms.
atoms = ["N", "CA", "C", "O", "CB"]
# Test coordinates.
coords = [[ -2.9880, -2.0590, -2.6220],
[ -3.8400, -2.0910, -7.4260],
[ -6.4250, -3.9190, -10.9580],
[ -6.1980, -6.0090, -14.2910],
[ -9.8700, -6.5500, -15.2480]]
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: ../io/complex.mol2")
print("Parallel = %s" % use_par)
# Parse the Mol2 file.
p = Mol2('../io/complex.mol2', {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Create a molecular system.
s = p.toSystem()
# Get the first molecule.
m = s[MolIdx(0)]
if verbose:
print("Checking atomic coordinates...")
# Loop over all of the atoms.
for i in range(0, len(atoms)):
# Extract the atom from the residue "i + 1".
a = m.atom(AtomName(atoms[i]) + ResNum(i+1))
# Extract the atom coordinates.
c = a.property("coordinates")
# Validate parsed coordinates against known values.
assert_almost_equal( c[0], coords[i][0] )
assert_almost_equal( c[1], coords[i][1] )
assert_almost_equal( c[2], coords[i][2] )
if verbose:
print("Passed!\n")
# Residue and chain validation test for file "../io/complex.mol2".
def test_residues(verbose=False):
if not has_mol2:
return
# Test in parallel and serial mode.
for use_par in [True, False]:
if verbose:
print("Reading Mol2 file: ../io/complex.mol2")
print("Parallel = %s" % use_par)
# Parse the Mol2 file.
p = Mol2('../io/complex.mol2', {"parallel" : wrap(use_par)})
if verbose:
print("Constructing molecular system...")
# Create a molecular system.
s = p.toSystem()
# Get the two molecules.
m1 = s[MolIdx(0)]
m2 = s[MolIdx(1)]
# Get the chains from the molecules.
c1 = m1.chains()
c2 = m2.chains()
if verbose:
print("Checking chain and residue data...")
# Check the number of chains in each molecule.
assert_equal( len(c1), 3 )
assert_equal( len(c2), 1 )
# Check the number of residues in each chain of the first molecule.
assert_equal( len(c1[0].residues()), 118 )
assert_equal( len(c1[1].residues()), 114 )
assert_equal( len(c1[2].residues()), 118 )
# Check the number of residues in the single chain of the second molecule.
assert_equal( len(c2[0].residues()), 1 )
# Check some specific residue names in the first chain from the first molecule.
assert_equal( c1[0].residues()[0].name().toString(), "ResName('PRO1')" )
assert_equal( c1[1].residues()[1].name().toString(), "ResName('MET2')" )
assert_equal( c1[1].residues()[2].name().toString(), "ResName('PHE3')" )
if verbose:
print("Passed!\n")
if __name__ == "__main__":
test_read_write(True)
test_atom_coords(True)
test_residues(True)
|
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/job_get_all_lifetime_statistics_options.py
|
Python
|
mit
| 1,726
| 0.000579
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobGetAllLifetimeStatisticsOptions(Model):
"""Additional parameters for get_all_lifetime_statistics operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
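# A minimal usage sketch (illustrative only): the options model is built with the
# keyword arguments documented above and then passed to the generated batch client
# operation that accepts it, e.g.
#
#   options = JobGetAllLifetimeStatisticsOptions(timeout=60, return_client_request_id=True)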
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_corvette_rebel_crowley.py
|
Python
|
mit
| 458
| 0.045852
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
	result.template = "object/mobile/shared_dressed_corvette_rebel_crowley.iff"
result.attribute_template_id = 9
	result.stfName("npc_name","twilek_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|