| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
opticode/eve | eve/io/media.py | Python | bsd-3-clause | 2,207 | 0
# -*- coding: utf-8 -*-
"""
eve.io.media
~~~~~~~~~~~~
Media storage for Eve-powered APIs.
:copyright: (c) 2014 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class MediaStorage(object):
""" The MediaStorage class provides a standardized API for storing files,
along with a set of default behaviors that all other storage systems can
inherit or override as necessary.
.. versionadded:: 0.3
"""
def __init__(self, app=None):
"""
:param app: the flask application (eve itself). This can be used by
the class to access, amongst other things, the app.config object to
retrieve class-specific settings.
"""
self.app = app
def get(self, id_or_filename):
""" Opens the file given by name or unique id. Note that although the
returned file is guaranteed to be a File object, it might actually be
some subclass. Returns None if no file was found.
"""
raise NotImplementedError
def put(self, content, filename=None, content_type=None):
""" Saves a new file using the storage system, preferably with the name
specified. If there already exists a file with this name, the
storage system may modify the filename as necessary to get a unique
name. Depending on the storage system, a unique id or the actual name
of the stored file will be returned. The content type argument is used
to appropriately identify the file when it is retrieved.
.. versionchanged:: 0.5
Allow filename to be optional (#414).
"""
raise NotImplementedError
def delete(self, id_or_filename):
""" Deletes the file referenced by name or unique id. If deletion is
not supported on the target storage system, this will raise
NotImplementedError instead.
"""
raise NotImplementedError
def exists(self, id_or_filename):
""" Returns True if a file referenced by the given name or unique id
already exists in the storage system, or False if the name is available
for a new file.
"""
raise NotImplementedError
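# A hedged sketch (not part of Eve) of how a concrete backend could subclass the
# interface above; the in-memory dict below is purely illustrative.
class InMemoryMediaStorage(MediaStorage):
    """Toy backend keeping file payloads in a dict keyed by filename."""
    def __init__(self, app=None):
        super(InMemoryMediaStorage, self).__init__(app)
        self._files = {}

    def get(self, id_or_filename):
        entry = self._files.get(id_or_filename)
        return entry[0] if entry else None

    def put(self, content, filename=None, content_type=None):
        key = filename or str(len(self._files))
        self._files[key] = (content, content_type)
        return key

    def delete(self, id_or_filename):
        self._files.pop(id_or_filename, None)

    def exists(self, id_or_filename):
        return id_or_filename in self._files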
wasade/american-gut-web | amgut/lib/data_access/sql_connection.py | Python | bsd-3-clause | 7,011 | 0.000143
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The American Gut Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from contextlib import contextmanager
from psycopg2 import connect, Error as PostgresError
from psycopg2.extras import DictCursor
from amgut.lib.config_manager import AMGUT_CONFIG
class SQLConnectionHandler(object):
"""Encapsulates the DB connection with the Postgres DB"""
def __init__(self, con=None):
if not con:
self._connection = connect(user=AMGUT_CONFIG.user,
password=AMGUT_CONFIG.password,
database=AMGUT_CONFIG.database,
host=AMGUT_CONFIG.host,
port=AMGUT_CONFIG.port)
else:
self._connection = con
self.execute('set search_path to ag, public')
def __del__(self):
self._connection.close()
@contextmanager
def get_postgres_cursor(self):
""" Returns a Postgres cursor, commits on close
Returns
-------
pgcursor : psycopg2.cursor
"""
try:
with self._connection.cursor(cursor_factory=DictCursor) as cur:
yield cur
except:
self._connection.rollback()
raise
else:
self._connection.commit()
def _check_sql_args(self, sql_args):
""" Checks that sql_args have the correct type
Inputs:
sql_args: SQL arguments
Raises a TypeError if sql_args does not have the correct type;
otherwise control simply returns to the caller
"""
# Check that sql arguments have the correct type
if sql_args and type(sql_args) not in [tuple, list, dict]:
raise TypeError("sql_args should be tuple, list or dict. Found %s "
% type(sql_args))
@contextmanager
def _sql_executor(self, sql, sql_args=None, many=False):
"""Executes an SQL query
Parameters
----------
sql: str
The SQL query
sql_args: tuple or list, optional
The arguments for the SQL query
many: bool, optional
If true, performs an execute many call
Returns
-------
pgcursor : psycopg2.cursor
The cursor in which the SQL query was executed
Raises
------
ValueError
If there is some error executing the SQL query
"""
# Check that sql arguments have the correct type
if many:
for args in sql_args:
self._check_sql_args(args)
else:
self._check_sql_args(sql_args)
# Execute the query
with self.get_postgres_cursor() as cur:
try:
if many:
cur.executemany(sql, sql_args)
else:
cur.execute(sql, sql_args)
yield cur
self._connection.commit()
except PostgresError as e:
self._connection.rollback()
try:
err_sql = cur.mogrify(sql, sql_args)
except ValueError:
err_sql = cur.mogrify(sql, sql_args[0])
raise ValueError(("\nError running SQL query: %s"
"\nError: %s" % (err_sql, e)))
def execute_fetchall(self, sql, sql_args=None):
""" Executes a fetchall SQL query
Parameters
----------
sql: str
The SQL query
sql_args: tuple or list, optional
The arguments for the SQL query
Returns
------
list of tuples
The results of the fetchall query
Note: from psycopg2 documentation, only variable values should be bound
via sql_args, it shouldn't be used to set table or field names. For
those elements, ordinary string formatting should be used before
running execute.
"""
with self._sql_executor(sql, sql_args) as pgcursor:
result = pgcursor.fetchall()
return result
def execute_fetchone(self, sql, sql_args=None):
""" Executes a fetchone SQL query
Parameters
----------
sql: str
The SQL query
sql_args: tuple or list, optional
The arguments for the SQL query
Returns
-------
Tuple
The results of the fetchone query
Notes
-----
from psycopg2 documentation, only variable values should be bound
via sql_args, it shouldn't be used to set table or field names. For
those elements, ordinary string formatting should be used before
running execute.
"""
with self._sql_executor(sql, sql_args) as pgcursor:
result = pgcursor.fetchone()
return result
def execute(self, sql, sql_args=None):
""" Executes an SQL query with no results
Parameters
----------
sql: str
The SQL query
sql_args: tuple or list, optional
The arguments for the SQL query
Notes
-----
from psycopg2 documentation, only variable values should be bound
via sql_args, it shouldn't be used to set table or field names. For
those elements, ordinary string formatting should be used before
running execute.
"""
with self._sql_executor(sql, sql_args):
pass
def executemany(self, sql, sql_args_list):
""" Executes an executemany SQL query with no results
Parameters
----------
sql: str
The SQL query
sql_args: list of tuples
The arguments for the SQL query
Note: from psycopg2 documentation, only variable values should be bound
via sql_args, it shouldn't be used to set table or field names. For
those elements, ordinary string formatting should be used before
running execute.
"""
with self._sql_executor(sql, sql_args_list, True):
pass
def execute_proc_return_cursor(self, procname, proc_args):
"""Executes a stored procedure and returns a cursor
Parameters
----------
procname: str
the name of the stored procedure
proc_args: list
arguments sent to the stored procedure
"""
proc_args.append('cur2')
cur = self._connection.cursor()
cur.callproc(procname, proc_args)
cur.close()
return self._connection.cursor('cur2')
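# Hedged usage sketch (table and column names are hypothetical; a reachable
# Postgres instance and valid AMGUT_CONFIG credentials are assumed). Only
# variable values are bound through sql_args, as the docstrings above note:
#
#   conn = SQLConnectionHandler()
#   rows = conn.execute_fetchall(
#       "SELECT some_column FROM some_table WHERE other_column = %s", [value])
#   conn.execute("INSERT INTO some_table (some_column) VALUES (%s)", (value,))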
sanacl/GrimoireELK | grimoire/elk/git.py | Python | gpl-3.0 | 15,586 | 0.002695
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import json
import logging
import re
import time
import requests
from dateutil import parser
from grimoire.elk.enrich import Enrich, metadata
try:
from grimoire.elk.sortinghat import SortingHat
SORTINGHAT_LIBS = True
except ImportError:
SORTINGHAT_LIBS = False
GITHUB = 'https://github.com/'
SH_GIT_COMMIT = 'github-commit'
DEMOGRAPHY_COMMIT_MIN_DATE='1980-01-01'
class GitEnrich(Enrich):
roles = ['Author', 'Commit']
def __init__(self, db_sortinghat=None, db_projects_map=None, json_projects_map=None,
db_user='', db_password='', db_host=''):
super().__init__(db_sortinghat, db_projects_map, json_projects_map,
db_user, db_password, db_host)
self.studies = [self.enrich_demography]
# GitHub API management
self.github_token = None
self.github_logins = {}
self.github_logins_committer_not_found = 0
self.github_logins_author_not_found = 0
self.rate_limit = None
self.rate_limit_reset_ts = None
self.min_rate_to_sleep = 100 # if pending rate < 100 sleep
def set_github_token(self, token):
self.github_token = token
def get_field_author(self):
return "Author"
def get_field_unique_id(self):
return "ocean-unique-id"
def get_fields_uuid(self):
return ["author_uuid", "committer_uuid"]
def get_elastic_mappings(self):
mapping = """
{
"properties": {
"message_analyzed": {
"type": "string",
"index":"analyzed"
}
}
}"""
return {"items":mapping}
def get_identities(self, item):
""" Return the identities from an item.
If the repo is in GitHub, get the usernames from GitHub. """
identities = []
def add_sh_github_identity(user, user_field, rol):
""" Add a new github identity to SH if it does not exists """
github_repo = None
if GITHUB in item['origin']:
github_repo = item['origin'].replace(GITHUB,'')
github_repo = re.sub('.git$', '', github_repo)
if not github_repo:
return
# Try to get the identity from SH
user_data = item['data'][user_field]
sh_identity = SortingHat.get_github_commit_username(self.sh_db, user, SH_GIT_COMMIT)
if not sh_identity:
# Get the username from GitHub
gh_username = self.get_github_login(user_data, rol, commit_hash, github_repo)
# Create a new SH identity with name, email from git and username from github
logging.debug("Adding new identity %s to SH %s: %s", gh_username, SH_GIT_COMMIT, user)
user = self.get_sh_identity(user_data)
user['username'] = gh_username
SortingHat.add_identity(self.sh_db, user, SH_GIT_COMMIT)
else:
if user_data not in self.github_logins:
self.github_logins[user_data] = sh_identity['username']
logging.debug("GitHub-commit exists. username:%s user:%s",
sh_identity['username'], user_data)
commit_hash = item['data']['commit']
if item['data']['Author']:
user = self.get_sh_identity(item['data']["Author"])
identities.append(user)
if self.github_token:
add_sh_github_identity(user, 'Author', 'author')
if item['data']['Commit']:
user = self.get_sh_identity(item['data']['Commit'])
identities.append(user)
if self.github_token:
add_sh_github_identity(user, 'Commit', 'committer')
return identities
def get_sh_identity(self, item, identity_field=None):
# John Smith <john.smith@bitergia.com>
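# e.g. "John Smith <john.smith@bitergia.com>" parses to
#   {'name': 'John Smith', 'email': 'john.smith@bitergia.com', 'username': None}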
identity = {}
git_user = item # by default a specific user dict is expected
if 'data' in item and type(item) == dict:
git_user = item['data'][identity_field]
name = git_user.split("<")[0]
name = name.strip() # Remove space between user and email
email = git_user.split("<")[1][:-1]
identity['username'] = None
identity['email'] = email
identity['name'] = name
return identity
def get_project_repository(self, eitem):
return eitem['origin']
def get_github_login(self, user, rol, commit_hash, repo):
""" rol: author or committer """
login = None
try:
login = self.github_logins[user]
except KeyError:
# Get the login from github API
GITHUB_API_URL = "https://api.github.com"
commit_url = GITHUB_API_URL+"/repos/%s/commits/%s" % (repo, commit_hash)
headers = {'Authorization': 'token ' + self.github_token}
r = self.requests.get(commit_url, headers=headers)
self.rate_limit = int(r.headers['X-RateLimit-Remaining'])
self.rate_limit_reset_ts = int(r.headers['X-RateLimit-Reset'])
logging.debug("Rate limit pending: %s", self.rate_limit)
if self.rate_limit <= self.min_rate_to_sleep:
seconds_to_reset = self.rate_limit_reset_ts - int(time.time()) + 1
if seconds_to_reset < 0:
seconds_to_reset = 0
cause = "GitHub rate limit exhausted."
logging.info("%s Waiting %i secs for rate limit reset.", cause, seconds_to_reset)
time.sleep(seconds_to_reset)
# Retry once we have rate limit
r = self.requests.get(commit_url, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
# commit probably not found, or rate limit exhausted
logging.error("Can't find commit %s %s", commit_url, ex)
return login
commit_json = r.json()
author_login = None
if 'author' in commit_json and commit_json['author']:
author_login = commit_json['author']['login']
else:
self.github_logins_author_not_found += 1
user_login = None
if 'committer' in commit_json and commit_json['committer']:
user_login = commit_json['committer']['login']
else:
self.github_logins_committer_not_found += 1
if rol == "author":
login = author_login
elif rol == "committer":
login = user_login
else:
logging.error("Wrong rol: %s" % (rol))
raise RuntimeError
self.github_logins[user] = login
logging.debug("%s is %s in github (not found %i authors %i committers )", user, login,
self.github_logins_author_not_found,
self.github_logins_committer_not_found)
return login
@metadata
def get_rich_item(self, item):
eitem = {}
# metadata fields to copy
copy_fields = ["metadata__updated_on","metadata__timestamp","ocean-unique-id","origin"]
for f in copy_fields:
adamziel/django_translate | django_translate/management/commands/tranzdump.py | Python | mit | 8,550 | 0.004094
# -*- coding: utf-8 -*-
import os
import os.path
import re
import sys
import string
from django.apps.registry import apps
from django.core.management.base import BaseCommand, CommandError
from python_translate.extractors import base as extractors
from python_translate import operations
from python_translate.translations import MessageCatalogue
from django_translate.utils import bcolors
from django_translate import services
from django_translate import settings
import django_translate  # the package path itself is referenced below when building excluded_paths
class AnyFormatSpec:
def __format__(self, fmt):
return ''
class Formatter(string.Formatter):
def __init__(self):
self.used = set()
def get_value(self, key, args, kwargs):
self.used.add(key)
return AnyFormatSpec()
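# Hedged sketch of how Formatter appears to be used: formatting a message with
# dummy values records every placeholder name the message references.
#
#   fmt = Formatter()
#   fmt.format("Hello {name}, you have {count} new messages")
#   fmt.used   # -> {'name', 'count'}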
class Command(BaseCommand):
help = """Extract translation strings from templates from a given location. It can display them or merge
the new ones into the translation files. When new translation strings are found it can
automatically add a prefix to the translation message.
Example running against app folder
./manage.py tranzdump -l en --path ./ --output-path ./tranz
./manage.py tranzdump -l fr --force --prefix="new_" --app website --exclude ./website/static
"""
def __init__(self, stdout=None, stderr=None, no_color=False):
self.excluded_paths = None
self.locale = None
self.verbosity = None
super(Command, self).__init__(stdout, stderr, no_color)
def add_arguments(self, parser):
parser.add_argument('--locale', '-l', default='en', dest='locale', action='store',
help='Locale to process')
parser.add_argument('--app', '-a', dest='app', action='store',
help='App to scan.')
parser.add_argument('--path', '-p', dest='path', action='store',
help='Path to scan')
parser.add_argument('--output-dir', dest='output_dir', default=None, action='store',
help='Override the default output dir')
parser.add_argument('--exclude-dir', '-x', default=[], dest='excluded_paths', action='append',
help='Paths to exclude. Default is none. Can be used multiple times. '
'Works only with ChainExtractor.')
parser.add_argument('--prefix', dest='prefix', default="__", action='store',
help='Override the default prefix')
parser.add_argument('--format', dest='format', default="yml", action='store',
help='Override the default output format')
parser.add_argument('--dump-messages', dest='dump_messages', action='store_true',
help='Should the messages be dumped in the console')
parser.add_argument('--force', dest='force', action='store_true',
help='Should the update be done')
parser.add_argument('--no-backup', dest='no_backup', action='store_true',
help='Should backup be disabled')
parser.add_argument('--clean', dest='clean', default=False, action='store_true',
help='Should clean not found messages',)
def handle(self, *args, **options):
if options.get('force') != True and options.get('dump_messages') != True:
print((bcolors.WARNING + 'You must choose at least one of --force or --dump-messages' + bcolors.ENDC))
return
if not (bool(options.get('app')) ^ bool(options.get('path'))):
print((bcolors.WARNING + 'You must choose only one of --app or --path' + bcolors.ENDC))
return
if not options.get('output_dir') and (not options.get('app') or not settings.TRANZ_SEARCH_LOCALE_IN_APPS):
print((bcolors.WARNING + 'You must provide an --output-dir when in --path mode, or when TRANZ_SEARCH_LOCALE_IN_APPS ' \
'settings variable is False.' + bcolors.ENDC))
return
self.excluded_paths = [os.path.abspath(path) for path in options['excluded_paths']]
self.excluded_paths += [os.path.abspath(django_translate.__path__[0])]
self.excluded_paths += settings.TRANZ_EXCLUDED_DIRS
# Find directories to scan
if options.get('app'):
for app in list(apps.app_configs.values()):
if app.name == options.get('app'):
current_name = app.name
root_path = app.path
break
else:
raise ValueError("App {0} not found".format(options.get('app')))
else:
root_path = os.path.abspath(options['path'])
current_name = root_path.split("/")[-1]
output_dir = options.get('output_dir') or os.path.join(root_path, 'tranz')
writer = services.writer
print(('Generating "{0}" translation files for "{1}"'.format(options.get('locale'), current_name)))
print("Loading existing messages")
current_catalogue = MessageCatalogue(options['locale'])
loader = services.loader
loader.load_messages(output_dir, current_catalogue)
if len(current_catalogue.messages) == 0:
print(("No messages were loaded, make sure there actually are " \
"translation file in format {{catalog}}.{{locale}}.{{format}} in {0}".format(output_dir)))
return
print("Extracting messages")
extracted_catalogue = MessageCatalogue(options['locale'])
extractor = services.extractor
extractor.set_prefix(options['prefix'])
self.extract_messages(extractor, root_path, extracted_catalogue)
print("Processing catalogues")
operation_class = operations.DiffOperation if options['clean'] else operations.MergeOperation
operation = operation_class(current_catalogue, extracted_catalogue)
if not len(operation.get_domains()):
print("No translations found")
return
if options["dump_messages"]:
for domain in operation.get_domains():
print(("Displaying messages for domain {0}".format(domain)))
new_keys = list(operation.get_new_messages(domain).keys())
all_keys = list(operation.get_messages(domain).keys())
for id in set(all_keys).difference(new_keys):
print(id)
for id in new_keys:
print((bcolors.OKGREEN + id + bcolors.ENDC))
for id in list(operation.get_obsolete_messages(domain).keys()):
print((bcolors.FAIL + id + bcolors.ENDC))
if options["no_backup"]:
writer.disable_backup()
if options["force"]:
print(("Writing files to {0}".format(output_dir)))
writer.write_translations(operation.get_result(), options['format'], {
"path": output_dir,
"default_locale": options['locale']
})
def extract_messages(self, extractor, root_path, extracted_catalogue):
if isinstance(extractor, extractors.ChainExtractor):
subextractors = list(extractor._extractors.values())
else:
subextractors = [extractor]
for subextractor in subextractors:
if not isinstance(subextractor, extractors.BaseExtractor):
subextractor.extract(root_path, extracted_catalogue)
continue
paths = subextractor.extract_files(root_path)
paths = self.filter_exluded_paths(paths)
for path in paths:
try:
subextractor.extract([path], extracted_catalogue)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = 'There was an exception in extractor {0} when processing ' \
'resource "{1}"'.format(type(subextractor).__name__, path)
msg = msg + "\nOriginal message: {0} {1}".format(exc_type.__name__
sivel/ansible-modules-core | network/iosxr/iosxr_config.py | Python | gpl-3.0 | 11,400 | 0.000965
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: iosxr_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage Cisco IOS XR configuration sections
description:
- Cisco IOS XR configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with IOS XR configuration sections in
a deterministic way.
extends_documentation_fragment: iosxr
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block', 'config']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: [ "yes", "no" ]
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
required: false
default: 'configured by iosxr_config'
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
transport: cli
- name: configure top level configuration
iosxr_config:
lines: hostname {{ inventory_hostname }}
provider: "{{ cli }}"
- name: configure interface settings
iosxr_config:
lines:
- description test interface
- ip address 172.31.1.1 255.255.255.0
parents: interface GigabitEthernet0/0/0/0
provider: "{{ cli }}"
- name: load a config from disk and replace the current config
iosxr_config:
src: config.cfg
update: replace
backup: yes
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/iosxr01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.iosxr import NetworkModule, NetworkError
DEFAULT_COMMIT_COMMENT = 'configured by iosxr_config'
def check_args(module, warnings):
if module.params['comment']:
if len(module.params['comment']) > 60:
module.fail_json(msg='comment argument cannot be more than 60 characters')
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
def get_config(module, result):
contents = module.params['config']
if not contents:
contents = module.config.get_config()
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
martwo/ndhist | examples/python/fill_1d_generic_axis.py | Python | bsd-2-clause | 790 | 0.037975
import numpy as np
import ndhist
h = ndhist.ndhist((np.array([0,1,2,3,4,5,6,7,8,9,11], dtype=np.dtype(np.float64)),), dtype=np.dtype(np.float64))
h.fill([-0.1, 0, 0.9, 1, 3.3, 9.9, 10, 11.1])
print(h.bc)
class V(object):
def __init__(self, v=0):
self._v = v
def __lt__(self, rhs):
print("%f < %f"%(self._v, rhs._v))
return self._v < rhs._v
def __add__(self, rhs):
print("%f + %f"%(self._v, rhs._v))
return V(self._v + rhs._v)
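# V only implements what the object-dtype axis below appears to require from its
# edge values: ordering (__lt__) and addition (__add__).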
h2 = ndhist.ndhist((np.array([V(0),V(1),V(2),V(3),V(4),V(5),V(6),V(7),V(8),V(9),V(10)], dtype=np.dtype(object)),), dtype=np.dtype(np.float64))
h2.fill([V(-0.1), V(0), V(0.9), V(1), V(3.3), V(9.9), V(10), V(11.1)] )
print(h2.bc)
microblink/NNPACK | src/x86_64-fma/fft16x16.py | Python | bsd-2-clause | 23,581 | 0.002417
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import butterfly, sqrt2_over_2
from common import butterfly, sqrt2_over_2, cos_npi_over_8, interleave
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
def load_ymm_variable(variable):
assert isinstance(variable, (YMMRegister, LocalVariable))
ymm_variable = variable
if isinstance(variable, LocalVariable):
assert variable.size == YMMRegister.size
ymm_variable = YMMRegister()
VMOVAPS(ymm_variable, variable)
return ymm_variable
def store_ymm_result(variable, result):
assert isinstance(result, YMMRegister)
if isinstance(variable, YMMRegister):
SWAP.REGISTERS(variable, result)
else:
VMOVAPS(variable, result)
def forward_vfft(reg_t0, reg_t8, reg_t_stride, data_out, reg_row_start=None, reg_row_end=None, ymm_load_mask=None):
assert isinstance(reg_t0, GeneralPurposeRegister64)
assert isinstance(reg_t8, GeneralPurposeRegister64)
assert isinstance(reg_t_stride, GeneralPurposeRegister64)
assert isinstance(data_out, list) and len(data_out) == 16
assert ymm_load_mask is None or isinstance(ymm_load_mask, YMMRegister)
out_real, out_imag = data_out[0::2], data_out[1::2]
real, imag = [YMMRegister() for _ in range(8)], [YMMRegister() for _ in range(8)]
imag[0] = LocalVariable(YMMRegister.size)
imag[4] = LocalVariable(YMMRegister.size)
data = interleave(real, imag)
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
row_lo = i
row_hi = row_lo + 8
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VXORPS(ymm_data_lo, ymm_data_lo, ymm_data_lo)
skip_data_lo = Label()
if reg_row_start:
CMP(reg_row_start, row_lo)
JA(skip_data_lo)
if reg_row_end:
CMP(reg_row_end, row_lo)
JBE(skip_data_lo)
if ymm_load_mask is None:
VMOVUPS(ymm_data_lo, [reg_t0])
else:
VMASKMOVPS(ymm_data_lo, ymm_load_mask, [reg_t0])
if i + 1 != 8:
ADD(reg_t0, reg_t_stride)
LABEL(skip_data_lo)
VMOVAPS(ymm_data_hi, ymm_data_lo)
skip_data_hi = Label()
if reg_row_start:
CMP(reg_row_start, row_hi)
JA(skip_data_hi)
if reg_row_end:
CMP(reg_row_end, row_hi)
JBE(skip_data_hi)
if ymm_load_mask is None:
VMOVUPS(ymm_data_hi, [reg_t8])
butterfly(ymm_data_lo, ymm_data_hi)
else:
ymm_temp_hi = YMMRegister()
VMASKMOVPS(ymm_temp_hi, ymm_load_mask, [reg_t8])
VSUBPS(ymm_data_hi, ymm_data_lo, ymm_temp_hi)
VADDPS(ymm_data_lo, ymm_data_lo, ymm_temp_hi)
if i + 1 != 8:
ADD(reg_t8, reg_t_stride)
LABEL(skip_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = w6.im, -w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(imag[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re + w5.im), SQRT2_OVER_2 * (w5.im - w5.re)
butterfly(imag[5], real[5])
SWAP.REGISTERS(real[5], imag[5])
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re - w7.im), -SQRT2_OVER_2 * (w7.re + w7.im)
butterfly(real[7], imag[7], negate_b=True)
fft4_negate_b[id(real[7])] = True
fft4_negate_b[id(imag[7])] = True
# Propagate multiplication by sqrt2_over_2 until the last butterfly in FFT2
ymm_sqrt2_over_2 = YMMRegister()
fft2_scale_b[id(real[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(real[7])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[7])] = ymm_sqrt2_over_2
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = w3.im, -w3.re
# w7.re, w7.im = w7.im, -w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(imag[3])] = True
fft2_negate_b[id(imag[7])] = True
# 4x FFT2: butterfly
# Process the first two elements separately
ymm_real0, ymm_real1 = butterfly(real[0], real[1], writeback=False)
store_ymm_result(out_real[4], ymm_real1) # bit-reversal: 1->4
ymm_imag0, ymm_imag1 = butterfly(imag[0], imag[1], negate_out_b=True, writeback=False)
store_ymm_result(out_imag[4], ymm_imag1) # bit-reversal: 1->4
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
for i, (data_lo, data_hi) in enumerate(zip(data[4:6] + data[8:10] + data[12:14], data[6:8] + data[10:12] + data[14:16])):
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)))
butterfly(ymm_real0, ymm_imag0)
store_ymm_result(out_real[0], ymm_real0)
store_ymm_result(out_imag[0], ymm_imag0)
# Bit reversal
for i in range(8):
new_i = fft8_bitreverse(i)
if new_i > i:
real[i], real[new_i] = real[new_i], real[i]
imag[i], imag[new_i] = imag[new_i], imag[i]
data = interleave(real, imag)
ymm_two_g2_real, ymm_two_g2_imag = YMMRegister(), YMMRegister()
ymm_two_h2_real, ymm_two_h2_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g2_real, real[2], real[6])
VSUBPS(ymm_two_h2_imag, real[6], real[2])
VSUBPS(ymm_two_g2_imag, imag[2], imag[6])
VADDPS(ymm_two_h2_real, imag[2], imag[6])
ymm_two_g1_real, ymm_two_g1_imag = YMMRegister(), YMMRegister()
ymm_two_h1_real, ymm_two_h1_imag = YMMRegister(), YMMRegister()
ymm_real1 = load_ymm_variable(real[1])
VADDPS(ymm_two_g1_real, ymm_real1, real[7])
VSUBPS(ymm_two_h1_imag, real[7], ymm_real1)
ymm_imag1 = load_ymm_variable(imag[1])
VSUBPS(ymm_two_g1_imag, ymm_imag1, imag[7])
VADDPS(ymm_two_h1_real, ymm_imag1, imag[7])
ymm_two_h2_add, ymm_two_h2_sub = YMMRegister(), YMMRegister()
VADDPS(ymm_two_h2_add, ymm_two_h2_real, ymm_two_h2_imag)
VSUBPS(ymm_two_h2_sub, ymm_two_h2_imag, ymm_two_h2_real)
ymm_two_g3_real, ymm_two_g3_imag = YMMRegister(), YMMRegister()
ymm_two_h3_real, ymm_two_h3_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g3_real, real[3], real[5])
VSUBPS(ymm_two_h3_imag, real[5], real[3])
VSUBPS(ymm_two_g3_imag, imag[3], imag[5])
VADDPS(ymm_two_h3_real, imag[3], imag[5])
# const float two_w2_real = two_g2_real + SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w2_imag = two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
# const float two_w6_real = two_g2_real - SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w6_imag = -two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
ymm_two_w2_real, ymm_two_w6_real = YMMRegister(), ymm_two_g2_real
VMOVAPS(ymm_two_w2_real, ymm_two_g2_real)
VFMADD231PS(ymm_two_w2_real, ymm_two_h2_add, ymm_sqrt2_over_2)
VFNMADD231PS(ymm_two_w6_real, ymm_two_h2_add, ymm_sqrt2_over_2)
ymm_two_w2_imag, ymm_two_w6_imag = YMMRegister(), ymm_two_g2_imag
VMOVAPS(ymm_two_w2_imag, ymm_two_g2_imag)
VFMADD231PS(ymm_two_w2_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_two_w6_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_two_w2_real, ymm_two_w2_real, ymm_half)
store_ymm_result(out_real[2], ymm_two_w2_real)
VMUL
FiveEye/ProblemSet | LeetCode/lc992.py | Python | mit | 1,820 | 0.008242
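# The helpers below form a Fenwick (binary indexed) tree: add() is a point
# update, get() a prefix-sum query, and find()/findR() search over prefix sums.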
def add(s, i, x):
n = len(s)
while i < n:
s[i] += x
i += (i&(-i))
def get(s, i):
ret = 0
while i != 0:
ret += s[i]
i -= (i&(-i))
return ret
def find(s, k):
n = len(s)
beg = 0
end = n
tt = get(s, n-1)
while beg < end:
mid = (beg + end) // 2
t = get(s, mid)
if tt - t >= k:
beg = mid + 1
else:
end = mid
return end
def findR(s, k):
n = len(s)
tt = get(s, n-1)
tk = tt - k
if tk < 0:
return 0
i = 0
w = 1
while w * 2 < n:
w *= 2
while w > 0:
while i+w >= n or s[i+w] > tk:
w //= 2
if w == 0:
break
if w == 0:
break
tk -= s[i+w]
i += w
w //= 2
#print("findR tk:", tk, i+1)
return i+1
from typing import List
class Solution:
def subarraysWithKDistinct(self, A: List[int], K: int) -> int:
n = len(A)
#print(n)
pre = [-1 for _ in range(n+10)]
lst = [-1 for _ in range(n+10)]
for i in range(n):
pre[i] = lst[A[i]]
lst[A[i]] = i
s = [0 for _ in range(n+10)]
for i in range(n+1):
if lst[i] == -1:
continue
add(s, lst[i]+1, 1)
ans = 0
for i in range(n-1, -1, -1):
#print("i:", i)
#for j in range(n+1):
# print(j, get(s, j))
#print("findR:", findR(s, K), findR(s, K+1))
#print("find:", find(s, K), find(s, K+1))
if get(s, len(s) - 1) < K:
break
ans += findR(s, K) - findR(s, K+1)
add(s, i+1, -1)
if pre[i] != -1:
add(s, pre[i]+1, 1)
return ans
gedankenstuecke/opensnp-fun | 01_analyse.py | Python | mit | 2,478 | 0.031881
#!/usr/bin/env python
# encoding: utf-8
import glob
import os
import subprocess
cwd = os.getcwd()
'''
Convert 23andMe files to
PLINK format
'''
def twenty3_and_me_files():
"""Return the opensnp files that are 23 and me format"""
all_twenty3_and_me_files= glob.glob('../opensnp_datadump.current/*.23andme.txt')
fifteen_mb = 15 * 1000 * 1000
non_junk_files = [path for path in all_twenty3_and_me_files if os.path.getsize(path) > fifteen_mb]
return non_junk_files
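# The 15 MB size cutoff presumably filters out truncated or otherwise junk uploads.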
def run_plink_format(usable_files):
"""Reformat the 23andMe files into plink binary s
|
tuff"""
try:
os.mkdir("23andme_plink")
except:
print "can't create output-folder"
exit
for f in usable_files:
# gid is the genotype-ID
gid = f.split("/")[-1].split("_")[1].replace("file","")
# converts the genotyping file to plink format, using the gid as sample name
call = "./plink --23file "+ f + " F" + gid + "ID" + gid + "I 1"
call += " --out 23andme_plink/genotypeid_" + gid
print "convert gid " + gid
subprocess.call(call,shell=True)
def merge_plink():
"""Merge the Files, will crash at first and then needs to do some weird corrections"""
try:
os.mkdir("23andme_merged")
except:
pass
allbeds = glob.glob("23andme_plink/*.bed")
start_bed = allbeds[0].replace(".bed","")
list_bed = allbeds
listhandle = open("merge_list.txt","w")
for i in list_bed:
# check that files have been processed and are working so far.
if os.path.isfile(i.replace(".bed",".fam")) and os.path.isfile(i.replace(".bed",".bim")):
listhandle.write(cwd + "/"+i.replace(".bed","")+"\n")
call_merge = "./plink --bfile " + cwd + "/" + start_bed + " --merge-list " + cwd + "/merge_list.txt --make-bed --out " + cwd + "/23andme_merged/merge-pass1"
print "merging 23andme data, first pass"
print call_merge
tout = open("merge.sh","w")
tout.write(call_merge + "\n")
tout.close()
subprocess.call("chmod +x ./merge.sh",shell=True)
#x = subprocess.call(["./plink","--bfile",cwd+"/"+start_bed,"--merge-list",cwd + "/merge_list.txt","--make-bed","--out",cwd+"/23andme_merged/merge-pass1"])
#print x
cmd = ''.join([
'./plink',
' --bfile %s/%s' % (cwd, start_bed),
' --merge-list %s/%s' % (cwd, '/merge_list.txt'),
' --make-bed',
' --out %s/%s' % (cwd, '23andme_merged/merge-pass1')
])
# if subprocess.call doesn't work, copy paste this to debug
print cmd
subprocess.call(cmd, shell=True)
usable_files = twenty3_and_me_files()
#run_plink_format(usable_files)
merge_plink()
narendergupta/mooc | mooc/src/gen_utils.py | Python | gpl-2.0 | 1,796 | 0.007795
import os
def lowercase(source):
if type(source) is str:
return source.lower()
elif type(source) is dict:
return dict((k,lowercase(v)) for k,v in source.items())
elif type(source) is list:
return [lowercase(x) for x in source]
else:
return source
def unique(source):
if type(source) is list:
return list(set(source))
elif type(source) is dict:
return dict((k,unique(v)) for k,v in source.items())
else:
return source
def float_precise_str(source, precision=2):
if type(source) is float:
precision_str = '%.' + str(precision) + 'f'
return precision_str % source
elif type(source) is dict:
return dict((k,float_precise_str(v,precision)) for k,v in source.items())
elif type(source) is list:
return [float_precise_str(x,precision) for x in source]
else:
return source
def skewness(items, label1, label2):
numer = min(items.count(label1),items.count(label2))
denom = max(items.count(label1),items.count(label2))
skewness = float(numer)/float(denom)
if skewness < 0.5:
skewness = 1.0 - skewness
return skewness
def index_min(items):
if len(items) > 0:
return min(range(len(items)), key=items.__getitem__)
return None
def index_max(items):
if len(items) > 0:
return max(range(len(items)), key=items.__getitem__)
return None
def ensure_dir_exists(dir_path):
if os.path.exists(dir_path) is False or\
os.path.isdir(dir_path) is False:
os.makedirs(dir_path)
def does_file_exist(file_path):
return os.path.exists(file_path) and os.path.isfile(file_path)
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
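# Example: list(drange(0.0, 1.0, 0.25)) yields [0.0, 0.25, 0.5, 0.75].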
ARPA-SIMC/arkimet | python/tests/test_arki_check.py | Python | gpl-2.0 | 34,243 | 0.004088
# python 3.7+ from __future__ import annotations
import arkimet as arki
import unittest
import os
from contextlib import contextmanager
from arkimet.cmdline.check import Check
from arkimet.test import Env, CmdlineTestMixin, LogCapture
class StatsReporter:
def __init__(self):
self.op_progress = []
self.op_manual_intervention = []
self.op_aborted = []
self.op_report = []
self.seg_info = []
self.seg_repack = []
self.seg_archive = []
self.seg_delete = []
self.seg_deindex = []
self.seg_rescan = []
self.seg_tar = []
self.seg_compress = []
self.seg_issue52 = []
def operation_progress(self, ds, operation, message):
self.op_progress.append((ds, operation, message))
def operation_manual_intervention(self, ds, operation, message):
self.op_manual_intervention.append((ds, operation, message))
def operation_aborted(self, ds, operation, message):
self.op_aborted.append((ds, operation, message))
def operation_report(self, ds, operation, message):
self.op_report.append((ds, operation, message))
def segment_info(self, ds, relpath, message):
self.seg_info.append((ds, relpath, message))
def segment_repack(self, ds, relpath, message):
self.seg_repack.append((ds, relpath, message))
def segment_archive(self, ds, relpath, message):
self.seg_archive.append((ds, relpath, message))
def segment_delete(self, ds, relpath, message):
self.seg_delete.append((ds, relpath, message))
def segment_deindex(self, ds, relpath, message):
self.seg_deindex.append((ds, relpath, message))
def segment_rescan(self, ds, relpath, message):
self.seg_rescan.append((ds, relpath, message))
def segment_tar(self, ds, relpath, message):
self.seg_tar.append((ds, relpath, message))
def segment_compress(self, ds, relpath, message):
self.seg_compress.append((ds, relpath, message))
def segment_issue51(self, ds, relpath, message):
self.seg_issue51.append((ds, relpath, message))
class ArkiCheckTestsBase(CmdlineTestMixin):
command = Check
@contextmanager
def datasets(self, **kw):
kw.setdefault("format", "grib")
with Env(**self.dataset_config(**kw)) as env:
yield env
def assertCheckClean(self, env, files=None, items=None, **kw):
"""
Check that the dataset comes out clean from a check run
"""
reporter = StatsReporter()
state = env.segment_state(**kw)
if files is not None:
self.assertEqual(len(state), files)
not_ok = [x for x in state.items() if x[1] != "OK"]
self.assertEqual(not_ok, [])
env.check(reporter=reporter, **kw)
self.assertEqual(reporter.op_manual_intervention, [])
self.assertEqual(reporter.op_aborted, [])
self.assertEqual(reporter.seg_repack, [])
self.assertEqual(reporter.seg_archive, [])
self.assertEqual(reporter.seg_delete, [])
self.assertEqual(reporter.seg_deindex, [])
self.assertEqual(reporter.seg_rescan, [])
self.assertEqual(reporter.seg_tar, [])
self.assertEqual(reporter.seg_compress, [])
self.assertEqual(reporter.seg_issue52, [])
if items is not None:
mdc = env.query()
self.assertEqual(len(mdc), items)
def assertSegmentExists(self, env, pathname, extensions=None):
"""
Check that the given segment exists, optionally verifying that it only
exists with the given extensions
"""
if extensions is None:
extensions = ("", ".gz", ".tar", ".zip")
if not any(os.path.exists(pathname + ext) for ext in extensions):
self.fail("Segment {} does not exist (also tried .gz, .tar, and .zip)".format(pathname))
else:
all_extensions = frozenset(("", ".gz", ".tar", ".zip", ".gz.idx", ".metadata", ".summary"))
if env.ds_cfg["type"] == "simple" or ".archive/" in pathname:
extensions = list(extensions) + [".metadata", ".summary"]
for ext in extensions:
if not os.path.exists(pathname + ext):
self.fail("Segment {}{} does not exist but it should".format(pathname, ext))
for ext in all_extensions - frozenset(extensions):
if os.path.exists(pathname + ext):
self.fail("Segment {}{} exists but it should not".format(pathname, ext))
def assertSegmentNotExists(self, env, pathname):
"""
Check that the given segment does not exist in any possible version
"""
all_extensions = ("", ".gz", ".tar", ".zip", ".gz.idx", ".metadata", ".summary")
for ext in all_extensions:
if os.path.exists(pathname + ext):
self.fail("Segment {}{} exists but it should not".format(pathname, ext))
def assertQueryResults(self, env, imported, results):
"""
Compare the query results of the dataset against what was imported
"""
mds = env.query(with_data=True)
expected = [imported[i].to_python("reftime") for i in results]
mds = [x.to_python("reftime") for x in mds]
self.assertEqual(mds, expected)
def test_clean(self):
with self.datasets() as env:
env.import_file("inbound/fixture.grib1")
out = self.call_output_success("testenv/testds")
self.assertEqual(out, "testds: check 3 files ok\n")
out = self.call_output_success("testenv/testds", "--fix")
self.assertEqual(out, "testds: check 3 files ok\n")
out = self.call_output_success("testenv/testds", "--repack")
self.assertEqual(out, "testds: repack 3 files ok\n")
out = self.call_output_success("testenv/testds", "--repack", "--fix")
self.assertRegex(
out,
r"(testds: repack: running VACUUM ANALYZE on the dataset index(, if applicable)?\n)?"
r"(testds: repack: rebuilding the summary cache\n)?"
r"testds: repack 3 files ok\n"
)
def test_clean_filtered(self):
with self.datasets() as env:
env.import_file("inbound/fixture.grib1")
out = self.call_output_success("testenv/testds", "--filter=reftime:>=2007-07-08")
self.assertEqual(out, "testds: check 2 files ok\n")
out = self.call_output_success("testenv/testds", "--fix", "--filter=reftime:>=2007-07-08")
self.assertEqual(out, "testds: check 2 files ok\n")
out = self.call_output_success("testenv/testds", "--repack", "--filter=reftime:>=2007-07-08")
self.assertEqual(out, "testds: repack 2 files ok\n")
out = self.call_output_success("testenv/testds", "--repack", "--fix", "--filter=reftime:>=2007-07-08")
self.assertRegex(
out,
r"(testds: repack: running VACUUM ANALYZE on the dataset index(, if applicable)?\n)?"
r"(testds: repack: rebuilding the summary cache\n)?"
r"testds: repack 2 files ok\n"
)
def test_remove_all(self):
with self.datasets() as env:
env.import_file("inbound/fixture.grib1")
self.assertTrue(os.path.exists("testenv/testds/2007/07-08.grib"))
self.assertTrue(os.path.exists("testenv/testds/2007/07-07.grib"))
self.assertTrue(os.path.exists("testenv/testds/2007/10-09.grib"))
out = self.call_output_success("testenv/testds", "--remove-all")
self.assertEqual(
out,
"testds:2007/07-07.grib: should be deleted\n"
"testds:2007/07-08.grib: should be deleted\n"
"testds:2007/10-09.grib: should be deleted\n"
)
self.assertTrue(os.path.exists("testenv/testds/2007/07-08.grib"))
self.assertTrue(os.path.exists("testenv/testds/2007/07-07.grib"))
self.assertTrue(os.path.e
ericlee0803/surrogate-GCP | gp/GPsim.py | Python | bsd-3-clause | 1,664 | 0.026442
import sys
import time
import logging
import threading
import GPy
import numpy as np
import matplotlib.pyplot as plt
import pdb
from GPhelpers import *
from IPython.display import display
from poap.strategy import FixedSampleStrategy
from poap.strategy import InputStrategy
from poap.tcpserve import ThreadedTCPServer
from poap.tcpserve import SimpleSocketWorker
from scipy.stats import norm
class GPsim:
def __init__(self, batchsize=100, prunerate=.2, timebound=10, money=1000, fevalcost=1):
self.batchsize = batchsize
self.prunerate = prunerate
self.timebound = timebound
self.money = money
self.fevalcost = fevalcost
def run(self, f, bounds):
breakcond = 1e-5
# run initial batch, deduct money
self.money = self.money - self.batchsize*self.fevalcost
eval_logX = np.random.uniform(bounds[0], bounds[1], self.batchsize)
eval_logY = f(eval_logX)
ybest = np.amin(eval_logY)
while(self.money > 0):
# calc Gaussian Process
m = calcGP(eval_logX, eval_logY)
# calc batchsize, break if necessary
self.batchsize = np.floor(self.batchsize*(1-self.prunerate))
if(self.batchsize < 2):
print "Batch Size reached Minimum"
break
# Deduct Money, evaluate new batch
self.money = self.money - self.batchsize*self.fevalcost
X = batchNewEvals_EI(m, bounds=1, batchsize=self.batchsize, fidelity=1000)
Y = f(X)
eval_logY = np.concatenate([eval_logY, Y])
eval_logX = np.concatenate([eval_logX, X])
ynew = np.amin(eval_logY)
if(np.absolute(ynew - ybest) < breakcond):
print "Break Condition Reached, Improvement Halted"
print "Num evals:", eval_logY.size
break
plotGP(m)
print
Ultimaker/Cura | plugins/3MFReader/WorkspaceDialog.py | Python | lgpl-3.0 | 14,135 | 0.002971
# Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import List, Optional, Dict, cast
from PyQt5.QtCore import pyqtSignal, QObject, pyqtProperty, QCoreApplication
from UM.FlameProfiler import pyqtSlot
from UM.PluginRegistry import PluginRegistry
from UM.Application import Application
from UM.i18n import i18nCatalog
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.Settings.GlobalStack import GlobalStack
from .UpdatableMachinesModel import UpdatableMachinesModel
import os
import threading
import time
from cura.CuraApplication import CuraApplication
i18n_catalog = i18nCatalog("cura")
class WorkspaceDialog(QObject):
showDialogSignal = pyqtSignal()
def __init__(self, parent = None):
super().__init__(parent)
self._component = None
self._context = None
self._view = None
self._qml_url = "WorkspaceDialog.qml"
self._lock = threading.Lock()
self._default_strategy = None
self._result = {"machine": self._default_strategy,
"quality_changes": self._default_strategy,
"definition_changes": self._default_strategy,
"material": self._default_strategy}
self._override_machine = None
self._visible = False
self.showDialogSignal.connect(self.__show)
self._has_quality_changes_conflict = False
self._has_definition_changes_conflict = False
self._has_machine_conflict = False
self._has_material_conflict = False
self._has_visible_settings_field = False
self._num_visible_settings = 0
self._num_user_settings = 0
self._active_mode = ""
self._quality_name = ""
self._num_settings_overridden_by_quality_changes = 0
self._quality_type = ""
self._intent_name = ""
self._machine_name = ""
self._machine_type = ""
self._variant_type = ""
self._material_labels = []
self._extruders = []
self._objects_on_plate = False
self._is_printer_group = False
self._updatable_machines_model = UpdatableMachinesModel(self)
machineConflictChanged = pyqtSignal()
qualityChangesConflictChanged = pyqtSignal()
materialConflictChanged = pyqtSignal()
numVisibleSettingsChanged = pyqtSignal()
activeModeChanged = pyqtSignal()
qualityNameChanged = pyqtSignal()
hasVisibleSettingsFieldChanged = pyqtSignal()
numSettingsOverridenByQualityChangesChanged = pyqtSignal()
qualityTypeChanged = pyqtSignal()
intentNameChanged = pyqtSignal()
machineNameChanged = pyqtSignal()
updatableMachinesChanged = pyqtSignal()
materialLabelsChanged = pyqtSignal()
objectsOnPlateChanged = pyqtSignal()
numUserSettingsChanged = pyqtSignal()
machineTypeChanged = pyqtSignal()
variantTypeChanged = pyqtSignal()
extrudersChanged = pyqtSignal()
isPrinterGroupChanged = pyqtSignal()
@pyqtProperty(bool, notify = isPrinterGroupChanged)
def isPrinterGroup(self) -> bool:
return self._is_printer_group
def setIsPrinterGroup(self, value: bool):
if value != self._is_printer_group:
self._is_printer_group = value
self.isPrinterGroupChanged.emit()
@pyqtProperty(str, notify=variantTypeChanged)
def variantType(self) -> str:
return self._variant_type
def setVariantType(self, variant_type: str) -> None:
if self._variant_type != variant_type:
self._variant_type = variant_type
self.variantTypeChanged.emit()
@pyqtProperty(str, notify=machineTypeChanged)
def machineType(self) -> str:
return self._machine_type
def setMachineType(self, machine_type: str) -> None:
self._machine_type = machine_type
self.machineTypeChanged.emit()
def setNumUserSettings(self, num_user_settings: int) -> None:
if self._num_user_settings != num_user_settings:
self._num_user_settings = num_user_settings
self.numVisibleSettingsChanged.emit()
@pyqtProperty(int, notify=numUserSettingsChanged)
def numUserSettings(self) -> int:
return self._num_user_settings
@pyqtProperty(bool, notify=objectsOnPlateChanged)
def hasObjectsOnPlate(self) -> bool:
return self._objects_on_plate
def setHasObjectsOnPlate(self, objects_on_plate):
if self._objects_on_plate != objects_on_plate:
self._objects_on_plate = objects_on_plate
self.objectsOnPlateChanged.emit()
@pyqtProperty("QVariantList", notify = materialLabelsChanged)
def materialLabels(self) -> List[str]:
return self._material_labels
def setMaterialLabels(self, material_labels: List[str]) -> None:
if self._material_labels != material_labels:
self._material_labels = material_labels
self.materialLabelsChanged.emit()
@pyqtProperty("QVariantList", notify=extrudersChanged)
def extruders(self):
return self._extruders
def setExtruders(self, extruders):
if self._extruders != extruders:
self._extruders = extruders
self.extrudersChanged.emit()
@pyqtProperty(str, notify = machineNameChanged)
def machineName(self) -> str:
return self._machine_name
def setMachineName(self, machine_name: str) -> None:
if self._machine_name != machine_name:
self._machine_name = machine_name
self.machineNameChanged.emit()
@pyqtProperty(QObject, notify = updatableMachinesChanged)
def updatableMachinesModel(self) -> UpdatableMachinesModel:
return cast(UpdatableMachinesModel, self._updatable_machines_model)
def setUpdatableMachines(self, updatable_machines: List[GlobalStack]) -> None:
self._updatable_machines_model.update(updatable_machines)
self.updatableMachinesChanged.emit()
@pyqtProperty(str, notify=qualityTypeChanged)
def qualityType(self) -> str:
return self._quality_type
def setQualityType(self, quality_type: str) -> None:
if self._quality_type != quality_type:
self._quality_type = quality_type
self.qualityTypeChanged.emit()
@pyqtProperty(int, notify=numSettingsOverridenByQualityChangesChanged)
def numSettingsOverridenByQualityChanges(self) -> int:
return self._num_settings_overridden_by_quality_changes
def setNumSettingsOverriddenByQualityChanges(self, num_settings_overridden_by_quality_changes: int) -> None:
self._num_settings_overridden_by_quality_changes = num_settings_overridden_by_quality_changes
self.numSettingsOverridenByQualityChangesChanged.emit()
@pyqtProperty(str, notify=qualityNameChanged)
def qualityName(self) -> str:
return self._quality_name
def setQualityName(self, quality_name: str) -> None:
if self._quality_name != quality_name:
self._quality_name = quality_name
self.qualityNameChanged.emit()
@pyqtProperty(str, notify = intentNameChanged)
def intentName(self) -> str:
return self._intent_name
def setIntentName(self, intent_name: str) -> None:
if self._intent_name != intent_name:
self._intent_name = intent_name
self.intentNameChanged.emit()
@pyqtProperty(str, notify=activeModeChanged)
def activeMode(self) -> str:
return self._active_mode
def setActiveMode(self, active_mode: int) -> None:
if active_mode == 0:
self._active_mode = i18n_catalog.i18nc("@title:tab", "Recommended")
else:
self._active_mode = i18n_catalog.i18nc("@title:tab", "Custom")
self.activeModeChanged.emit()
@pyqtProperty(bool, notify = hasVisibleSettingsFieldChanged)
def hasVisibleSettingsField(self) -> bool:
return self._has_visible_settings_field
def setHasVisibleSettingsField(self, has_visible_settings_field: bool) -> None:
self._has_visible_settings_field = has_visible_settings_field
self.hasVisibleSettingsFieldChanged.emit()
@pyqtProperty(int, constant = Tr
indico/indico-migrate | indico_migrate/importer.py | Python | gpl-3.0 | 6,978 | 0.002436
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import time
from operator import itemgetter
from sqlalchemy.sql import func, select
from indico.core.db.sqlalchemy import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.modules.groups import GroupProxy
from indico_migrate.logger import logger_proxy
from indico_migrate.util import convert_to_unicode
class Importer(object):
step_name = ''
#: Specify plugins that need to be loaded for the import (e.g. to access its .settings property)
plugins = frozenset()
print_info = logger_proxy('info')
print_success = logger_proxy('success')
print_warning = logger_proxy('warning')
print_error = logger_proxy('error')
print_log = logger_proxy('log')
def __init__(self, logger, app, sqlalchemy_uri, zodb_root, verbose, dblog, default_group_provider, tz, **kwargs):
self.sqlalchemy_uri = sqlalchemy_uri
self.quiet = not verbose
self.dblog = dblog
self.zodb_root = zodb_root
self.app = app
self.tz = tz
self.default_group_provider = default_group_provider
self.logger = logger
self.initialize_global_ns(Importer._global_ns)
def initialize_global_ns(self, g):
pass
@property
def log_prefix(self):
return '%[cyan]{:<14}%[reset]'.format('[%[grey!]{}%[cyan]]'.format(self.step_name))
@property
def makac_info(self):
return self.zodb_root['MaKaCInfo']['main']
@property
def global_ns(self):
return Importer._global_ns
def __repr__(self):
return '<{}({})>'.format(type(self).__name__, self.sqlalchemy_uri)
def flushing_iterator(self, iterable, n=5000):
"""Iterates over `iterable` and flushes the ZODB cache every `n` items.
:param iterable: an iterable object
:param n: number of items to flush after
"""
conn = self.zodb_root._p_jar
for i, item in enumerate(iterable, 1):
yield item
if i % n == 0:
conn.sync()
def convert_principal(self, old_principal):
"""Converts a legacy principal to PrincipalMixin style"""
if old_principal.__class__.__name__ == 'Avatar':
principal = self.global_ns.avatar_merged_user.get(old_principal.id)
if not principal and 'email' in old_principal.__dict__:
email = convert_to_unicode(old_principal.__dict__['email']).lower()
principal = self.global_ns.users_by_primary_email.get(
email, self.global_ns.users_by_secondary_email.get(email))
if principal is not None:
self.print_warning('Using {} for {} (matched via {})'.format(principal, old_principal, email))
if not principal:
self.print_error("User {} doesn't exist".format(old_principal.id))
return principal
elif old_principal.__class__.__name__ == 'Group':
assert int(old_principal.id) in self.global_ns.all_groups
return GroupProxy(int(old_principal.id))
elif old_principal.__class__.__name__ in {'CERNGroup', 'LDAPGroup', 'NiceGroup'}:
return GroupProxy(old_principal.id, self.default_group_provider)
def convert_principal_list(self, opt):
"""Convert ACL principals to new objects"""
return set(filter(None, (self.convert_principal(principal) for principal in opt._PluginOption__value)))
    def fix_sequences(self, schema=None, tables=None):
for name, cls in sorted(db.Model._decl_class_registry.iteritems(), key=itemgetter(0)):
table = getattr(cls, '__table__', None)
if table is None:
continue
elif schema is not None and table.schema != schema:
continue
elif tables is not None and cls.__tablename__ not in tables:
continue
            # Check if we have a single autoincrementing primary key
candidates = [col for col in table.c if col.autoincrement and col.primary_key]
if len(candidates) != 1 or not isinstance(candidates[0].type, db.Integer):
continue
serial_col = candidates[0]
sequence_name = '{}.{}_{}_seq'.format(table.schema, cls.__tablename__, serial_col.name)
query = select([func.setval(sequence_name, func.max(serial_col) + 1)], table)
db.session.execute(query)
db.session.commit()
def protection_from_ac(self, target, ac, acl_attr='acl', ac_attr='allowed', allow_public=False):
"""Convert AccessController data to ProtectionMixin style.
This needs to run inside the context of `patch_default_group_provider`.
:param target: The new object that uses ProtectionMixin
:param ac: The old AccessController
:param acl_attr: The attribute name for the acl of `target`
:param ac_attr: The attribute name for the acl in `ac`
:param allow_public: If the object allows `ProtectionMode.public`.
Otherwise, public is converted to inheriting.
"""
if ac._accessProtection == -1:
target.protection_mode = ProtectionMode.public if allow_public else ProtectionMode.inheriting
elif ac._accessProtection == 0:
target.protection_mode = ProtectionMode.inheriting
elif ac._accessProtection == 1:
target.protection_mode = ProtectionMode.protected
acl = getattr(target, acl_attr)
for principal in getattr(ac, ac_attr):
principal = self.convert_principal(principal)
assert principal is not None
acl.add(principal)
else:
raise ValueError('Unexpected protection: {}'.format(ac._accessProtection))
class TopLevelMigrationStep(Importer):
def run(self):
start = time.time()
self.pre_migrate()
try:
self.migrate()
finally:
self.post_migrate()
self.print_log('%[cyan]{:.06f} seconds%[reset]\a'.format((time.time() - start)))
def pre_migrate(self):
pass
def migrate(self):
raise NotImplementedError
def post_migrate(self):
pass
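# Illustrative sketch (an assumption for clarity, not a real indico-migrate step):
# a concrete migration step subclasses TopLevelMigrationStep and overrides
# migrate(); run() then wraps it with pre_migrate()/post_migrate() and timing.
# The step name and body below are hypothetical.
class ExampleNoopStep(TopLevelMigrationStep):
    step_name = 'example'

    def migrate(self):
        # A real step would walk ZODB objects (e.g. via self.flushing_iterator)
        # and create SQLAlchemy rows; this placeholder only logs a message.
        self.print_info('nothing to migrate in this example step')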
|
ksmaheshkumar/grr
|
lib/data_stores/mysql_data_store_test.py
|
Python
|
apache-2.0
| 1,991
| 0.008036
|
#!/usr/bin/env python
"""Tests the mysql data store."""
import unittest
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=unused-import,g-bad-import-order
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import data_store_test
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.data_stores import mysql_data_store
class MysqlTestMixin(object):
def InitDatastore(self):
self.token = access_control.ACLToken(username="test",
reason="Running tests")
# Use separate tables for benchmarks / tests so they can be run in parallel.
config_lib.CONFIG.Set("Mysql.database_name", "grr_test_%s" %
self.__class__.__name__)
config_lib.CONFIG.Set("Mysql.table_name", "aff4_test")
try:
data_store.DB = mysql_data_store.MySQLDataStore()
data_store.DB.security_manager = test_lib.MockSecurityManager()
data_store.DB.RecreateDataBase()
except Exception as e:
logging.debug("Error while connecting to MySQL db: %s.", e)
raise unittest.SkipTest("Skipping since Mysql db is not reachable.")
def DestroyDatastore(self):
data_store.DB.DropDatabase()
def testCorrectDataStore(self):
self.assertTrue(isinstance(data_store.DB, mysql_data_store.MySQLDataStore))
class MysqlDataStoreTest(MysqlTestMixin, data_store_test._DataStoreTest):
"""Test the mysql data store abstraction."""
class MysqlDataStoreBenchmarks(MysqlTestMixin,
data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlDataStoreCSVBenchmarks(MysqlTestMixin,
data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
|
FND/tiddlyspace
|
setup.py
|
Python
|
bsd-3-clause
| 1,806
| 0.018826
|
AUTHOR = 'Osmosoft'
AUTHOR_EMAIL = 'tiddlyspace@osmosoft.com'
NAME = 'tiddlywebplugins.tiddlyspace'
DESCRIPTION = 'A discoursive social model for TiddlyWiki'
VERSION = '1.0.76' # NB: duplicate of tiddlywebplugins.tiddlyspace.__init__
import os
from setuptools import setup, find_packages
setup(
namespace_packages = ['tiddlywebplugins'],
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = open(os.path.join(os.path.dirname(__file__), 'README')).read(),
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = 'http://pypi.python.org/pypi/%s' % NAME,
platforms = 'Posix; MacOS X; Windows',
packages = find_packages(exclude=['test']),
scripts = ['tiddlyspace'],
install_requires = [
'setuptools',
'tiddlyweb>=1.2.51',
'tiddlywebwiki>=0.57.0',
'tiddlywebplugins.utils>=1.0',
'tiddlywebplugins.logout>=0.6',
'tiddlywebplugins.virtualhosting',
'tiddlywebplugins.hashmaker>=0.3',
'tiddlywebplugins.socialusers>=0.6',
'tiddlywebplugins.magicuser>=0.3',
'tiddlywebplugins.openid2>=0.5',
'tiddlywebplugins.cookiedomain>=0.6',
'tiddlywebplugins.mselect',
        'tiddlywebplugins.oom',
'tiddlywebplugins.prettyerror>=0.9.2',
'tiddlywebplugins.pathinfohack>=0.9.1',
        'tiddlywebplugins.form',
'tiddlywebplugins.reflector>=0.6',
'tiddlywebplugins.atom>=1.3.7',
'tiddlywebplugins.mysql2>=2.1.0',
'tiddlywebplugins.privateer',
'tiddlywebplugins.lazy>=0.4',
'tiddlywebplugins.relativetime',
'tiddlywebplugins.jsonp>=0.4',
'tiddlywebplugins.templates',
'selector<0.9.0'
],
include_package_data = True,
zip_safe = False,
license = 'BSD',
)
|
kuke/models
|
fluid/adversarial/tutorials/mnist_model.py
|
Python
|
apache-2.0
| 2,821
| 0
|
"""
CNN on mnist data using fluid api of paddlepaddle
"""
import paddle
import paddle.fluid as fluid
def mnist_cnn_model(img):
"""
Mnist cnn model
Args:
        img(Variable): the input image to be recognized
Returns:
Variable: the label prediction
"""
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
num_filters=20,
filter_size=5,
pool_size=2,
pool_stride=2,
act='relu')
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
num_filters=50,
filter_size=5,
pool_size=2,
pool_stride=2,
act='relu')
fc = fluid.layers.fc(input=conv_pool_2, size=50, act='relu')
logits = fluid.layers.fc(input=fc, size=10, act='softmax')
return logits
def main():
"""
Train the cnn model on mnist datasets
"""
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
logits = mnist_cnn_model(img)
cost = fluid.layers.cross_entropy(input=logits, label=label)
avg_cost = fluid.layers.mean(x=cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.01)
optimizer.minimize(avg_cost)
batch_size = fluid.layers.create_tensor(dtype='int64')
batch_acc = fluid.layers.accuracy(
input=logits, label=label, total=batch_size)
BATCH_SIZE = 50
PASS_NUM = 3
ACC_THRESHOLD = 0.98
LOSS_THRESHOLD = 10.0
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=BATCH_SIZE)
# use CPU
place = fluid.CPUPlace()
# use GPU
# place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
exe.run(fluid.default_startup_program())
pass_acc = fluid.average.WeightedAverage()
for pass_id in range(PASS_NUM):
pass_acc.reset()
for data in train_reader():
loss, acc, b_size = exe.run(
fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[avg_cost, batch_acc, batch_size])
pass_acc.add(value=acc, weight=b_size)
pass_acc_val = pass_acc.eval()[0]
print("pass_id=" + str(pass_id) + " acc=" + str(acc[0]) +
" pass_acc=" + str(pass_acc_val))
if loss < LOSS_THRESHOLD and pass_acc_val > ACC_THRESHOLD:
# early stop
break
print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc.e
|
val()[
0]))
fluid.io.save_params(
exe, dirname='./mnist', main_program=fluid.default_main_program())
print('train mnist done')
if __name__ == '__main__':
main()
|
sdrogers/ms2ldaviz
|
ms2ldaviz/decomp_create_motifset.py
|
Python
|
mit
| 1,879
| 0.029803
|
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
from decomposition.models import *
from basicviz.models import *
# Script to transform an experiment into a motifset for decomposition
if __name__ == '__main__':
experiment_name = sys.argv[1]
original_experiment = Experiment.objects.get(name = experiment_name)
docs = Document.objects.filter(experiment = original_experiment)
# Find out the featureset
temp_doc = docs[0]
fi = FeatureInstance.objects.filter(document = temp_doc)[0]
bfs = fi.feature.featureset
original_features = Feature.objects.filter(featureset = bfs)
# Get the decomposition featureset - hardcoded
fs = FeatureSet.objects.get_or_create(name='binned_005')[0]
n_done = 0
    for localfeature in original_features:
gf = GlobalFeature.objects.get_or_create(name = localfeature.name,
min_mz = localfeature.min_mz,
max_mz = localfeature.max_mz,
featureset = fs)[0]
FeatureMap.objects.get_or_create(localfeature = localfeature,
globalfeature = gf)
n_done += 1
if n_done % 100 == 0:
print(n_done,len(original_features))
# Create the motif links
global_motifs = []
original_motifs = Mass2Motif.objects.filter(experiment = original_experiment)
for motif in original_motifs:
gm = GlobalMotif.objects.get_or_create(originalmotif = motif)[0]
global_motifs.append(gm)
# Create the motifset and put the global motifs in it
ms = MotifSet.objects.get_or_create(name = sys.argv[2],featureset = fs)[0]
for gm in global_motifs:
gms = GlobalMotifsToSets.objects.get_or_create(motif = gm,motifset = ms)
|
brn0/gitosis
|
gitosis/run_hook.py
|
Python
|
gpl-2.0
| 2,455
| 0.000815
|
"""
Perform gitosis actions for a git hook.
"""
from ConfigParser import NoOptionError, NoSectionError
import errno
import logging
import os
import sys
import shutil
from gitosis import repository
from gitosis import ssh
from gitosis import gitweb
from gitosis import gitdaemon
from gitosis import app
from gitosis import util
from gitosis import mirror
def post_update(cfg, git_dir):
export = os.path.join(git_dir, 'gitosis-export')
try:
shutil.rmtree(export)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
repository.export(git_dir=git_dir, path=export)
os.rename(
os.path.join(export, 'gitosis.conf'),
os.path.join(export, '..', 'gitosis.conf'),
)
# re-read config to get up-to-date settings
cfg.read(os.path.join(export, '..', 'gitosis.conf'))
gitweb.set_descriptions(
config=cfg,
)
generated = util.getGeneratedFilesDir(config=cfg)
gitweb.generate_project_list(
config=cfg,
path=os.path.join(generated, 'projects.list'),
)
gitdaemon.set_export_ok(
config=cfg,
)
    authorized_keys = util.getSSHAuthorizedKeysPath(config=cfg)
ssh.writeAuthorizedKeys(
path=authorized_keys,
keydir=os.path.join(export, 'keydir'),
)
def update_mirrors(cfg, git_dir):
mirror.push_mirrors(cfg, git_dir)
class Main(app.App):
def create_parser(self):
parser = super(Main, self).create_parser()
parser.set_usage('%prog [OPTS] HOOK')
parser.set_description(
'Perform gitosis actions for a git hook')
return parser
def handle_args(self, parser, cfg, options, args):
try:
(hook,) = args
except ValueError:
parser.error('Missing argument HOOK.')
log = logging.getLogger('gitosis.run_hook')
os.umask(0022)
git_dir = os.environ.get('GIT_DIR')
if git_dir is None:
            log.error('Must have GIT_DIR set in environment')
sys.exit(1)
if hook == 'post-update':
log.info('Running hook %s', hook)
post_update(cfg, git_dir)
log.info('Done.')
elif hook == 'update-mirrors':
log.info('Running hook %s', hook)
update_mirrors(cfg, git_dir)
log.info('Done.')
else:
log.warning('Ignoring unknown hook: %r', hook)
|
cxong/Slappa
|
keyboard.py
|
Python
|
mit
| 1,293
| 0.000773
|
from config import *
class Keyboard:
def __init__(self):
self.keys = None
self.left = pygame.K_LEFT
self.right = pygame.K_RIGHT
self.jump = pygame.K_UP
self.hit_left = pygame.K_a
self.hit_right = pygame.K_d
self.hit_up = pygame.K_w
if GCW_ZERO:
self.hit_left = pygame.K_LSHIFT
self.hit_right = pygame.K_LCTRL
self.hit_up = pygame.K_SPACE
        self.on_down = None
def update(self):
self.keys = pygame.key.get_pressed()
def is_escape(self):
return self.keys is not None and self.keys[pygame.K_ESCAPE]
def dir(self):
if self.keys is None:
return 0
if self.keys[self.left]:
return -1
elif self.keys[self.right]:
return 1
return 0
def is_jump(self):
return self.keys is not None and self.keys[self.jump]
def hit(self):
if self.keys is None:
return ""
if self.keys[self.hit_left]:
return "left"
elif self.keys[self.hit_right]:
return "right"
elif self.keys[self.hit_up]:
return "up"
return ""
def pressed(self):
return self.dir() != 0 or self.is_jump() or self.hit() != ""
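# Illustrative sketch (assumption, not part of Slappa): a typical per-frame polling
# loop for this Keyboard wrapper. It assumes pygame and the display were already
# initialised by config.py; the 60 FPS cap is an arbitrary example value.
def _example_poll_loop(keyboard, clock):
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
        keyboard.update()           # refresh the pygame.key.get_pressed() snapshot
        if keyboard.is_escape():
            running = False
        direction = keyboard.dir()  # -1, 0 or 1
        attack = keyboard.hit()     # "", "left", "right" or "up"
        # ... move the player / trigger attacks from direction and attack here ...
        clock.tick(60)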
|
ebigelow/LOTlib
|
LOTlib/Inference/MHShared.py
|
Python
|
gpl-3.0
| 769
| 0.002601
|
from math import log, exp, isnan
from random import random
def MH_acceptance(cur, prop, fb, acceptance_temperature=1.0):
"""Return a true/false, do we accept the proposal given the current sample.
Also handles all the weird corner cases for computing MH acceptance ratios.
"""
# If we get infs or are in a stupid state, let's just sample from the prior so things don't get crazy
if isnan(cur) or (cur == float("-inf") and prop == float("-inf")):
        # Just choose at random -- we can't sample priors since they may be -inf both
r = -log(2.0)
elif isnan(prop):
# Never accept
r = float("-inf")
else:
r = (prop-cur-fb) / acceptance_temperature
# And flip
return r >= 0.0 or random() < exp(r)
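# Illustrative sketch (assumption, same module as MH_acceptance above): a tiny
# random-walk Metropolis sampler for a standard normal target. With a symmetric
# proposal the forward/backward correction fb is 0.0.
def _example_random_walk(n_steps=1000):
    from random import gauss
    def logp(x):
        return -0.5 * x * x            # unnormalised log density of N(0, 1)
    x, samples = 0.0, []
    for _ in range(n_steps):
        prop = x + gauss(0.0, 1.0)     # symmetric proposal, so fb = 0.0
        if MH_acceptance(logp(x), logp(prop), 0.0):
            x = prop
        samples.append(x)
    return samples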
|
dgsantana/arsenalsuite
|
cpp/lib/PyQt4/pyuic/uic/port_v3/ascii_upper.py
|
Python
|
gpl-2.0
| 1,612
| 0.013648
|
#############################################################################
##
## Copyright (c) 2012 Riverbank Computing Limited <info@riverbankcomputing.com>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at sales@riverbankcomputing.com.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
# A translation table for converting ASCII lower case to upper case.
_ascii_trans_table = bytes.maketrans(b'abcdefghijklmnopqrstuvwxyz',
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
# Convert a string to ASCII upper case irrespective of the current locale.
def ascii_upper(s):
return s.translate(_ascii_trans_table)
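# Illustrative usage (assumption, not part of the PyQt sources): the table above is
# built with bytes.maketrans, so ascii_upper() is intended for byte strings and is
# unaffected by the current locale.
if __name__ == '__main__':
    assert ascii_upper(b'widget-v3') == b'WIDGET-V3'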
|
veger/ansible
|
lib/ansible/modules/storage/netapp/na_elementsw_node.py
|
Python
|
gpl-3.0
| 8,394
| 0.001787
|
#!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Operation
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_node
short_description: NetApp Element Software Node Operation
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Add, remove cluster node on Element Software Cluster.
options:
state:
description:
- Element Software Storage Node operation state.
- present - To add pending node to participate in cluster data storage.
- absent - To remove node from active cluster. A node cannot be removed if active drives are present.
choices: ['present', 'absent']
default: 'present'
node_id:
description:
- List of IDs or Names or IP Address of nodes from cluster used for operation.
required: true
'''
EXAMPLES = """
- name: Add node from pending to active cluster
tags:
- elementsw_add_node
na_elementsw_node:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
node_id: sf4805-meg-03
- name: Remove active node from cluster
tags:
- elementsw_remove_node
na_elementsw_node:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: absent
node_id: 13
- name: Add node from pending to active cluster using node IP
tags:
- elementsw_add_node_ip
na_elementsw_node:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
state: present
node_id: 10.109.48.65
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class ElementSWNode(object):
"""
Element SW Storage Node operations
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
node_id=dict(required=True, type='list'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
input_params = self.module.params
self.state = input_params['state']
self.node_id = input_params['node_id']
if HAS_SF_SDK is False:
self.module.fail_json(
msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module)
def check_node_has_active_drives(self, node_id=None):
"""
Check if node has active drives attached to cluster
        :description: Validate whether the node has active drives in the cluster
:return: True or False
:rtype: bool
"""
if node_id is not None:
cluster_drives = self.sfe.list_drives()
for drive in cluster_drives.drives:
if drive.node_id == node_id and drive.status == "active":
return True
return False
def get_node_list(self):
"""
Get Node List
:description: Find and retrieve node_id from the active cluster
:return: None
:rtype: None
"""
if len(self.node_id) > 0:
unprocessed_node_list = self.node_id
list_nodes = []
all_nodes = self.sfe.list_all_nodes()
# For add operation lookup for nodes list with status pendingNodes list
# else nodes will have to be traverse through active cluster
if self.state == "present":
list_nodes = all_nodes
|
.pending_nodes
else:
list_nodes = all_nodes.nodes
            for current_node in list_nodes:
if self.state == "absent" and \
(current_node.node_id in self.node_id or current_node.name in self.node_id or current_node.mip in self.node_id):
if self.check_node_has_active_drives(current_node.node_id):
self.module.fail_json(msg='Error deleting node %s: node has active drives' % current_node.name)
else:
self.action_nodes_list.append(current_node.node_id)
if self.state == "present" and \
(current_node.pending_node_id in self.node_id or current_node.name in self.node_id or current_node.mip in self.node_id):
self.action_nodes_list.append(current_node.pending_node_id)
# report an error if state == present and node is unknown
if self.state == "present":
for current_node in all_nodes.nodes:
if current_node.node_id in unprocessed_node_list:
unprocessed_node_list.remove(current_node.node_id)
elif current_node.name in unprocessed_node_list:
unprocessed_node_list.remove(current_node.name)
elif current_node.mip in unprocessed_node_list:
unprocessed_node_list.remove(current_node.mip)
for current_node in all_nodes.pending_nodes:
if current_node.pending_node_id in unprocessed_node_list:
unprocessed_node_list.remove(current_node.node_id)
elif current_node.name in unprocessed_node_list:
unprocessed_node_list.remove(current_node.name)
elif current_node.mip in unprocessed_node_list:
unprocessed_node_list.remove(current_node.mip)
if len(unprocessed_node_list) > 0:
self.module.fail_json(msg='Error adding node %s: node not in pending or active lists' % to_native(unprocessed_node_list))
return None
def add_node(self, nodes_list=None):
"""
Add Node that are on PendingNodes list available on Cluster
"""
try:
self.sfe.add_nodes(nodes_list,
auto_install=True)
except Exception as exception_object:
self.module.fail_json(msg='Error add node to cluster %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def remove_node(self, nodes_list=None):
"""
Remove active node from Cluster
"""
try:
self.sfe.remove_nodes(nodes_list)
except Exception as exception_object:
self.module.fail_json(msg='Error remove node from cluster %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def apply(self):
"""
Check, process and initiate Cluster Node operation
"""
changed = False
self.action_nodes_list = []
if self.module.check_mode is False:
self.get_node_list()
if self.state == "present" and len(self.action_nodes_list) > 0:
self.add_node(self.action_nodes_list)
changed = True
elif self.state == "absent" and len(self.action_nodes_list) > 0:
self.remove_node(self.action_nodes_list)
changed = True
result_message = 'List of nodes : %s - %s' % (to_native(self.action_nodes_list), to_native(self.node_id))
        self.module.exit_json(changed=changed, msg=result_message)
|
bitmovin/bitmovin-python
|
tests/bitmovin/services/inputs/sftp_input_service_tests.py
|
Python
|
unlicense
| 6,702
| 0.002835
|
import unittest
import json
from bitmovin import Bitmovin, Response, SFTPInput
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class SFTPInputTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_sftp_input(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
input_resource_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_sftp_inputs(sample_input, input_resource_response.resource)
def test_create_sftp_input_without_name(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
sample_input.name = None
input_resource_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_sftp_inputs(sample_input, input_resource_response.resource)
def test_create_sftp_input_custom(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
sample_input.port = 9921
input_resource_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_sftp_inputs(sample_input, input_resource_response.resource)
self.assertEqual(sample_input.port, input_resource_response.resource.port)
def test_retrieve_sftp_input(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
created_input_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_sftp_inputs(sample_input, created_input_response.resource)
retrieved_input_response = self.bitmovin.inputs.SFTP.retrieve(created_input_response.resource.id)
self.assertIsNotNone(retrieved_input_response)
self.assertIsNotNone(retrieved_input_response.resource)
self._compare_sftp_inputs(created_input_response.resource, retrieved_input_response.resource)
def test_delete_sftp_input(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
created_input_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_sftp_inputs(sample_input, created_input_response.resource)
deleted_minimal_resource = self.bitmovin.inputs.SFTP.delete(created_input_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.inputs.SFTP.retrieve(created_input_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving input after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_sftp_inputs(self):
        (sample_input, sample_files) = self._get_sample_sftp_input()
created_input_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_sftp_inputs(sample_input, created_input_response.resource)
inputs = self.bitmovin.inputs.SFTP.list()
self.assertIsNotNone(inputs)
self.assertIsNotNone(inputs.resource)
self.assertIsNotNone(inputs.response)
self.assertIsInstance(inputs.resource, list)
self.assertIsInstance(inputs.response, Response)
self.assertGreater(inputs.resource.__sizeof__(), 1)
def test_retrieve_sftp_input_custom_data(self):
(sample_input, sample_files) = self._get_sample_sftp_input()
sample_input.customData = '<pre>my custom data</pre>'
created_input_response = self.bitmovin.inputs.SFTP.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_sftp_inputs(sample_input, created_input_response.resource)
custom_data_response = self.bitmovin.inputs.SFTP.retrieve_custom_data(created_input_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_input.customData, json.loads(custom_data.customData))
def _compare_sftp_inputs(self, first: SFTPInput, second: SFTPInput):
"""
:param first: SFTPInput
:param second: SFTPInput
:return: bool
"""
self.assertEqual(first.host, second.host)
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
#self.assertEqual(first.username, second.username) # issue 574
def _get_sample_sftp_input(self):
sftp_input_settings = self.settings.get('sampleObjects').get('inputs').get('sftp')\
.get('3945fee9-5e0f-48ce-8f3d-d451c0bf1071')
files = sftp_input_settings.get('files')
sftp_input = SFTPInput(
host=sftp_input_settings.get('host'),
username=sftp_input_settings.get('username'),
password=sftp_input_settings.get('password'),
name='Sample SFTP input'
)
self.assertIsNotNone(sftp_input.host)
self.assertIsNotNone(sftp_input.username)
self.assertIsNotNone(sftp_input.password)
return sftp_input, files
if __name__ == '__main__':
unittest.main()
|
grlee77/scipy
|
scipy/special/_precompute/zetac.py
|
Python
|
bsd-3-clause
| 591
| 0
|
"""Compute the Taylor series for zeta(x) - 1
|
around x = 0."""
try:
import mpmath
except ImportError:
pass
def zetac_series(N):
coeffs = []
with mpmath.workdps(100):
coeffs.append(-1.5)
for n in range(1, N):
coeff = mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n)
coeffs.append(coeff)
return coeffs
def main():
print(__doc__)
    coeffs = zetac_series(10)
coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
for x in coeffs]
print("\n".join(coeffs[::-1]))
if __name__ == '__main__':
main()
|
jfterpstra/bluebottle
|
bluebottle/homepage/serializers.py
|
Python
|
bsd-3-clause
| 554
| 0
|
from bluebottle.projects.serializers import ProjectPreviewSerializer
from bluebottle.quotes.serializers import QuoteSerializer
from bluebottle.slides.serializers import SlideSerializer
from bluebottle.statistics.serializers import StatisticSerializer
from rest_framework import serializers
class HomePageSerializer(serializers.Serializer):
id = serializers.CharField()
    quotes = QuoteSerializer(many=True)
    slides = SlideSerializer(many=True)
statistics = StatisticSerializer(many=True)
projects = ProjectPreviewSerializer(many=True)
|
richgieg/flask-now
|
config.py
|
Python
|
mit
| 4,539
| 0
|
import os
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
###########################################################################
# [ Application ]
###########################################################################
APP_TITLE = 'WebApp'
APP_MAIL_NAME = '%s Support' % APP_TITLE
APP_MAIL_ADDRESS = 'support@webapp.com'
APP_MAIL_SENDER = '%s <%s>' % (APP_MAIL_NAME, APP_MAIL_ADDRESS)
APP_MAIL_SUBJECT_PREFIX = '[%s]' % APP_TITLE
# Email address for the primary site administrator user account.
APP_ADMIN = os.environ.get('APP_ADMIN')
# Allow new users to register.
APP_ALLOW_NEW_USERS = True
# A value of 0 means unlimited.
APP_MAX_USERS = 2
# Toggles the logging of user events.
APP_EVENT_LOGGING = True
###########################################################################
# [ Flask ]
###########################################################################
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
###########################################################################
# [ Flask-Login ]
    ###########################################################################
# Ensures that the "remember me" cookie isn't accessible by
    # client-side scripts.
REMEMBER_COOKIE_HTTPONLY = True
# Time-to-live for the "remember me" cookie.
REMEMBER_COOKIE_DURATION = timedelta(days=365)
# Must be disabled for the application's security layer to
# function properly.
SESSION_PROTECTION = None
###########################################################################
# [ Flask-Mail ]
###########################################################################
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
###########################################################################
# [ Flask-SQLAlchemy ]
###########################################################################
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
###########################################################################
# [ Flask ]
###########################################################################
DEBUG = True
###########################################################################
# [ Flask-SQLAlchemy ]
###########################################################################
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
###########################################################################
# [ Flask ]
###########################################################################
TESTING = True
###########################################################################
# [ Flask-SQLAlchemy ]
###########################################################################
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
###########################################################################
# [ Flask ]
###########################################################################
# Uncomment the following line if you're running HTTPS throughout
# your entire application.
# SESSION_COOKIE_SECURE = True
###########################################################################
# [ Flask-Login ]
###########################################################################
# Uncomment the following line if you're running HTTPS throughout
# your entire application.
# REMEMBER_COOKIE_SECURE = True
###########################################################################
# [ Flask-SQLAlchemy ]
###########################################################################
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
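# Illustrative sketch (assumption, not part of this file): an application factory
# typically picks one of the classes above by name and lets it configure the app.
# The function name and the FLASK_CONFIG variable are conventions, not defined here.
def _example_create_app(config_name=None):
    from flask import Flask
    config_name = config_name or os.environ.get('FLASK_CONFIG') or 'default'
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    return app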
|
youtube/cobalt
|
third_party/web_platform_tests/fetch/api/resources/stash-put.py
|
Python
|
bsd-3-clause
| 301
| 0
|
def main(request, response):
    url_dir = '/'.join(request.url_parts.path.split('/')[:-1]) + '/'
key = request.GET.first("key")
value = request.GET.first("value")
    request.server.stash.put(key, value, url_dir)
response.headers.set('Access-Control-Allow-Origin', '*')
return "done"
|
skarra/CalDAVClientLibrary
|
caldavclientlibrary/protocol/http/tests/test_util.py
|
Python
|
apache-2.0
| 4,886
| 0.011871
|
##
# Copyright (c) 2006-2013 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.http.util import parsequoted
from caldavclientlibrary.protocol.http.util import parsetoken
from caldavclientlibrary.protocol.http.util import parseStatusLine
import unittest
class TestParseQuoted(unittest.TestCase):
def testParseQuotedOK(self):
data = {
"\"\"" : ("", ""),
"\"quoted\"" : ("quoted", ""),
"\"quoted words\"" : ("quoted words", ""),
"\"quoting a \\\"word\\\"\"" : ("quoting a \"word\"", ""),
"\"\" after" : ("", "after"),
"\"quoted\" after" : ("quoted", "after"),
"\"quoted words\" after" : ("quoted words", "after"),
"\"quoting a \\\"word\\\"\" after" : ("quoting a \"word\"", "after"),
"\"quoting a \\\"word\\\" after\" after": ("quoting a \"word\" after", "after"),
"\"quoted\"after" : ("quoted", "after"),
"\"" : ("", ""),
"\"unterminated" : ("unterminated", ""),
"\"unterminated words" : ("unterminated words", ""),
"\"unterminated a \\\"word\\\"" : ("unterminated a \"word\"", ""),
}
for input, result in data.iteritems():
self.assertEqual(parsequoted(input), result)
def testParseQuotedBAD(self):
data = (
"",
"unquoted",
"unquoted \"quoted\"",
)
for input in data:
self.assertRaises(AssertionError, parsequoted, input)
class TestParseToken(unittest.TestCase):
def testParseTokenOK(self):
data = {
"" : ("", ""),
"unquoted" : ("unquoted", ""),
"unquoted words" : ("unquoted", "words"),
"unquoted words" : ("unquoted", "words"),
"unquoting a \"word\"" : ("unquoting", "a \"word\""),
"unquoted\twords" : ("unquoted", "words"),
"unquoting\ta \"word\"" : ("unquoting", "a \"word\""),
"unquoted: words" : ("unquoted", "words"),
"unquoting: a \"word\"" : ("unquoting", "a \"word\""),
"\"\"" : ("", ""),
"\"quoted\"" : ("quoted", ""),
"\"quoted words\"" : ("quoted words", ""),
"\"quoting a \\\"word\\\"\"" : ("quoting a \"word\"", ""),
"\"\" after" : ("", "after"),
"\"quoted\" after" : ("quoted", "after"),
"\"quoted words\" after" : ("quoted words", "after"),
"\"quoting a \\\"word\\\"\" after" : ("quoting a \"word\"", "after"),
"\"quoting a \\\"word\\\" after\" after": ("quoting a \"word\" after", "after"),
"\"quoted\"after" : ("quoted", "after"),
"\"" : ("", ""),
"\"unterminated" : ("unterminated", ""),
"\"unterminated words" : ("unterminated words", ""),
"\"unterminated a \\\"word\\\"" : ("unterminated a \"word\"", ""),
}
for input, result in data.iteritems():
self.assertEqual(parsetoken(input, " \t:"), result)
class TestParseStatusLine(unittest.TestCase):
def testParseTokenOK(self):
self.assertEqual(parseStatusLine("HTTP/1.1 200 OK"), 200)
def testParseTokenBadStatus(self):
self.assertEqual(parseStatusLine("HTTP/1.2 2001 OK"), 0)
def testParseTokenBadVersion(self):
self.assertEqual(parseStatusLine("HTTP/1.2 200 OK"), 0)
def testParseTokenBadNumber(self):
        self.assertEqual(parseStatusLine("HTTP/1.1 OK"), 0)
def testParseTokenBad(self):
self.assertEqual(parseStatusLine("HTTP/1.1"), 0)
|
siliconsmiley/QGIS
|
python/plugins/processing/algs/gdal/proximity.py
|
Python
|
gpl-2.0
| 4,529
| 0.001766
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
proximity.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterString
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
class proximity(GdalAlgorithm):
INPUT = 'INPUT'
VALUES = 'VALUES'
UNITS = 'UNITS'
MAX_DIST = 'MAX_DIST'
NODATA = 'NODATA'
BUF_VAL = 'BUF_VAL'
OUTPUT = 'OUTPUT'
RTYPE = 'RTYPE'
TYPE = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64']
DISTUNITS = ['GEO', 'PIXEL']
def commandLineName(self):
return "gdalogr:proximity"
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Proximity (raster distance)')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(self.INPUT,
self.tr('Input layer'), False))
self.addParameter(ParameterString(self.VALUES,
self.tr('Values'), ''))
self.addParameter(ParameterSelection(self.UNITS,
self.tr('Distance units'), self.DISTUNITS, 0))
self.addParameter(ParameterNumber(self.MAX_DIST,
self.tr('Max distance (negative value to ignore)'), -1, 9999, -1))
self.addParameter(ParameterNumber(self.NODATA,
self.tr('Nodata (negative value to ignore)'), -1, 9999, -1))
self.addParameter(ParameterNumber(self.BUF_VAL,
self.tr('Fixed buf value (negative value to ignore)'),
-1, 9999, -1))
self.addParameter(ParameterSelection(self.RTYPE,
self.tr('Output raster type'), self.TYPE, 5))
self.addOutput(OutputRaster(self.OUTPUT, self.tr('Distance')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT)
arguments = []
arguments.append('-ot')
arguments.append(self.TYPE[self.getParameterValue(self.RTYPE)])
arguments.append(self.getParameterValue(self.INPUT))
arguments.append(output)
arguments.append('-of')
arguments.append(GdalUtils.getFormatShortNameFromFilename(output))
arguments.append('-distunits')
arguments.append(self.DISTUNITS[self.getParameterValue(self.UNITS)])
values = self.getParameterValue(self.VALUES)
if len(values) > 0:
arguments.append('-values')
arguments.append(values)
        values = self.getParameterValue(self.MAX_DIST)
        if values >= 0:
            arguments.append('-maxdist')
            arguments.append(unicode(values))
        values = self.getParameterValue(self.NODATA)
        if values >= 0:
            arguments.append('-nodata')
            arguments.append(unicode(values))
        values = self.getParameterValue(self.BUF_VAL)
        if values >= 0:
            arguments.append('-fixed-buf-val')
            arguments.append(unicode(values))
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', 'gdal_proximity.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = ['gdal_proximity.py',
                        GdalUtils.escapeAndJoin(arguments)]
return commands
|
pywinauto/pywinauto
|
pywinauto/__init__.py
|
Python
|
bsd-3-clause
| 7,043
| 0.003124
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Python package for automating GUI manipulation on Windows"""
__version__ = "0.6.8"
import sys # noqa: E402
import warnings # noqa: E402
def deprecated(method, deprecated_name=None):
"""Decorator for deprecated methods"""
if deprecated_name is None:
deprecated_name = ''.join([subname.capitalize() for subname in method.__name__.split('_')])
def wrap(*args, **kwargs):
warnings.simplefilter("default", DeprecationWarning)
warnings.warn("Method .{}() is deprecated, use .{}() instead." \
"".format(deprecated_name, method.__name__), DeprecationWarning, stacklevel=2)
return method(*args, **kwargs)
return wrap
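# Illustrative sketch (assumption, not a real pywinauto wrapper): deprecated() is
# meant to expose an old CamelCase alias that emits a DeprecationWarning and then
# forwards to the renamed snake_case method.
class _ExampleDeprecatedAlias(object):
    def click_input(self):
        return "clicked"
    # Calling .ClickInput() warns, then runs click_input().
    ClickInput = deprecated(click_input)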
if sys.platform == 'win32':
# Importing only pythoncom can fail with the errors like:
# ImportError: No system module 'pywintypes' (pywintypes27.dll)
# So try to facilitate pywintypes*.dll loading with implicit import of win32api
import win32api # noqa: E402
import pythoncom # noqa: E402
def _get_com_threading_mode(module_sys):
"""Set up COM threading model
The ultimate goal is MTA, but the mode is adjusted
if it was already defined prior to pywinauto import.
"""
com_init_mode = 0 # COINIT_MULTITHREADED = 0x0
if hasattr(module_sys, "coinit_flags"):
warnings.warn("Apply externally defined coinit_flags: {0}"
.format(module_sys.coinit_flags), UserWarning)
com_init_mode = module_sys.coinit_flags
try:
# Probe the selected COM threading mode
pythoncom.CoInitializeEx(com_init_mode)
pythoncom.CoUninitialize()
except pythoncom.com_error:
        warnings.warn("Revert to STA COM threading mode", UserWarning)
com_init_mode = 2 # revert back to STA
return com_init_mode
sys.coinit_flags = _get_com_threading_mode(sys)
#=========================================================================
class WindowNotFoundError(Exception):
"""No window could be found"""
pass
from . import findwindows
WindowAmbiguousError = findwindows.WindowAmbiguousError
ElementNotFoundError = findwindows.ElementNotFoundError
ElementAmbiguousError = findwindows.ElementAmbiguousError
from . import findbestmatch
from . import backend as backends
MatchError = findbestmatch.MatchError
from pywinauto.application import Application, WindowSpecification
class Desktop(object):
"""Simple class to call something like ``Desktop().WindowName.ControlName.method()``"""
def __init__(self, backend=None, allow_magic_lookup=True):
"""Create desktop element description"""
if not backend:
backend = backends.registry.name
if backend not in backends.registry.backends:
raise ValueError('Backend "{0}" is not registered!'.format(backend))
self.backend = backends.registry.backends[backend]
self.allow_magic_lookup = allow_magic_lookup
def window(self, **kwargs):
"""Create WindowSpecification object for top-level window"""
if 'top_level_only' not in kwargs:
kwargs['top_level_only'] = True
if 'backend' in kwargs:
raise ValueError('Using another backend than set in Desktop constructor is not allowed!')
kwargs['backend'] = self.backend.name
return WindowSpecification(kwargs, allow_magic_lookup=self.allow_magic_lookup)
def windows(self, **kwargs):
"""Return a list of wrapped top level windows"""
if 'top_level_only' not in kwargs:
kwargs['top_level_only'] = True
if 'backend' in kwargs:
raise ValueError('Using another backend than set in Desktop constructor is not allowed!!')
kwargs['backend'] = self.backend.name
windows = findwindows.find_elements(**kwargs)
return [self.backend.generic_wrapper_class(win) for win in windows]
def __getitem__(self, key):
"""Allow describe top-level window as Desktop()['Window Caption']"""
return self.window(best_match=key)
def __getattribute__(self, attr_name):
"""Attribute access for this class"""
allow_magic_lookup = object.__getattribute__(self, "allow_magic_lookup") # Beware of recursions here!
try:
return object.__getattribute__(self, attr_name)
except AttributeError:
if not allow_magic_lookup:
raise
return self[attr_name] # delegate it to __get_item__
def from_point(self, x, y):
"""Get wrapper object for element at specified screen coordinates (x, y)"""
element_info = self.backend.element_info_class.from_point(x, y)
return self.backend.generic_wrapper_class(element_info)
def top_from_point(self, x, y):
"""Get wrapper object for top level element at specified screen coordinates (x, y)"""
top_element_info = self.backend.element_info_class.top_from_point(x, y)
return self.backend.generic_wrapper_class(top_element_info)
def get_active(self):
"""Get wrapper object for active element"""
element_info = self.backend.element_info_class.get_active()
return self.backend.generic_wrapper_class(element_info)
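# Illustrative usage (assumption: a Windows host with the "uia" backend; the window
# title below is only an example). Desktop() builds lazy WindowSpecification
# lookups, so no window is searched for until a wait or an action is performed.
if __name__ == "__main__":
    desktop = Desktop(backend="uia")
    notepad = desktop.window(title="Untitled - Notepad")  # a WindowSpecification
    # notepad.wait("visible")  # would block until such a window exists and is visible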
|
catalyst-cooperative/pudl
|
src/pudl/metadata/resources/eia861.py
|
Python
|
mit
| 23,777
| 0.000505
|
"""Definitions of data tables primarily coming from EIA-861."""
from typing import Any, Dict, List
RESOURCE_METADATA: Dict[str, Dict[str, Any]] = {
"advanced_metering_infrastructure_eia861": {
"description": "The data contain number of meters from automated meter readings (AMR) and advanced metering infrastructure (AMI) by state, sector, and balancing authority. The energy served (in megawatthours) for AMI systems is provided. Form EIA-861 respondents also report the number of standard meters (non AMR/AMI) in their system. Historical Changes: We started collecting the number of standard meters in 2013. The monthly survey collected these data from January 2011 to January 2017.",
"schema": {
"fields": [
'advanced_metering_infrastructure',
'automated_meter_reading',
'balancing_authority_code_eia',
'customer_class',
            'daily_digital_access_customers',
'direct_load_control_customers',
'energy_served_ami_mwh',
'entity_type',
'home_area_network',
'non_amr_ami',
'report_date',
'short_form',
'state',
'utility_id_eia',
'utility_name_eia'
],
"primary_key": [
'balancing_authority_code_eia',
'customer_class',
'report_date',
'state',
'utility_id_eia',
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"balancing_authority_assn_eia861": {
"description": "Association table showing which combinations of state, balancing authority, and utilities were observed in the data each year.",
"schema": {
"fields": [
'report_date',
'balancing_authority_id_eia',
'utility_id_eia',
'state',
],
"primary_key": [
'report_date',
'balancing_authority_id_eia',
'utility_id_eia',
'state',
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"balancing_authority_eia861": {
"description": "Annual entity table for balancing authorities.",
"schema": {
"fields": [
'report_date',
'balancing_authority_id_eia',
'balancing_authority_code_eia',
'balancing_authority_name_eia',
],
"primary_key": [
'report_date',
'balancing_authority_id_eia',
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"demand_response_eia861": {
"description": "The data contain energy demand response programs by state, sector, and balancing authority. We collect data for the number of customers enrolled, energy savings, potential and actual peak savings, and associated costs.",
"schema": {
"fields": [
'actual_peak_demand_savings_mw',
'balancing_authority_code_eia',
'customer_class',
'customer_incentives_cost',
'customers',
'energy_savings_mwh',
'other_costs',
'potential_peak_demand_savings_mw',
'report_date',
'short_form',
'state',
'utility_id_eia',
'utility_name_eia'
],
"primary_key": [
'balancing_authority_code_eia',
'customer_class',
'report_date',
'state',
'utility_id_eia',
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"demand_response_water_heater_eia861": {
"description": "The number of grid connected water heaters enrolled in demand response programs.",
"schema": {
"fields": [
'balancing_authority_code_eia',
'report_date',
'state',
'utility_id_eia',
'water_heater',
],
"primary_key": [
'balancing_authority_code_eia',
'report_date',
'state',
'utility_id_eia',
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"demand_side_management_ee_dr_eia861": {
"description": "The data contain energy efficiency incremental data, energy efficiency annual data, load management incremental data, load management annual data, annual costs, and the customer counts of price response and time response programs by sector.",
"schema": {
"fields": [
'annual_indirect_program_cost',
'annual_total_cost',
'customer_class',
'energy_efficiency_annual_actual_peak_reduction_mw',
'energy_efficiency_annual_cost',
'energy_efficiency_annual_effects_mwh',
'energy_efficiency_annual_incentive_payment',
'energy_efficiency_incremental_actual_peak_reduction_mw',
'energy_efficiency_incremental_effects_mwh',
'load_management_annual_actual_peak_reduction_mw',
'load_management_annual_cost',
'load_management_annual_effects_mwh',
'load_management_annual_incentive_payment',
'load_management_annual_potential_peak_reduction_mw',
'load_management_incremental_actual_peak_reduction_mw',
'load_management_incremental_effects_mwh',
'load_management_incremental_potential_peak_reduction_mw',
'nerc_region',
'price_responsiveness_customers',
'report_date',
'state',
'time_responsiveness_customers',
'utility_id_eia'
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"demand_side_management_misc_eia861": {
"schema": {
"fields": [
'energy_savings_estimates_independently_verified',
'energy_savings_independently_verified',
'entity_type',
'major_program_changes',
'nerc_region',
'price_responsive_programs',
'report_date',
'reported_as_another_company',
'short_form',
'state',
'time_responsive_programs',
'utility_id_eia',
'utility_name_eia'
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"demand_side_management_sales_eia861": {
"schema": {
"fields": [
'nerc_region',
'report_date',
'sales_for_resale_mwh',
'sales_to_ultimate_consumers_mwh',
'state',
'utility_id_eia'
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"distributed_generation_fuel_eia861": {
"schema": {
"fields": [
'estimated_or_actual_fuel_data',
'fuel_class',
'fuel_pct',
'report_date',
'state',
'utility_id_eia'
],
},
"field_namespace": "eia",
"sources": ["eia861"],
"etl_group": "eia861",
},
"distributed_generation_misc_eia861": {
"schema": {
"fields": [
'backup_capacity_mw',
'distributed_generati
|
laurmurclar/mitmproxy
|
mitmproxy/contrib/kaitaistruct/png.py
|
Python
|
mit
| 11,809
| 0.003049
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
# The source was png.ksy from here - https://github.com/kaitai-io/kaitai_struct_formats/blob/9370c720b7d2ad329102d89bdc880ba6a706ef26/image/png.ksy
import array
import struct
import zlib
from enum import Enum
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
class Png(KaitaiStruct):
class ColorType(Enum):
greyscale = 0
truecolor = 2
indexed = 3
greyscale_alpha = 4
truecolor_alpha = 6
class PhysUnit(Enum):
unknown = 0
meter = 1
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.magic = self._io.ensure_fixed_contents(struct.pack('8b', -119, 80, 78, 71, 13, 10, 26, 10))
self.ihdr_len = self._io.ensure_fixed_contents(struct.pack('4b', 0, 0, 0, 13))
self.ihdr_type = self._io.ensure_fixed_contents(struct.pack('4b', 73, 72, 68, 82))
self.ihdr = self._root.IhdrChunk(self._io, self, self._root)
self.ihdr_crc = self._io.read_bytes(4)
self.chunks = []
while not self._io.is_eof():
self.chunks.append(self._root.Chunk(self._io, self, self._root))
class Rgb(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.r = self._io.read_u1()
self.g = self._io.read_u1()
self.b = self._io.read_u1()
class Chunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.len = self._io.read_u4be()
self.type = self._io.read_str_byte_limit(4, "UTF-8")
_on = self.type
if _on == u"iTXt":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.InternationalTextChunk(io, self, self._root)
elif _on == u"gAMA":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.GamaChunk(io, self, self._root)
elif _on == u"tIME":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.TimeChunk(io, self, self._root)
elif _on == u"PLTE":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.PlteChunk(io, self, self._root)
elif _on == u"bKGD":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.BkgdChunk(io, self, self._root)
elif _on == u"pHYs":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.PhysChunk(io, self, self._root)
elif _on == u"tEXt":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.TextChunk(io, self, self._root)
elif _on == u"cHRM":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.ChrmChunk(io, self, self._root)
elif _on == u"sRGB":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.SrgbChunk(io, self, self._root)
elif _on == u"zTXt":
self._raw_body = self._io.read_bytes(self.len)
io = KaitaiStream(BytesIO(self._raw_body))
self.body = self._root.CompressedTextChunk(io, self, self._root)
else:
self.body = self._io.read_bytes(self.len)
self.crc = self._io.read_bytes(4)
class BkgdIndexed(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.palette_index = self._io.read_u1()
class Point(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.x_int = self._io.read_u4be()
self.y_int = self._io.read_u4be()
@property
def x(self):
if hasattr(self, '_m_x'):
return self._m_x if hasattr(self, '_m_x') else None
self._m_x = (self.x_int / 100000.0)
return self._m_x if hasattr(self, '_m_x') else None
@property
def y(self):
if hasattr(self, '_m_y'):
return self._m_y if hasattr(self, '_m_y') else None
self._m_y = (self.y_int / 100000.0)
return self._m_y if hasattr(self, '_m_y') else None
class BkgdGreyscale(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.value = self._io.read_u2be()
class ChrmChunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.white_point = self._root.Point(self._io, self, self._root)
self.red = self._root.Point(self._io, self,
|
self._root)
self.green = self._root.Point(self._io, self, self._root)
self.blue = self._root.Point(self._io, self, self._root)
class IhdrChunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
|
self._root = _root if _root else self
self.width = self._io.read_u4be()
self.height = self._io.read_u4be()
self.bit_depth = self._io.read_u1()
self.color_type = self._root.ColorType(self._io.read_u1())
self.compression_method = self._io.read_u1()
self.filter_method = self._io.read_u1()
self.interlace_method = self._io.read_u1()
class PlteChunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.entries = []
while not self._io.is_eof():
self.entries.append(self._root.Rgb(self._io, self, self._root))
class SrgbChunk(KaitaiStruct):
class Intent(Enum):
perceptual = 0
relative_colorimetric = 1
saturation = 2
absolute_colorimetric = 3
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.render_intent = self._root.SrgbChunk.Intent(self._io.read_u1())
class CompressedTextChunk(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self.keyword = self._io.read_strz("UTF-8", 0, False, True, True)
self.compression_method = self._io.read_u1()
self._raw_text_datastream = self._io.read_bytes_full()
self.text_datastream = zlib.decompress(self._raw_text_datastream)
class BkgdTruecolor(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=Non
|
SebastianSchildt/potatonet-power
|
gui/netclient.py
|
Python
|
mit
| 699
| 0.048641
|
import socket
HOST, PORT = "localhost", 2222
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def init():
global sock
try:
sock.connect((HOST, PORT))
return True
except:
print("Can not open connection")
return False
def transceive(command):
global sock
try:
sock.sendall(bytes(command+"\n","utf-8"))
d
|
ata=readline(sock)
return data
except OSError as e:
print("Communication error: {0}".format(err))
return("505 ")
def close():
global sock
sock.close()
def readline(sock, recv_buffer=4096, delim='\n'):
buffer = ''
data = True
while data:
data = sock.recv(recv_buffer)
buffer += data.decode("utf-8")
|
if buffer and buffer[-1] == delim:
return buffer[:-1]
|
antoniov/tools
|
clodoo/check_one2many.py
|
Python
|
agpl-3.0
| 3,475
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
# import oerplib
# from os0 import os0
try:
from clodoo import clodoo
except ImportError:
import clodoo
try:
from z0lib.z0lib import z0lib
except ImportError:
try:
from z0lib import z0lib
except ImportError:
import z0lib
# import pdb
__version__ = "0.3.53.4"
msg_time = time.time()
def msg_burst(text):
global msg_time
t = time.time() - msg_time
if (t
|
> 3):
print(text)
msg_time = time.time()
parser = z0lib.parseoptargs("Odoo test environment",
|
"© 2017-2018 by SHS-AV s.r.l.",
version=__version__)
parser.add_argument('-h')
parser.add_argument("-c", "--config",
help="configuration command file",
dest="conf_fn",
metavar="file",
default='./inv2draft_n_restore.conf')
parser.add_argument("-d", "--dbname",
help="DB name",
dest="db_name",
metavar="file",
default='demo')
parser.add_argument('-n')
parser.add_argument('-q')
parser.add_argument('-V')
parser.add_argument('-v')
ctx = parser.parseoptargs(sys.argv[1:], apply_conf=False)
oerp, uid, ctx = clodoo.oerp_set_env(ctx=ctx)
# pdb.set_trace()
model_fld = 'ir.model.fields'
for fld_id in oerp.search(model_fld, [('ttype', '=', 'one2many')]):
try:
fld = oerp.browse(model_fld, fld_id)
print('browse(%s, %d){"name":%s, "model_id":%s,'
' "relation":%s, "relation_field":%s}' % (model_fld,
fld_id,
fld.name,
fld.model_id.model,
fld.relation,
fld.relation_field))
with open('check_one2many.log', 'ab') as log:
log.write("browse(%s, %d)\n" % (model_fld, fld_id))
model2 = fld.model_id.model
for id in oerp.search(model2):
msg_burst(' - browse(%s, %d).%s' % (model2, id, fld.name))
try:
rec = oerp.browse(model2, id)
for kk in rec[fld.name]:
msg_burst('search(%s, id=%d)' % (fld.relation, kk.id))
try:
x = oerp.search(fld.relation, [('id', '=', kk.id)])
except BaseException:
x = []
if len(x) != 1:
with open('check_one2many.log', 'ab') as log:
log.write("**** Error in model %s id %d! ****\n" %
(fld.relation, kk.id))
except BaseException:
print("**** Error in model %s id %d! ****" % (model2, id))
with open('check_one2many.log', 'ab') as log:
log.write("**** Error in model %s id %d! ****\n" % (model2,
id))
except BaseException:
print("**** Error in model %s id %d! ****" % (model_fld, fld_id))
with open('check_one2many.log', 'ab') as log:
log.write("**** Error in model %s id %d! ****\n" % (model_fld,
fld_id))
|
kruegg21/casino_analytics
|
src/main.py
|
Python
|
apache-2.0
| 14,236
| 0.007516
|
import helper
import json
import pandas as pd
import mpld3
import numpy as np
import requests
import factor_analysis
import visualizations
from datetime import timedelta, datetime
from netwin_analysis import netwin_analysis
from sqlalchemy import create_engine
from generateresponsefromrequest import get_intent_entity_from_watson
from query_parameters import query_parameters
from translation_dictionaries import *
# Read password from external file
with open('passwords.json') as data_file:
data = json.load(data_file)
DATABASE_HOST = 'soft-feijoa.db.elephantsql.com'
DATABASE_PORT = '5432'
DATABASE_NAME = 'ohdimqey'
DATABASE_USER = 'ohdimqey'
DATABASE_PASSWORD = data['DATABASE_PASSWORD']
# Connect to database
database_string = 'postgres://{}:{}@{}:{}/{}'.format(DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_NAME)
engine = create_engine(database_string)
main_factors = ['bank', 'zone', 'clublevel', 'area']
specific_factors = ['club_level', 'area', 'game_title', 'manufacturer',
'stand', 'zone', 'bank']
# dataframes = {}
def impute_period(query_params, error_checking = False):
'''
Checks to see if a period is specified in the query parameters object. If none
is specified, this function imputes a period by looking at the range in the
query parameters object. The imputed period is then put into the sql_period
attribute of the query params object.
Input:
query_params -- query parameters object
error_checking (bool) -- whether to print to console
Output:
query_params with imputed period
'''
# Check to see if a period is specified, if not impute period based on range
if not query_params.period:
period = None
time_range = query_params.stop - query_params.start
if time_range > timedelta(hours = 23, minutes = 59, seconds = 59):
# Range is greater than a day
if time_range > timedelta(days = 6, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a week
if time_range > timedelta(days = 31, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a month
if time_range > timedelta(days = 364, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a year
# Segment by months
period = 'monthly'
else:
# Range is less than a year
# Segment by weeks
period = 'weekly'
else:
# Range is less than a month
# Segment by days
period = 'daily'
else:
# Range is less than week
# Segment by hour
period = 'hourly'
else:
# Segment by minute
period = 'by_minute'
# Add imputed period
query_params.sql_period = translation_dictionary[period]
# Check to see if we need more granularity for time factor
if query_params.time_factor:
if query_params.time_factor == 'top minute':
if query_params.sql_period in ['year', 'month', 'week', 'day', 'hour']:
query_params.sql_period = 'minute'
if query_params.time_factor == 'top hour':
if query_params.sql_period in ['year', 'month', 'week', 'day']:
query_params.sql_period = 'hour'
if query_params.time_factor == 'top day':
if query_params.sql_period in ['year', 'month', 'week']:
query_params.sql_period = 'day'
if query_params.time_factor == 'top week':
if query_params.sql_period in ['year', 'month']:
query_params.sql_period = 'week'
if query_params.time_factor == 'top month':
if query_params.sql_period in ['year']:
query_params.sql_period = 'month'
return query_params
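# Illustrative sketch of the imputation rules above (kept as comments so the
# module behaviour is unchanged; the SQL names ultimately depend on
# translation_dictionary, which is defined elsewhere). Assuming qp is a
# query_parameters object with no explicit period:
#   qp.start, qp.stop = datetime(2015, 1, 1), datetime(2015, 1, 4)   # ~3 days
#   impute_period(qp)   # range > a day but < a week  -> period 'hourly'
#   qp.start, qp.stop = datetime(2015, 1, 1), datetime(2017, 1, 1)   # ~2 years
#   impute_period(qp)   # range > a year              -> period 'monthly'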
def get_data_from_nl_query(nl_query, error_checking = False):
'''
Input:
nl_query (str) -- this is a natural language query
i.e. what is my revenue today
Returns
df (dataframe) -- this is a pandas dataframe that contains a table
which will be used for visualization
query_params (query_parameters ob
|
ject) -- this is an object holding
everything we need to know
|
about the query
'''
# Get JSON Watson conversations response to natual language query
response = get_intent_entity_from_watson(nl_query, error_checking = False)
# Transform JSON Watson conversations response to query parameters object
query_params = query_parameters()
query_params.generate_query_params_from_response(nl_query, response, error_checking = error_checking)
# Add main factors
if query_params.intent == 'machine_performance':
pass
else:
query_params.sql_factors += main_factors
# Impute period if needed
query_params = impute_period(query_params)
# Generate SQL query
query_params.generate_sql_query(error_checking = error_checking)
# Get SQL query string from query parameters object
sql_query = query_params.sql_string
if error_checking:
print(query_params)
# Place SQL results into DataFrame
df = helper.get_sql_data(sql_query, engine)
if error_checking:
print(df.head())
return df, query_params
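# Intended call pattern (the natural-language string is made up; real results
# depend on the Watson workspace and the database contents):
#   df, qp = get_data_from_nl_query("what was my revenue last month")
#   df -> pandas DataFrame holding the SQL result set
#   qp -> query_parameters object describing metric, range, period, ordering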
def main(query, error_checking = False):
'''
Args:
query (str): this is the natural language input string
Returns:
plot1 (unicode): this is the html, css, javascript to render the
mpld3 plot
mainfactors (list): this is a list of tuples where each element
is three items - the metric, the direction, and the percent change
plot2 (unicode): this is the html, css, javascript to render the
mpld3 plot
derivedmetrics (list): this is a list of tuples where each element
is three items - the metric, the direction, and the percent change
aggregate_statistics (dict) -- dictionary of aggregate statistics to
display on dashboard
'''
# Pull down data from database
df, query_params = get_data_from_nl_query(query, error_checking = error_checking)
# Decide what to do based on query parameters
"""
# Metric
self.metric = None
# Factor(s)
self.factors = []
# Range
self.start = datetime.strptime('2015-01-01', '%Y-%m-%d')
self.stop = datetime.strptime('2015-01-02', '%Y-%m-%d')
# Period
self.period = None
# Ordering
self.ordering = 'date'
# Aggregate Statistic
self.statistic = None
# Specific Factors
self.club_level = None
self.area = None
self.game_title = None
self.manufacturer = None
self.stand = None
self.zone = None
self.bank = None
"""
# All queries will have a metric and range (if none provided we will infer)
# M: always included (if not infer)
# F: indicates multi-line graph or multi-dimensional histogram
# SF: indicates filtering
# R: always included (if not infer)
# P: indicates which materialized view to pull from, if missing indicates a
# single value answer should be provided
# O: indicates histogram
# S:
# Dictionary to hold calculated metrics
metrics = {}
print(query_params)
# Check if we want to do net win analysis
if query_params.intent == 'netwin_analysis':
return netwin_analysis(df, query_params, engine)
# Determine metrics and graph type to build
if query_params.ordering == 'date' and query_params.intent != 'machine_performance':
# Line graph
# Find factor we need to aggregate on (currently supports only si
|
sharkykh/nyaa
|
import_to_es.py
|
Python
|
gpl-3.0
| 4,652
| 0.00172
|
#!/usr/bin/env python
"""
Bulk load torrents from mysql into elasticsearch `nyaav2` index,
which is assumed to already exist.
This is a one-shot deal, so you'd either need to complement it
with a cron job or some binlog-reading thing (TODO)
"""
import sys
import json
# This should be progressbar33
import progressbar
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from elasticsearch import helpers
from nyaa import create_app, models
from nyaa.extensions import db
app = create_app('config')
es = Elasticsearch(timeout=30)
ic = IndicesClient(es)
# turn into thing that elasticsearch indexes. We flatten in
# the stats (seeders/leechers) so we can order by them in es naturally.
# we _don't_ dereference uploader_id to the user's display name however,
# instead doing that at query time. I _think_ this is right because
# we don't want to reindex all the user's torrents just because they
# changed their name, and we don't really want to FTS search on the user anyway.
# Maybe it's more convenient to dereference though.
def mk_es(t, index_name):
return {
"_id": t.id,
"_type": "torrent",
"_index": index_name,
"_source": {
# we're also indexing the id as a number so you can
# order by it. seems like this is just equivalent to
|
# order by created_time, but oh well
"id": t.id,
"display_name": t.display_name,
"created_time": t.created_time,
# not analyzed but included so we can render magnet links
# without querying sql again.
|
"info_hash": t.info_hash.hex(),
"filesize": t.filesize,
"uploader_id": t.uploader_id,
"main_category_id": t.main_category_id,
"sub_category_id": t.sub_category_id,
"comment_count": t.comment_count,
# XXX all the bitflags are numbers
"anonymous": bool(t.anonymous),
"trusted": bool(t.trusted),
"remake": bool(t.remake),
"complete": bool(t.complete),
# TODO instead of indexing and filtering later
# could delete from es entirely. Probably won't matter
# for at least a few months.
"hidden": bool(t.hidden),
"deleted": bool(t.deleted),
"has_torrent": t.has_torrent,
# Stats
"download_count": t.stats.download_count,
"leech_count": t.stats.leech_count,
"seed_count": t.stats.seed_count,
}
}
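# Each dict produced by mk_es is a standard elasticsearch-py bulk action
# (_id / _type / _index / _source), so it can be streamed straight into
# helpers.bulk as done further below. Illustrative only (some_torrents is a
# placeholder iterable):
#   helpers.bulk(es, (mk_es(t, 'nyaa') for t in some_torrents))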
# page through an sqlalchemy query, like the per_fetch but
# doesn't break the eager joins its doing against the stats table.
# annoying that this isn't built in somehow.
def page_query(query, limit=sys.maxsize, batch_size=10000, progress_bar=None):
start = 0
while True:
# XXX very inelegant way to do this, i'm confus
stop = min(limit, start + batch_size)
if stop == start:
break
things = query.slice(start, stop)
if not things:
break
had_things = False
for thing in things:
had_things = True
yield(thing)
if not had_things or stop == limit:
break
if progress_bar:
progress_bar.update(start)
start = min(limit, start + batch_size)
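# Hypothetical use of page_query (process() is a placeholder): walk a large
# query in batch_size slices without breaking the eager-loaded stats join,
# updating the progress bar as it goes:
#   for t in page_query(models.NyaaTorrent.query, progress_bar=bar):
#       process(t)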
FLAVORS = [
('nyaa', models.NyaaTorrent),
('sukebei', models.SukebeiTorrent)
]
# Get binlog status from mysql
with app.app_context():
master_status = db.engine.execute('SHOW MASTER STATUS;').fetchone()
position_json = {
'log_file': master_status[0],
'log_pos': master_status[1]
}
print('Save the following in the file configured in your ES sync config JSON:')
print(json.dumps(position_json))
for flavor, torrent_class in FLAVORS:
print('Importing torrents for index', flavor, 'from', torrent_class)
bar = progressbar.ProgressBar(
maxval=torrent_class.query.count(),
widgets=[ progressbar.SimpleProgress(),
' [', progressbar.Timer(), '] ',
progressbar.Bar(),
' (', progressbar.ETA(), ') ',
])
# turn off refreshes while bulk loading
ic.put_settings(body={'index': {'refresh_interval': '-1'}}, index=flavor)
bar.start()
helpers.bulk(es, (mk_es(t, flavor) for t in page_query(torrent_class.query, progress_bar=bar)), chunk_size=10000)
bar.finish()
# Refresh the index immediately
ic.refresh(index=flavor)
print('Index refresh done.')
# restore to near-enough real time
ic.put_settings(body={'index': {'refresh_interval': '30s'}}, index=flavor)
|
jasminka/goska
|
goska/wsgi.py
|
Python
|
bsd-3-clause
| 385
| 0.002597
|
"""
WSGI con
|
fig for goska project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goska.settings")
from django.core.w
|
sgi import get_wsgi_application
application = get_wsgi_application()
|
GovReady/govready-q
|
siteapp/migrations/0022_remove_project_title.py
|
Python
|
gpl-3.0
| 1,363
| 0.002935
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-10 17:17
from __future__ import unicode_literals
from django.db import migrations
# Move project.title into project.root_task.title_override before
# dropping the project.title column if project.title != project.root_task.module.title.
def forwards_func(apps, schema_editor):
Project = apps.get_model("siteapp", "Project")
db_alias = schema_editor.connection.alias
for project in Project.objects.using(db_alias).all():
if project.title != project.root_task.module.spec["title"]:
project.root_task.title_override = project.title
project.root_task.save()
# To reverse the migration, fill in Project.title from Project.root_task.title.
def reverse_func(apps, schema_editor):
Project = apps.get_model("siteapp", "Project")
db_alias = schema_editor.connection.alias
for project in Project.objects.using(db_alias).all():
project.title = proj
|
ect.root_task.title
project.save()
class Migration(migrations.Migration):
dependencies = [
(
|
'siteapp', '0021_auto_20171029_2217'),
('guidedmodules', '0034_title_override'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
migrations.RemoveField(
model_name='project',
name='title',
),
]
|
isudox/leetcode-solution
|
python-algorithm/leetcode/problem_247.py
|
Python
|
mit
| 1,241
| 0.000806
|
"""247. Strobogrammatic Number II
https://leetcode.com/problems/strobogrammatic-number-ii/
Given an integer n, return all the strobogrammatic numbers that
are of length n. You may return the answer in any order.
A strobogrammatic number is a number that looks the same when
rotated 180 degrees (looked at upside down).
Example 1:
Input: n = 2
Output: ["11","69","88","96"]
Example 2:
Input: n = 1
Output: ["0","1","8"]
Constraints:
1 <= n <= 14
"""
from typing import List
class Solution:
def find_strobogr
|
ammatic(self, n: int) -> List[str]:
def helper(k: int) -> List[str]:
list1 = ["0", "1", "8"]
list2 = ["00", "11", "69", "88", "96"]
if k == 1:
return list1
if k == 2:
return list2
ret = []
prev = k - 2 if k % 2 == 0 else k - 1
split = prev // 2
prev_list = helper(prev)
for num in prev_list:
for
|
interval in (list2 if k % 2 == 0 else list1):
ret.append(num[:split] + interval + num[split:])
return ret
ans = helper(n)
ans = [num for num in ans if len(num) == 1 or not num.startswith('0')]
return ans
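# Quick sanity check mirroring the examples in the problem statement above
# (left as comments so importing the module has no side effects):
#   Solution().find_strobogrammatic(1)  # -> ["0", "1", "8"]
#   Solution().find_strobogrammatic(2)  # -> ["11", "69", "88", "96"]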
|
dnjohnstone/hyperspy
|
hyperspy/misc/machine_learning/import_sklearn.py
|
Python
|
gpl-3.0
| 1,124
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
|
published by
# the Free Software Foundation, either version 3 of the License,
|
or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
"""
Import sklearn.* and randomized_svd from scikit-learn
"""
import warnings
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import sklearn
import sklearn.decomposition
from sklearn.utils.extmath import randomized_svd
sklearn_installed = True
except ImportError:
randomized_svd = None
sklearn_installed = False
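# Downstream code can gate optional ML features on this flag, e.g.
# (illustrative; `data` is a placeholder array):
#   from hyperspy.misc.machine_learning import import_sklearn
#   if import_sklearn.sklearn_installed:
#       u, s, vt = import_sklearn.randomized_svd(data, n_components=3)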
|
KevinGoodsell/caduceus
|
test/test_handle.py
|
Python
|
epl-1.0
| 5,924
| 0.002532
|
# Copyright 2012 Kevin Goodsell
#
# This software is licensed under the Eclipse Public License (EPL) V1.0.
from __future__ import with_statement
import time
import unittest
import STAF
class HandleTests(unittest.TestCase):
def assertSTAFResultError(self, rc, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.fail('STAFResultError not raised')
except STAF.STAFResultError, exc:
self.assertEqual(exc.rc, rc)
def testBasicHandle(self):
with STAF.Handle('test handle') as h:
result = h.submit('local', 'ping', 'ping')
self.assertEqual(result, 'PONG')
result = h.submit('local', 'ping', ['ping'])
self.assertEqual(result, 'PONG')
result = h.submit('local', 'service', 'list')
services = dict((s['name'], s) for s in result)
# There's not much reason to check all these, so just pick a few.
self.assertEqual(services['DELAY'],
{'name': 'DELAY', 'executable': None,
'library': '<Internal>'})
self.assertEqual(services['DIAG'],
{'name': 'DIAG', 'executable': None,
'library': '<Internal>'})
self.assertEqual(services['ECHO'],
{'name': 'ECHO', 'executable': None,
'library': '<Internal>'})
# Submit using a list
result = h.submit('local', 'handle',
['list handles name', 'test handle', 'long'])
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 1)
pieces = result[0]
self.assertEqual(pieces['name'], 'test handle')
self.assertEqual(pieces['state'], 'Registered')
self.assertTrue(h.is_registered())
self.assertFalse(h.is_registered())
def testErrors(self):
h = STAF.Handle('test handle')
self.assertSTAFResultError(STAF.errors.UnknownService,
h.submit, 'local', 'doesntexist', 'do magic')
self.assertSTAFResultError(STAF.errors.InvalidRequestString,
h.submit, 'local', 'ping', 'not a ping command')
h.unregister()
self.assertSTAFResultError(STAF.errors.HandleDoesNotExist,
h.submit, 'local', 'ping', 'ping')
# Unregistering a second time should not produce an error.
h.unregister()
def testStaticHandle(self):
with STAF.Handle('helper') as helper:
self.assertFalse(helper.is_static())
handle_num = helper.submit('local', 'handle',
'create handle name static-test')
handle_num = int(handle_num)
h = STAF.Handle(handle_num)
self.assertTrue(h.is_static())
self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG')
# Unregistering a static handle does nothing.
h.unregister()
self.assertEqual(h.submit('local', 'ping', 'ping'), 'PONG')
# Delete the static handle
helper.submit('local', 'handle',
['delete handle', str(h.handle_num())])
def testSyncModes(self):
with STAF.Handle('test handle') as h:
# FIRE AND FORGET
req = h.submit('local', 'ping', 'ping', STAF.REQ_FIRE_AND_FORGET)
self.assertTrue(req.isdigit())
time.sleep(2)
# No queued result
self.assertSTAFResultError(STAF.errors.NoQueueElement,
h.submit, 'local', 'queue', 'get type STAF/RequestComplete')
# No retained result
self.assertSTAFResultError(STAF.errors.RequestNumberNotFound,
h.submit, 'local', 'service', ['free request', req])
# QUEUE
req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE)
self.assertTrue(req.isdigit())
time.sleep(2)
# Check queued result
result = h.submit('local', 'queue', 'get type STAF/RequestComplete')
msg = result['message']
self.assertEqual(msg['rc'], '0')
self.assertEqual(msg['requestNumber'], req)
self.assertEqual(msg['result'], 'PONG')
# No retained result
self.assertSTAFResultError(STAF.errors.RequestNumberNotFound,
h.submit, 'local', 'service', ['free request', req])
# RETAIN
req = h.submit('local', 'ping', 'ping', STAF.REQ_RETAIN)
self.assertTrue(req.isdigit())
time.sleep(2)
# No queued result
self.assertSTAFResultError(STAF.errors.NoQueueElement,
h.sub
|
mit, 'local', 'queue', 'get type STAF/RequestComplete')
# Check retained result
result = h.submit('local', 'service', ['free request', req])
self.assertEqual(result['rc'], '0')
self.assertEqual(resul
|
t['result'], 'PONG')
# QUEUE AND RETAIN
req = h.submit('local', 'ping', 'ping', STAF.REQ_QUEUE_RETAIN)
self.assertTrue(req.isdigit())
time.sleep(2)
# Check queued result
result = h.submit('local', 'queue', 'get type STAF/RequestComplete')
msg = result['message']
self.assertEqual(msg['rc'], '0')
self.assertEqual(msg['requestNumber'], req)
self.assertEqual(msg['result'], 'PONG')
# Check retained result
result = h.submit('local', 'service', ['free request', req])
self.assertEqual(result['rc'], '0')
self.assertEqual(result['result'], 'PONG')
if __name__ == '__main__':
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
jbedorf/tensorflow
|
tensorflow/python/ops/ragged/ragged_map_fn_op_test.py
|
Python
|
apache-2.0
| 10,897
| 0.00257
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_map_ops.map_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops as mo
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedMapOpTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters([
# The following test sets map over a RaggedTensor and apply a
# transformation that returns with shape:
# [d1, (d2)] -> [d1]
dict(
fn=mo.reduce_mean,
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[2, 4, 6],
),
dict(
fn=string_ops.reduce_join,
elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']],
expected_output=[b'foobarbaz', b'a', b'bc'],
dtype=dtypes.string,
),
# [d1, (d2)] -> [d1, 2]
dict(
fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]),
# fn=self.stack_mean_and_sum,
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 6], [4.5, 9], [6.5, 13]],
dtype=dtypes.float32,
expected_ragged_rank=0,
),
# [d1, (d2)] -> [d1, (d2)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[2, 3, 4], [5, 6], [7, 8]],
dtype=dtypes.int64,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), d3] -> [d1, (d2), d3]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
elems_ragged_rank=1,
expected_ragged_rank=1,
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]],
),
# [d1, (d2)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]),
elems=[[1, 2, 3], [4, 5], [6, 7]],
expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_functional_ops.map_flat_values(mo.add, x, 1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3)] -> [d1, (d2)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=1),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[6], [9, 13]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1, (d3)]
dict(
fn=lambda x: ragged_math_ops.reduce_sum(x, axis=0),
elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
expected_output=[[1, 2, 3], [10, 12]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1, (d2), (d3)] -> [d1]
dict(
fn=ragged_math_ops.reduce_sum,
elems=[[[1, 2, 3]], [[4, 5]
|
, [6, 7]]],
expected_output=[6, 22],
result_dtype=dtypes.int64,
),
# [d1] -> [d1, (d2)]
dict(
fn=mo.range,
elems=[4, 0, 2],
expected_output=[[0, 1, 2, 3], [], [0, 1]],
result_dtype=ragged_tensor.RaggedTensorT
|
ype(
dtype=dtypes.int64, ragged_rank=1),
),
# [d1] -> [d1, (d2), (d3)]
dict(
fn=lambda x: ragged_math_ops.range(mo.range(x)),
elems=[5, 0, 3],
expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [],
[[], [0], [0, 1]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=2),
),
# [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)]
dict(
fn=lambda x: x + np.int64(1),
elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]],
expected_output=[[[[[2, 3, 4]], [[5], [6]]]], [[[[7, 8]]], [[[9],
[]]]]],
result_dtype=ragged_tensor.RaggedTensorType(
dtype=dtypes.int64, ragged_rank=4),
),
])
def testRaggedMap(
self,
fn,
elems,
expected_output,
expected_ragged_rank=None,
result_ragged_rank=None,
elems_ragged_rank=None,
dtype=dtypes.int64,
result_dtype=None,
infer_shape=False,
):
elems = ragged_factory_ops.constant(elems, dtype, elems_ragged_rank)
output = ragged_map_ops.map_fn(
fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape)
expected_rt = ragged_factory_ops.constant(
expected_output, ragged_rank=expected_ragged_rank)
self.assertRaggedEqual(expected_rt, output)
def testRaggedMapOnStructure(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _reduce_sum_from_all(f):
return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin'])
output = ragged_map_ops.map_fn(
fn=_reduce_sum_from_all,
elems=features,
dtype=dtypes.int32,
)
self.assertRaggedEqual(output, [66, 44, 198])
# Test mapping over a dict of RTs can produce a dict of RTs.
def testRaggedMapOnStructure_RaggedOutputs(self):
batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
# [[10, 20, 30], [40], [50, 60, 70]]
robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
features = {'batman': batman, 'robin': robin}
def _increment(f):
return {
'batman': f['batman'] + 1,
'robin': f['robin'] + 1,
}
output = ragged_map_ops.map_fn(
fn=_increment,
elems=features,
infer_shape=False,
dtype={
'batman':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1),
'robin':
ragged_tensor.RaggedTensorType(
dtype=dtypes.int32, ragged_rank=1)
},
)
self.assertRaggedEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]])
self.assertRaggedEqual(output['robin'], [[11, 21, 31],
|
spookylukey/django-autocomplete-light
|
test_project/fk_autocomplete/autocomplete_light_registry.py
|
Python
|
mit
| 196
| 0.005102
|
i
|
mport autocomplete_light
from cities_light.models import City
autocomplete_light.register(City, search_fields=('search_names',),
autocomplete_js_attributes={'placeho
|
lder': 'city name ..'})
|
kevinleake01/textpatgen
|
12-workspace-py/tpl-py-0001.py
|
Python
|
gpl-2.0
| 774
| 0.011628
|
#!/usr/bin/env python
####################################
#
# --- TEXTP
|
ATGEN TEMPLATE ---
#
# Users can change the output by editing
# this file directly.
#
####################################
import sys
sys.stdout.write('####################################\n')
sys.stdout.write('#\n')
sys.stdout.write('# -- TEXTPATGEN GENERATED FILE --\n')
sys.stdout.write('#\n')
sys.stdout.write('# -- Created from a Python script.\n')
sys.stdout.write('#\n')
sys.stdout.write("####################################\n")
num=0
for length
|
in range(0, 16):
for width in range(0, 15):
sys.stdout.write('X-%04X ' % num)
num=num+1
width=width+1
length=length+1
sys.stdout.write('X-%04X\n' % num)
num=num+1
sys.stdout.write('# -- End of file.\n')
sys.stdout.flush()
|
unho/translate
|
translate/convert/csv2po.py
|
Python
|
gpl-2.0
| 9,967
| 0.000602
|
# -*- coding: utf-8 -*-
#
# Copyright 2003-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Comma-Separated Value (.csv) files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/csv2po.html
for examples and usage instructions.
"""
import logging
from translate.storage import csvl10n, po
logger = logging.getLogger(__name__)
def replacestrings(source, *pairs):
r"""Use ``pairs`` of ``(original, replacement)`` to replace text found in
``source``.
:param source: String to on which ``pairs`` of strings are to be replaced
:type source: String
:param \*pairs: Strings to be matched and replaced
:type \*pairs: One or more tuples of (original, replacement)
:return: String with ``*pairs`` of strings replaced
"""
for orig, new in pairs:
source = source.replace(orig, new)
return source
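# Worked example (illustrative values): the pairs are applied in order, so
#   replacestrings("a-b-c", ("-", "_"), ("_c", ""))  ->  "a_b"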
def quotecsvstr(source):
return '"' + \
replacestrings(source,
('\\"', '"'), ('"', '\\"'),
("\\\\'", "\\'"), ('\\\\n', '\\n')) + \
'"'
def simplify(string):
return ''.join(filter(type(string).isalnum, string))
class csv2po:
"""a class that takes translations from a .csv file and puts them in a .po
file
"""
def __init__(self, templatepo=None, charset=None, duplicatestyle="keep"):
"""construct the converter..."""
self.pofile = templatepo
self.charset = charset
self.duplicatestyle = duplicatestyle
self.commentindex = {}
self.sourceindex = {}
self.simpleindex = {}
self.csvfile = None
self.duplicatecomments = []
if self.pofile is not None:
self.unmatched = 0
self.makeindex()
def makeindex(self):
"""makes indexes required for searching..."""
for pounit in self.pofile.units:
joinedcomment = " ".join(pounit.getlocations())
source = pounit.source
# the definitive way to match is by source comment (joinedcomment)
if joinedcomment in self.commentindex:
# unless more than one thing matches...
self.duplicatecomments.append(joinedcomment)
else:
self.commentindex[joinedcomment] = pounit
# do simpler matching in case things have been mangled...
simpleid = simplify(source)
# but check for duplicates
if (simpleid in self.simpleindex and
not (source in self.sourceindex)):
# keep a list of them...
self.simpleindex[simpleid].append(pounit)
else:
self.simpleindex[simpleid] = [pounit]
# also match by standard msgid
self.sourceindex[source] = pounit
for comment in self.duplicatecomments:
if comment in self.commentindex:
del self.commentindex[comment]
def convertunit(self, csvunit):
"""converts csv unit to po unit"""
pounit = po.pounit(encoding="UTF-8")
if csvunit.location:
pounit.addlocation(csvunit.location)
pounit.source = csvunit.source
pounit.target = csvunit.target
return pounit
def handlecsvunit(self, csvunit):
"""handles reintegrating a csv unit into the .po file"""
if (len(csvunit.location.strip()) > 0 and
csvunit.location in self.commentindex):
pounit = self.commentindex[csvunit.location]
elif csvunit.source in self.sourceindex:
pounit = self.sourceindex[csvunit.source]
elif simplify(csvunit.source) in self.simpleindex:
thepolist = self.simpleindex[simplify(csvunit.source)]
if len(thepolist) > 1:
csvfilename = getattr(self.csvfile, "filename", "(unknown)")
matches = "\n ".join(["possible match: " +
pounit.source for pounit in thepolist])
logger.warning("%s - csv entry not unique in pofile, "
"multiple matches found:\n"
" location\t%s\n"
" original\t%s\n"
" translation\t%s\n"
" %s",
csvfilename, csvunit.location,
csvunit.source, csvunit.target, matches)
self.unmatched += 1
return
pounit = thepolist[0]
else:
csvfilename = getattr(self.csvfile, "filename", "(unknown)")
logger.warning("%s - csv entry not found in pofile:\n"
" location\t%s\n"
" original\t%s\n"
" translation\t%s",
csvfilename, csvunit.location,
csvunit.source, csvunit.target)
self.unmatched += 1
return
if pounit.hasplural():
# we need to work out whether we matched the singular or the plural
singularid = pounit.source.strings[0]
pluralid = pounit.source.strings[1]
if csvunit.source == singularid:
pounit.msgstr[0] = csvunit.target
elif csvunit.source == pluralid:
pounit.msgstr[1] = csvunit.target
elif simplify(csvunit.source) == simplify(singularid):
pounit.msgstr[0] = csvunit.target
elif simplify(csvunit.source) == simplify(pluralid):
pounit.msgstr[1] = csvunit.target
else:
logger.warning("couldn't work out singular/plural: %r, %r, %r",
csvunit.source, singularid, pluralid)
self.unmatched += 1
return
else:
pounit.target = csvunit.target
def convertstore(self, thecsvfile):
"""converts a csvfile to
|
a pofile, and returns it. uses templatepo if
given at construction
"""
self.csvfile = thecsvfile
if self.pofile is None:
self.pofile = po.pofile()
mergemode = False
else:
mergemode = True
if self.pofile.units and self.pofile.units[0].isheader():
targetheader = self.pofile.units[0]
|
self.pofile.updateheader(content_type="text/plain; charset=UTF-8",
content_transfer_encoding="8bit")
else:
targetheader = self.pofile.makeheader(charset="UTF-8",
encoding="8bit")
targetheader.addnote("extracted from %s" % self.csvfile.filename,
"developer")
mightbeheader = True
for csvunit in self.csvfile.units:
#if self.charset is not None:
# csvunit.source = csvunit.source.decode(self.charset)
# csvunit.target = csvunit.target.decode(self.charset)
if mightbeheader:
# ignore typical header strings...
mightbeheader = False
if csvunit.match_header():
continue
if (len(csvunit.location.strip()) == 0 and
csvunit.source.find("Content-Type:") != -1):
continue
if mergemode:
self.handlecsvunit(csvunit)
else:
poun
|
smart-solution/icy
|
icy_sale_webkit/__init__.py
|
Python
|
lgpl-3.0
| 1,084
| 0.000923
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2013 Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero
|
General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###########################
|
#################################################
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
openstack/mistral
|
mistral/tests/unit/services/test_workbook_service.py
|
Python
|
apache-2.0
| 8,847
| 0
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2020 Nokia Software.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from mistral.db.v2 import api as db_api
from mistral.lang import parser as spec_parser
from mistral.services import workbooks as wb_service
from mistral.tests.unit import base
# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
WORKBOOK = """
---
version: '2.0'
name: my_wb
tags: [test]
actions:
concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
workflows:
wf1:
#Sample Comment 1
type: reverse
tags: [wf_test]
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
wf2:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf1 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
WORKBOOK_WF1_DEFINITION = """wf1:
#Sample Comment 1
type: reverse
tags: [wf_test]
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
WORKBOOK_WF2_DEFINITION = """wf2:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf1 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
UPDATED_WORKBOOK = """
---
version: '2.0'
name: my_wb
tags: [test]
actions:
concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
workflows:
wf1:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf2 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
wf2:
type: reverse
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
UPDATED_WORKBOOK_WF1_DEFINITION = """wf1:
type: direct
output:
result: "{$.result}"
tasks:
task1:
workflow: my_wb.wf2 param1='Hi' task_name='task1'
publish:
result: "The result of subworkflow is '{$.final_result}'"
"""
UPDATED_WORKBOOK_WF2_DEFINITION = """wf2:
type: reverse
input:
- param1
output:
result: "{$.result}"
tasks:
task1:
action: std.echo output="{$.param1}"
publish:
result: "{$}"
"""
ACTION_DEFINITION = """concat:
base: std.echo
base-input:
output: "{$.str1}{$.str2}"
"""
class WorkbookServiceTest(base.DbTestCase):
def test_create_workbook(self):
namespace = 'test_workbook_service_0123_namespace'
wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
self.assertIsNotNone(wb_db)
self.assertEqual('my_wb', wb_db.name)
self.assertEqual(namespace, wb_db.namespace)
self.assertEqual(WORKBOOK, wb_db.definition)
self.assertIsNotNone(wb_db.spec)
self.assertListEqual(['test'], wb_db.tags)
db_actions = db_api.get_action_definitions(
name='my_wb.concat',
namespace=namespace
)
self.assertEqual(1, len(db_actions))
# Action.
action_db = self._assert_single_item(db_actions, name='my_wb.concat')
self.assertFalse(action_db.is_system)
action_spec = spec_parser.get_action_spec(action_db.spec)
self.assertEqual('concat', action_spec.get_name())
self.assertEqual('std.echo', action_spec.get_base())
self.assertEqual(ACTION_DEFINITION, action_db.definition)
db_wfs = db_api.get_workflow_definitions()
self.assertEqual(2, len(db_wfs))
# Workflow 1.
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1')
wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec)
self.assertEqual('wf1', wf1_spec.get_name())
self.assertEqual('reverse', wf1_spec.get_type())
self.assertListEqual(['wf_test'], wf1_spec.get_tags())
self.assertListEqual(['wf_test'], wf1_db.tags)
self.assertEqual(namespace, wf1_db.namespace)
self.assertEqual(WORKBOOK_WF1_DEFINITION, wf1_db.definition)
# Workflow 2.
wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2')
wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec)
self.assertEqual('wf2', wf2_spec.get_name())
self.assertEqual('direct', wf2_spec.get_type())
self.assertEqual(namespace, wf2_db.namespace)
self.assertEqual(WORKBOOK_WF2_DEFINITION, wf2_db.definition)
def test_create_same_workbook_in_different_namespaces(self):
first_namespace = 'first_namespace'
second_namespace = 'second_namespace'
first_wb = wb_service.create_workbook_v2(WORKBOOK,
namespace=first_namespace)
self.assertIsNotNone(first_wb)
self.assertEqual('my_wb', first_wb.name)
self.assertEqual(first_namespace, first_wb.namespace)
second_wb = wb_service.create_workbook_v2(WORKBOOK,
namespace=second_namespace)
self.assertIsNotNone(second_wb)
|
self.assertEqual('my_wb', second_wb.name)
self.assertEqual(second_namespace, second_wb.namespace)
def test_create_workbook_with_default_namespace(self):
wb_db = wb_service.create_workbook_v2(WORKBOOK)
self.assertIsNotNone(wb_db)
self.as
|
sertEqual('my_wb', wb_db.name)
self.assertEqual('', wb_db.namespace)
db_api.delete_workbook('my_wb')
def test_update_workbook(self):
namespace = 'test_workbook_service_0123_namespace'
# Create workbook.
wb_db = wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
self.assertIsNotNone(wb_db)
self.assertEqual(2, len(db_api.get_workflow_definitions()))
# Update workbook.
wb_db = wb_service.update_workbook_v2(
UPDATED_WORKBOOK,
namespace=namespace
)
self.assertIsNotNone(wb_db)
self.assertEqual('my_wb', wb_db.name)
self.assertEqual(namespace, wb_db.namespace)
self.assertEqual(UPDATED_WORKBOOK, wb_db.definition)
self.assertListEqual(['test'], wb_db.tags)
db_wfs = db_api.get_workflow_definitions()
self.assertEqual(2, len(db_wfs))
# Workflow 1.
wf1_db = self._assert_single_item(db_wfs, name='my_wb.wf1')
wf1_spec = spec_parser.get_workflow_spec(wf1_db.spec)
self.assertEqual('wf1', wf1_spec.get_name())
self.assertEqual('direct', wf1_spec.get_type())
self.assertEqual(namespace, wf1_db.namespace)
self.assertEqual(UPDATED_WORKBOOK_WF1_DEFINITION, wf1_db.definition)
# Workflow 2.
wf2_db = self._assert_single_item(db_wfs, name='my_wb.wf2')
wf2_spec = spec_parser.get_workflow_spec(wf2_db.spec)
self.assertEqual('wf2', wf2_spec.get_name())
self.assertEqual('reverse', wf2_spec.get_type())
self.assertEqual(namespace, wf2_db.namespace)
self.assertEqual(UPDATED_WORKBOOK_WF2_DEFINITION, wf2_db.definition)
def test_delete_workbook(self):
namespace = 'pqr'
# Create workbook.
wb_service.create_workbook_v2(WORKBOOK, namespace=namespace)
db_wfs = db_api.ge
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/creation.py
|
Python
|
gpl-2.0
| 4,546
| 0.00176
|
from django.conf import settings
|
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
geom_index_type = 'GIST'
geom_index_ops = 'GIST_GEOMETRY_OPS'
geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
@cached_property
def template_postgis(self):
template_postgis = getattr(s
|
ettings, 'POSTGIS_TEMPLATE', 'template_postgis')
with self.connection.cursor() as cursor:
cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
if cursor.fetchone():
return template_postgis
return None
def sql_indexes_for_field(self, model, f, style):
"Return any spatial index creation SQL for the field."
from django.contrib.gis.db.models.fields import GeometryField
output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
if isinstance(f, GeometryField):
gqn = self.connection.ops.geo_quote_name
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
if f.geography or self.connection.ops.geometry:
# Geography and Geometry (PostGIS 2.0+) columns are
# created normally.
pass
else:
# Geometry columns are created by `AddGeometryColumn`
# stored procedure.
output.append(style.SQL_KEYWORD('SELECT ') +
style.SQL_TABLE('AddGeometryColumn') + '(' +
style.SQL_TABLE(gqn(db_table)) + ', ' +
style.SQL_FIELD(gqn(f.column)) + ', ' +
style.SQL_FIELD(str(f.srid)) + ', ' +
style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
style.SQL_KEYWORD(str(f.dim)) + ');')
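# The statement assembled above has this general shape (values illustrative):
#   SELECT AddGeometryColumn('my_table', 'geom', 4326, 'POINT', 2);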
if not f.null:
# Add a NOT NULL constraint to the field
output.append(style.SQL_KEYWORD('ALTER TABLE ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' ALTER ') +
style.SQL_FIELD(qn(f.column)) +
style.SQL_KEYWORD(' SET NOT NULL') + ';')
if f.spatial_index:
# Spatial indexes created the same way for both Geometry and
# Geography columns.
# PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
# we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
# which are fast on multidimensional cases, or just plain
# gist index for the 2d case.
if f.geography:
index_ops = ''
elif self.connection.ops.geometry:
if f.dim > 2:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
else:
index_ops = ''
else:
index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
output.append(style.SQL_KEYWORD('CREATE INDEX ') +
style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
style.SQL_KEYWORD(' ON ') +
style.SQL_TABLE(qn(db_table)) +
style.SQL_KEYWORD(' USING ') +
style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
return output
def sql_table_creation_suffix(self):
if self.template_postgis is not None:
return ' TEMPLATE %s' % (
self.connection.ops.quote_name(self.template_postgis),)
return ''
def _create_test_db(self, verbosity, autoclobber):
test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
if self.template_postgis is None:
# Connect to the test database in order to create the postgis extension
self.connection.close()
self.connection.settings_dict["NAME"] = test_database_name
with self.connection.cursor() as cursor:
cursor.execute("CREATE EXTENSION IF NOT EXISTS postgis")
cursor.connection.commit()
return test_database_name
|
adamwen829/instapush-py
|
instapush/instapush.py
|
Python
|
mit
| 2,190
| 0.00411
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
import json
import requests
class Instapush(object):
def __init__(self, user_token):
self.user_token = user_token
self._headers = {}
@property
def headers(self):
if not self._headers:
self._headers = {'Content-Type': 'application/json',
'x-instapush-token': self.user_token}
return self._headers
def add_app(self, title):
payload = {'title': title}
ret = requests.post('http://api.instapush.im/v1/apps/add',
headers=self.headers,
data=json.dumps(payload)).json()
return ret
def list_app(self):
ret= requests.get('http://api.instapush.im/v1/apps/list',
headers=self.headers).json()
return ret
class App(object):
def __init__(self, appid, secret):
self.appid = app
|
id
self.secret = secret
self._headers = {}
@property
def headers(self):
if not self._headers:
self._headers = {'Content-Type': 'application/json',
'x-instapush-appid': self.appid,
'x-instapush-appsecret': self.secret}
return self._headers
def add_event(self, event_name, trackers, message):
payl
|
oad = {'title': event_name,
'trackers': trackers,
'message': message}
ret = requests.post('http://api.instapush.im/v1/events/add',
headers=self.headers,
data=json.dumps(payload)).json()
return ret
def list_event(self):
ret = requests.get('http://api.instapush.im/v1/events/list',
headers=self.headers).json()
return ret
def notify(self, event_name, trackers):
payload = {'event': event_name, 'trackers': trackers}
ret = requests.post('http://api.instapush.im/v1/post',
headers=self.headers,
data=json.dumps(payload)).json()
return ret
|
xtso520ok/mitmproxy
|
libmproxy/console/flowlist.py
|
Python
|
mit
| 8,997
| 0.002223
|
from __future__ import absolute_import
import urwid
from . import common
def _mkhelp():
text = []
keys = [
("A", "accept all intercepted flows"),
("a", "accept this intercepted flow"),
("C", "clear flow list or eventlog"),
("d", "delete flow"),
("D", "duplicate flow"),
("e", "toggle eventlog"),
("F", "toggle follow flow list"),
("l", "set limit filter pattern"),
("L", "load saved flows"),
("r", "replay request"),
("V", "revert changes to request"),
("w", "save flows "),
("W", "stream flows to file"),
("X", "kill and delete flow, even if it's mid-intercept"),
("tab", "tab between eventlog and flow list"),
("enter", "view flow"),
("|", "run script on this flow"),
]
text.extend(common.format_keyvals(keys, key="key", val="text", indent=4))
return text
help_context = _mkhelp()
footer = [
('heading_key', "?"), ":help ",
]
class EventListBox(urwid.ListBox):
def __init__(self, master):
self.master = master
urwid.ListBox.__init__(self, master.eventlist)
def keypress(self, size, key):
key = common.shortcuts(key)
if key == "C":
self.master.clear_events()
key = None
return urwid.ListBox.keypress(self, size, key)
class BodyPile(urwid.Pile):
def __init__(self, master):
h = urwid.Text("Event log")
h = urwid.Padding(h, align="left", width=("relative", 100))
self.inactive_header = urwid.AttrWrap(h, "heading_inactive")
self.active_header = urwid.AttrWrap(h, "heading")
urwid.Pile.__init__(
self,
[
FlowListBox(master),
urwid.Frame(EventListBox(master), header = self.inactive_header)
]
)
self.master = master
def keypress(self, size, key):
if key == "tab":
self.focus_position = (self.focus_position + 1)%len(self.widget_list)
if self.focus_position == 1:
self.widget_list[1].header = self.active_header
else:
self.widget_list[1].header = self.inactive_header
key = None
elif key == "e":
self.master.toggle_eventlog()
key = None
# This is essentially a copypasta from urwid.Pile's keypress handler.
# So much for "closed for modification, but open for extension".
item_rows = None
if len(size)==2:
item_rows = self.get_item_rows( size, focus=True )
i = self.widget_list.index(self.focus_item)
tsize = self.get_item_size(size,i,True,item_rows)
return self.focus_item.keypress( tsize, key )
class ConnectionItem(common.WWrap):
def __init__(self, master, state, flow, focus):
self.master, self.state, self.flow = master, state, flow
self.f = focus
w = self.get_text()
common.WWrap.__init__(self, w)
def get_text(self):
return common.format_flow(self.flow, self.f, hostheader=self.master.showhost)
def selectable(self):
return True
def save_flows_prompt(self, k):
if k == "a":
self.master.path_prompt(
"Save all flows to: ",
self.state.last_saveload,
self.master.save_flows
)
else:
self.master.path_prompt(
"Save this flow to: ",
self.state.last_saveload,
self.master.save_one_flow,
self.flow
)
def stop_server_playback_prompt(self, a):
if a != "n":
self.master.stop_server_playback()
def server_replay_prompt(self, k):
if k == "a":
self.master.start_server_playback(
[i.copy() for i in self.master.state.view],
self.master.killextra, self.master.rheaders,
False, self.master.nopop,
self.master.options.replay_ignore_params, self.master.options.replay_ignore_content
)
elif k == "t":
self.master.start_server_playback(
[self.flow.copy()],
self.master.killextra, self.master.rheaders,
False, self.master.nopop,
self.master.options.replay_ignore_params, self.master.options.replay_ignore_content
)
else:
self.master.path_prompt(
"Server replay path: ",
self.state.last_saveload,
self.master.server_playback_path
)
def keypress(self, (maxcol,), key):
key = common.shortcuts(key)
if key == "a":
self.flow.accept_intercept(self.master)
self.master.sync_list_view()
elif key == "d":
self.flow.kill(self.master)
self.state.delete_flow(self.flow)
self.master.sync_list_view()
elif key == "D":
f = self.master.duplicate_flow(self.flow)
self.master.view_flow(f)
elif key == "r":
r = self.master.replay_request(self.flow)
if r:
self.master.statusbar.message(r)
self.master.sync_list_view()
elif key == "S":
if not self.master.server_playback:
self.master.prompt_onekey(
"Server Replay",
(
("all flows", "a"),
("this flow", "t"),
("file", "f"),
),
self.server_replay_prompt,
)
else:
self.master.prompt_onekey(
"Stop current server replay?",
(
("yes", "y"),
("no", "n"),
),
self.stop_server_playback_prompt,
)
elif key == "V":
if not self.flow.modified():
self.master.statusbar.message("Flow not modi
|
fied.")
return
self.state.revert(self.flow)
self.master.sync_list_view()
self.master.statusbar.message("Reverted.")
elif key == "w":
self.master.prompt_onekey(
"Save",
(
("all flows", "a"),
("this flow", "t"),
),
self.save_flows_prompt,
)
elif key == "X":
self.flow.kill(self.master)
elif key == "enter":
if self.flow.request:
self.master.view_flow(self.flow)
elif key == "|":
self.master.path_prompt(
"Send flow to script: ",
self.state.last_script,
self.master.run_script_once,
self.flow
)
else:
return key
class FlowListWalker(urwid.ListWalker):
def __init__(self, master, state):
self.master, self.state = master, state
if self.state.flow_count():
self.set_focus(0)
def get_focus(self):
f, i = self.state.get_focus()
f = ConnectionItem(self.master, self.state, f, True) if f else None
return f, i
def set_focus(self, focus):
ret = self.state.set_focus(focus)
return ret
def get_next(self, pos):
f, i = self.state.get_next(pos)
f = ConnectionItem(self.master, self.state, f, False) if f else None
return f, i
def get_prev(self, pos):
f, i = self.state.get_prev(pos)
f = ConnectionItem(self.master, self.state, f, False) if f else None
return f, i
class FlowListBox(urwid.ListBox):
def __init__(self, master):
self.master = master
urwid.ListBox.__init__(self, master.flow_list_walker)
def keypress(self, size, key):
key = common.shortcuts(key)
if key == "A":
self.master.accept_all()
self.master.sync_list_view()
elif key == "C":
self.master.clear_flows()
elif key == "e":
self.ma
|
Tehsmash/networking-cisco
|
networking_cisco/plugins/cisco/common/utils.py
|
Python
|
apache-2.0
| 3,017
| 0
|
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
import imp
import time
from oslo_log import log as logging
from neutron_lib import exceptions as nexception
from networking_cisco._i18n import _
LOG = logging.getLogger(__name__)
class DriverNotFound(nexception.NotFound):
    message = _("Driver %(driver)s does not exist")
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry
-decorator-python/
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
LOG.debug("%(err_mess)s. Retry calling function "
"'%(f_name)s' in %(delta)d seconds.",
{'err_mess': str(e), 'f_name': f.__name__,
'delta': mdelay})
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
LOG.debug("Last retry calling function '%s'.", f.__name__)
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
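# Hedged usage sketch (illustration only, not part of the original module): a
# hypothetical flaky call retried up to 4 times, sleeping 3, 6 and 12 seconds
# between attempts because the backoff factor doubles the delay on each retry.
#
#     @retry(IOError, tries=4, delay=3, backoff=2)
#     def fetch_running_config(device):
#         return device.read_running_config()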
def convert_validate_driver_class(driver_class_name):
# Verify that import_obj is a loadable class
if driver_class_name is None or driver_class_name == '':
return driver_class_name
else:
parts = driver_class_name.split('.')
m_pathname = '/'.join(parts[:-1])
try:
info = imp.find_module(m_pathname)
mod = imp.load_module(parts[-2], *info)
if parts[-1] in dir(mod):
return driver_class_name
except ImportError as e:
LOG.error('Failed to verify driver module %(name)s: %(err)s',
{'name': driver_class_name, 'err': e})
raise DriverNotFound(driver=driver_class_name)
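# Hedged usage sketch (not part of the original module): the dotted path below is
# a placeholder. A path that resolves to an importable class is returned
# unchanged; anything else raises DriverNotFound.
#
#     driver = convert_validate_driver_class(
#         'networking_cisco.plugins.cisco.device_manager.example.ExampleDriver')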
|
daniLOLZ/variaRoba
|
Python/32.py
|
Python
|
mit
| 1,364
| 0.040323
|
# Hangman for real tho(t)
import random
def pickWord(words):
    toReturn = random.choice(words)
return toReturn
def drawHangman(parts):
if parts >= 1:
print(" O")
if parts >= 4:
print("/|\\")
elif parts >= 3:
print("/|")
elif parts >= 2:
print(" |")
if parts >= 6:
print("/ \\")
elif parts == 5:
        print("/")
print("\n")
with open("scrabble.txt", "r") as paroleFile:
lol = paroleFile.read().split("\n")
word = pickWord(lol)
completo = False
okLetter = False
guessedLetters = set()
progress = ["_" for i in range(len(word))]
remainingLetters = len(word)
guesses = 0
while not completo:
okLetter = False
for i in progress:
print(i, end="")
while not okLetter:
print("\n\n\nGuess your letter: ")
letter = input().upper()
if letter in guessedLetters:
print("You already tried that ")
else:
guessedLetters.add(letter)
okLetter = True
if letter not in word:
print("Wrong letter ")
guesses += 1
print("Guesses remaining: ", 7 - guesses, "\n")
else:
for i in range(len(word)):
if word[i] == letter:
progress[i] = letter
remainingLetters -= 1
drawHangman(guesses)
if remainingLetters <= 0:
for i in progress:
print(i, end="")
print("\n\nYou won ye")
completo = True
if guesses > 6:
print(" ^^ DED ^^ \n")
print("Hai perso lol\n")
print("\nLa parola era", str(word))
completo = True
|
ravenac95/virtstrap
|
virtstrap-core/virtstrap/project.py
|
Python
|
mit
| 3,644
| 0.000823
|
"""
virtstrap.project
-----------------
This module contains all the abstractions for dealing with a Project.
Using this object simplifies creating commands that are used to manage
the project.
"""
import os
import sys
from virtstrap import constants
from virtstrap.config import VirtstrapConfig
from virtstrap.utils import call_subprocess
VIRTSTRAP_DIR = constants.VIRTSTRAP_DIR
class Project(object):
@classmethod
def load(cls, options):
"""Creates a project and loads it's configuration immediately"""
project = cls()
project.load_settings(options)
return project
def __init__(self):
self._options = None
self._config = None
def load_settings(self, options):
# Check if project directory is specified
project_dir = getattr(options, 'project_dir', None)
if not project_dir:
project_dir = self._find_project_dir()
project_dir = os.path.abspath(project_dir)
self._project_dir = project_dir
config_file = os.path.join(project_dir, options.config_file)
config = VirtstrapConfig.from_file(config_file,
profiles=options.profiles)
processor = ProjectNameProcessor(project_dir)
project_name = config.process_section('project_name', processor)
self._project_name = project_name
self._config = config
self._config_file = config_file
        self._options = options
def _find_project_dir(self):
return find_project_dir()
@property
def name(self):
return self._project_name
@property
def config_file(self):
if not os.path.isfile(self._config_file):
return None
return self._config_file
def set_options(self, options):
self._options = options
def path(self, *paths):
"""Create a path relative to the project"""
return os.path.join(self._project_dir, *paths)
def env_path(self, *paths):
"""Create a path relative to the virtstrap-dir"""
return os.path.join(self._project_dir,
self._options.virtstrap_dir, *paths)
def bin_path(self, *paths):
"""Create a path relative to the virtstrap-dir's bin directory"""
bin_py = 'bin'
if sys.platform == 'win32':
bin_py = 'Scripts'
return self.env_path(bin_py, *paths)
def process_config_section(self, section, processor):
return self._config.process_section(section, processor)
def config(self, section):
"""Grabs processed section data"""
return self._config.processed(section)
def call_bin(self, command_name, args, **options):
command = [self.bin_path(command_name)]
command.extend(args)
return call_subprocess(command, **options)
class NoProjectFound(Exception):
pass
def find_project_dir(current_dir=None):
"""Finds the project directory for the current directory"""
current_dir = current_dir or os.path.abspath(os.curdir)
if VIRTSTRAP_DIR in os.listdir(current_dir):
vs_dir = os.path.join(current_dir, VIRTSTRAP_DIR)
if os.path.islink(vs_dir) or os.path.isdir(vs_dir):
return current_dir
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
if parent_dir == current_dir:
raise NoProjectFound('No project found')
return find_project_dir(parent_dir)
class ProjectNameProcessor(object):
def __init__(self, project_dir):
self._project_dir = os.path.abspath(project_dir)
def __call__(self, project_name):
return project_name or os.path.basename(self._project_dir)
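# Hedged usage sketch (illustration only): assuming the current directory lives
# somewhere inside a project that contains a directory named constants.VIRTSTRAP_DIR,
# find_project_dir() walks upward until it finds one, and Project.load() wires the
# configuration together. `options` is whatever object the CLI layer provides.
#
#     project_dir = find_project_dir()      # raises NoProjectFound otherwise
#     project = Project.load(options)
#     pip_path = project.bin_path('pip')    # <project>/<virtstrap-dir>/bin/pip on POSIX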
|
contactr2m/remote_repo
|
src/rating/middleware.py
|
Python
|
mit
| 625
| 0.0016
|
try:
from hashlib import md5
except ImportError:
from md5 import md5
class ratingMiddleware(object):
def process_request(self, request):
request.rating_token = self.generate_token(request)
def generate_token(self, request):
        raise NotImplementedError
class ratingIpMiddleware(ratingMiddleware):
def generate_token(self, request):
return request.META['REMOTE_ADDR']
class ratingIpUseragentMiddleware(ratingMiddleware):
def generate_token(self, request):
        s = ''.join((request.META['REMOTE_ADDR'], request.META['HTTP_USER_AGENT']))
return md5(s).hexdigest()
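# Hedged illustration (not part of the original module): with
# ratingIpUseragentMiddleware enabled, a request from 203.0.113.5 sending the
# user agent "Mozilla/5.0" gets the token md5("203.0.113.5Mozilla/5.0").hexdigest().
# Under Python 3 the joined string would need encoding first, e.g.
# md5(s.encode('utf-8')).hexdigest().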
|
scipy/scipy-svn
|
scipy/stats/tests/test_mstats_basic.py
|
Python
|
bsd-3-clause
| 20,295
| 0.034097
|
"""
Tests for the stats.mstats module (support for masked arrays)
"""
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from numpy.testing import TestCase, run_module_suite
from numpy.ma.testutils import assert_equal, assert_almost_equal, \
assert_array_almost_equal, assert_
class TestMquantiles(TestCase):
"""Regression tests for mstats module."""
def test_mquantiles_limit_keyword(self):
"""Ticket #867"""
data = np.array([[ 6., 7., 1.],
[ 47., 15., 2.],
[ 49., 36., 3.],
[ 15., 39., 4.],
[ 42., 40., -999.],
[ 41., 41., -999.],
[ 7., -999., -999.],
[ 39., -999., -999.],
[ 43., -999., -999.],
[ 40., -999., -999.],
[ 36., -999., -999.]])
desired = [[19.2, 14.6, 1.45],
[40.0, 37.5, 2.5 ],
[42.8, 40.05, 3.55]]
quants = mstats.mquantiles(data, axis=0, limit=(0, 50))
assert_almost_equal(quants, desired)
class TestGMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual= mstats.gmean(a)
desired = np.power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
assert_(not isinstance(desired1, ma.MaskedArray))
#
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual= mstats.gmean(a)
desired = np.power(1*2*3,1./3.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
#
def test_2D(self):
a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
actual= mstats.gmean(a)
desired = np.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
#
desired1 = mstats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
#
actual= mstats.gmean(a, -1)
desired = ma.array((np.power(1*2*3*4,1./4.),
np.power(2*3,1./2.),
np.power(1*4,1./2.)))
assert_array_almost_equal(actual, desired, decimal=14)
class TestHMean(TestCase):
def test_1D(self):
a = (1,2,3,4)
actual= mstats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = mstats.hmean(ma.array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
#
a = ma.array((1,2,3,4),mask=(0,0,0,1))
actual= mstats.hmean(a)
desired = 3. / (1./1 + 1./2 + 1./3)
assert_almost_equal(actual, desired,decimal=14)
desired1 = mstats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_2D(self):
a = ma.array(((1,2,3,4),(1,2,3,4),(1,2,3,4)),
mask=((0,0,0,0),(1,0,0,1),(0,1,1,0)))
actual= mstats.hmean(a)
desired = ma.array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
#
actual1 = mstats.hmean(a,axis=-1)
desired = (4./(1/1.+1/2.+1/3.+1/4.),
2./(1/2.+1/3.),
2./(1/1.+1/4.)
)
assert_array_almost_equal(actual1, desired, decimal=14)
class TestRanking(TestCase):
#
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
#
def test_ranking(self):
x = ma.array([0,1,1,1,2,3,4,5,5,6,])
assert_almost_equal(mstats.rankdata(x),[1,3,3,3,5,6,7,8.5,8.5,10])
x[[3,4]] = masked
assert_almost_equal(mstats.rankdata(x),[1,2.5,2.5,0,0,4,5,6.5,6.5,8])
assert_almost_equal(mstats.rankdata(x,use_missing=True),
[1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
x = ma.array([0,1,5,1,2,4,3,5,1,6,])
assert_almost_equal(mstats.rankdata(x),[1,3,8.5,3,5,7,6,8.5,3,10])
x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
assert_almost_equal(mstats.rankdata(x),[[1,3,3,3,5],[6,7,8.5,8.5,10]])
assert_almost_equal(mstats.rankdata(x,axis=1),[[1,3,3,3,5],[1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),[[1,1,1,1,1],[2,2,2,2,2,]])
class TestCorr(TestCase):
#
def test_pearsonr(self):
"Tests some computations of Pearson's r"
x = ma.arange(10)
olderr = np.seterr(all='ignore')
try:
assert_almost_equal(mstats.pearsonr(x,x)[0], 1.0)
assert_almost_equal(mstats.pearsonr(x,x[::-1])[0], -1.0)
x = ma.array(x, mask=True)
pr = mstats.pearsonr(x,x)
finally:
np.seterr(**olderr)
assert_(pr[0] is masked)
assert_(pr[1] is masked)
#
def test_spearmanr(self):
"Tests some computations of Spearman's rho"
(x, y) = ([5.05,6.75,3.21,2.66],[1.65,2.64,2.64,6.95])
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
(x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
#
x = [ 2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
y = [22.6, 08.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
x = [ 2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
y = [22.6, 08.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
(x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
#
def test_kendalltau(self):
"Tests some computations of Kendall's tau"
x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66,np.nan])
y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
assert_almost_equal(np.asarray(mstats.kendalltau(x,y)),
[+0.3333333,0.4969059])
assert_almost_equal(np.asarray(mstats.kendalltau(x,z)),
[-0.5477226,0.2785987])
#
x = ma.fix_invalid([ 0, 0, 0, 0,20,20, 0,60, 0,20,
10,10, 0,40, 0,20, 0, 0, 0, 0, 0, np.nan])
y = ma.fix_invalid([ 0,80,80,80,10,33,60, 0,67,27,
25,80,80,80,80,80,80, 0,10,45, np.nan, 0])
result = mstats.kendalltau(x,y)
assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
#
def test_kendalltau_seasonal(self):
"Tests the seasonal Kendall tau."
x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
[ 4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
[ 3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
[nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
x = ma.fix_invalid(x).T
output = mstats.kendalltau_seasonal(x)
assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
assert_almost_equal(output['seasonal p-value'].round(2),
[0.18,0.53,0.20,0.04])
#
def test_pointbiserial(self):
"Tests point biserial"
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1,-1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
assert_almost_equal(mstats.pointbiserialr(x, y)[0
|
bitglue/pyflakes
|
pyflakes/test/test_other.py
|
Python
|
mit
| 42,084
| 0
|
"""
Tests for various Pyflakes behavior.
"""
from sys import version_info
from pyflakes import messages as m
from pyflakes.test.harness import TestCase, skip, skipIf
class Test(TestCase):
def test_duplicateArgs(self):
self.flakes('def fu(bar, bar): pass', m.DuplicateArgument)
def test_localReferencedBeforeAssignment(self):
self.flakes('''
a = 1
def f():
a; a=1
f()
''', m.UndefinedLocal, m.UnusedVariable)
@skipIf(version_info >= (3,),
'in Python 3 list comprehensions execute in a separate scope')
def test_redefinedInListComp(self):
"""
Test that shadowing a variable in a list comprehension raises
a warning.
"""
self.flakes('''
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
class A:
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
def f():
a = 1
[1 for a, b in [(1, 2)]]
''', m.RedefinedInListComp)
self.flakes('''
[1 for a, b in [(1, 2)]]
[1 for a, b in [(1, 2)]]
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
[1 for a, b in [(1, 2)]]
''')
def test_redefinedInGenerator(self):
"""
Test that reusing a variable in a generator does not raise
a warning.
"""
self.flakes('''
a = 1
(1 for a, b in [(1, 2)])
''')
self.flakes('''
class A:
a = 1
list(1 for a, b in [(1, 2)])
''')
self.flakes('''
def f():
a = 1
(1 for a, b in [(1, 2)])
''', m.UnusedVariable)
self.flakes('''
(1 for a, b in [(1, 2)])
(1 for a, b in [(1, 2)])
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
(1 for a, b in [(1, 2)])
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInSetComprehension(self):
"""
Test that reusing a variable in a set comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1 for a, b in [(1, 2)]}
{1 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1 for a, b in [(1, 2)]}
''')
@skipIf(version_info < (2, 7), "Python >= 2.7 only")
def test_redefinedInDictComprehension(self):
"""
Test that reusing a variable in a dict comprehension does not raise
a warning.
"""
self.flakes('''
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
class A:
a = 1
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
def f():
a = 1
{1: 42 for a, b in [(1, 2)]}
''', m.UnusedVariable)
self.flakes('''
{1: 42 for a, b in [(1, 2)]}
{1: 42 for a, b in [(1, 2)]}
''')
self.flakes('''
for a, b in [(1, 2)]:
pass
{1: 42 for a, b in [(1, 2)]}
''')
def test_redefinedFunction(self):
"""
Test that shadowing a function definition with another one raises a
warning.
"""
self.flakes('''
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedClassFunction(self):
"""
Test that shadowing a function definition in a class suite with another
one raises a warning.
"""
self.flakes('''
class A:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseFunction(self):
"""
Test that shadowing a function definition twice in an if
and else block does not raise a warning.
"""
self.flakes('''
if True:
def a(): pass
else:
def a(): pass
''')
def test_redefinedIfFunction(self):
"""
Test that shadowing a function definition within an if block
raises a warning.
"""
self.flakes('''
if True:
def a(): pass
def a(): pass
''', m.RedefinedWhileUnused)
def test_redefinedTryExceptFunction(self):
"""
Test that shadowing a function definition twice in try
and except block does not raise a warning.
"""
self.flakes('''
try:
def a(): pass
except:
def a(): pass
''')
def test_redefinedTryFunction(self):
"""
Test that shadowing a function definition within a try block
raises a warning.
"""
self.flakes('''
try:
            def a(): pass
def a(): pass
except:
pass
''', m.RedefinedWhileUnused)
def test_redefinedIfElseInListComp(self):
"""
        Test that shadowing a variable in a list comprehension in
an if and else block does not raise a warning.
"""
self.flakes('''
if False:
a = 1
else:
[a for a in '12']
''')
@skipIf(version_info >= (3,),
'in Python 3 list comprehensions execute in a separate scope')
def test_redefinedElseInListComp(self):
"""
Test that shadowing a variable in a list comprehension in
an else (or if) block raises a warning.
"""
self.flakes('''
if False:
pass
else:
a = 1
[a for a in '12']
''', m.RedefinedInListComp)
def test_functionDecorator(self):
"""
Test that shadowing a function definition with a decorated version of
that function does not raise a warning.
"""
self.flakes('''
from somewhere import somedecorator
def a(): pass
a = somedecorator(a)
''')
def test_classFunctionDecorator(self):
"""
Test that shadowing a function definition in a class suite with a
decorated version of that function does not raise a warning.
"""
self.flakes('''
class A:
def a(): pass
a = classmethod(a)
''')
@skipIf(version_info < (2, 6), "Python >= 2.6 only")
def test_modernProperty(self):
self.flakes("""
class A:
@property
def t(self):
pass
@t.setter
def t(self, value):
pass
@t.deleter
def t(self):
pass
""")
def test_unaryPlus(self):
"""Don't die on unary +."""
self.flakes('+1')
def test_undefinedBaseClass(self):
"""
If a name in the base list of a class definition is undefined, a
warning is emitted.
"""
self.flakes('''
class foo(foo):
pass
''', m.UndefinedName)
def test_classNameUndefinedInClassBody(self):
"""
If a class name is used in the body of that class's definition and
the name is not already defined, a warning is emitted.
"""
self.flakes('''
class foo:
foo
''', m.UndefinedName)
def test_classNameDefinedPreviously(self):
"""
If a class name is used in the body of that class's definition and
the name was previously defined in some other way, no warning is
emitted.
"""
self.flakes('''
foo = None
class foo:
foo
''')
def test_classRedefinition(self):
"""
If a
|
i3visio/osrframework
|
osrframework/wrappers/disqus.py
|
Python
|
agpl-3.0
| 3,885
| 0.004377
|
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Disqus(Platform):
"""A <Platform> object for Disqus"""
def __init__(self):
self.platformName = "Disqus"
self.tags = ["tools", "opinions"]
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "https://disqus.com/" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that determine which queries are valid for each mode
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"]
|
= []
self.notFoundText["usufy"] = ["Page not found (404) - Disqus"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be fed when running the program.
self.foundFields = {}
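        # Hedged illustration (not part of the original class): in usufy mode the
        # framework substitutes the queried nickname into self.url["usufy"], so a
        # hypothetical user "johndoe" would be checked against
        # https://disqus.com/johndoe, and a page containing any string from
        # self.notFoundText["usufy"] is treated as a missing profile.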
|
Intel-Corporation/tensorflow
|
tensorflow/python/pywrap_tensorflow.py
|
Python
|
apache-2.0
| 3,448
| 0.008121
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A Python wrapper that loads _pywrap_tensorflow_internal.so."""
import ctypes
import sys
import traceback
from tensorflow.python.platform import self_check
# Perform pre-load sanity checks in order to produce a more actionable error.
self_check.preload_check()
# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
try:
# This import is expected to fail if there is an explicit shared object
# dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
from tensorflow.python import pywrap_dlopen_global_flags
_use_dlopen_global_flags = True
except ImportError:
_use_dlopen_global_flags = False
# On UNIX-based platforms, pywrap_tensorflow is a python library that
# dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (
hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
_default_dlopen_flags = sys.getdlopenflags()
try:
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.set_dlopen_flags()
elif _can_set_rtld_local:
# Ensure RTLD_LOCAL behavior for platforms where it isn't the default
# (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
# override an RTLD_GLOBAL in _default_dlopen_flags).
sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)
# Python2.7 does not have a ModuleNotFoundError.
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import,g-import-not-at-top,line-too-long,undefined-variable
try:
from tensorflow.python._pywrap_tensorflow_internal import *
  # This try catch logic is because there is no bazel equivalent for py_extension.
# Externally in opensource we must enable exceptions to load the shared object
# by exposing the PyInit symbols with pybind. This error will only be
# caught internally or if someone changes the name of the target _pywrap_tensorflow_internal.
  # This logic is used in other internal projects using py_extension.
except ModuleNotFoundError:
pass
if _use_dlopen_global_flags:
pywrap_dlopen_global_flags.reset_dlopen_flags()
elif _can_set_rtld_local:
sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
raise ImportError(
f'{traceback.format_exc()}'
f'\n\nFailed to load the native TensorFlow runtime.\n'
f'See https://www.tensorflow.org/install/errors '
f'for some common causes and solutions.\n'
f'If you need help, create an issue '
f'at https://github.com/tensorflow/tensorflow/issues '
f'and include the entire stack trace above this error message.')
# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long
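# Hedged sketch (illustration only, not TensorFlow code): the generic shape of the
# flag dance above, for loading a hypothetical extension module with RTLD_LOCAL
# semantics so its symbols do not leak into the process-global namespace.
#
#     import ctypes, sys
#     _saved = sys.getdlopenflags()
#     sys.setdlopenflags(_saved | ctypes.RTLD_LOCAL)
#     try:
#         import some_extension_module  # hypothetical C extension
#     finally:
#         sys.setdlopenflags(_saved)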
|
bcroq/kansha
|
kansha/app/comp.py
|
Python
|
bsd-3-clause
| 15,658
| 0.001086
|
# -*- coding:utf-8 -*-
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
import cgi
import sys
import json
import time
import pstats
import urllib
import urlparse
import cProfile as profile
from collections import OrderedDict
import configobj
import pkg_resources
from nagare.i18n import _, _L
from nagare.admin import command
from nagare.namespaces import xhtml5
from nagare import component, wsgi, security, config, log, i18n
from kansha import events
from kansha.card import Card
from kansha import exceptions
from kansha.menu import MenuEntry
from kansha.authentication import login
from kansha import services, notifications
from kansha.services.search import SearchEngine
from kansha.user.usermanager import UserManager
from kansha.user.user_profile import get_userform # !!!!!!!!!!!!!!!
from kansha.board.boardsmanager import BoardsManager
from kansha.security import SecurityManager, Unauthorized
def run():
return command.run('kansha.commands')
class Kansha(object):
"""The Kansha root component"""
def __init__(self, app_title, app_banner, favicon, theme,
card_extensions, services_service):
"""Initialization
"""
self.app_title = app_title
self.app_banner = app_banner
self.favicon = favicon
self.theme = theme
self.card_extensions = card_extensions
self._services = services_service
self.title = component.Component(self, 'tab')
self.user_menu = component.Component(None)
self.content = component.Component(None)
self.user_manager = UserManager()
self.boards_manager = self._services(
BoardsManager, self.app_title, self.app_banner, self.theme,
card_extensions)
self.home_menu = OrderedDict()
self.selected = 'board'
def _on_menu_entry(self, id_):
"""Select a configuration menu entry
In:
- ``id_`` -- the id of the selected menu entry
"""
if id_ == 'boards':
self.boards_manager.load_user_boards()
self.content.becomes(self.home_menu[id_].content)
self.selected = id_
def initialization(self):
""" Initialize Kansha application
Initialize user_menu with current user,
Initialize last board
Return:
- app initialized
"""
user = security.get_user()
self.home_menu['boards'] = MenuEntry(
_L(u'Boards'),
'table2',
self.boards_manager
)
self.home_menu['profile'] = MenuEntry(
_L(u'Profile'),
'user',
self._services(
get_userform(
self.app_title, self.app_banner, self.theme, user.data.source
),
user.data,
)
)
self.user_menu = component.Component(user)
if user and self.content() is None:
self.select_last_board()
return self
def _select_board(self, board):
self.content.becomes(board)
# if user is logged, update is last board
user = security.get_user()
if user:
user.set_last_board(board)
def select_board(self, id_):
"""Redirect to a board by id
In:
- ``id_`` -- the id of the board
"""
if not id_:
return
board = self.boards_manager.get_by_id(id_)
if board is not None and not board.archived:
self.content.becomes(board, 'redirect')
self.selected = 'board'
else:
raise exceptions.BoardNotFound()
def select_board_by_uri(self, uri):
"""Selected a board by URI
In:
- ``uri`` -- the uri of the board
"""
if not uri:
return
board = self.boards_manager.get_by_uri(uri)
if board is not None and not board.archived:
self._select_board(board)
else:
raise exceptions.BoardNotFound()
def select_last_board(self):
"""Selects the last used board if it's possible
Otherwise, content becomes user's home
"""
user = security.get_user()
data_board = user.get_last_board()
if data_board and not data_board.archived and data_board.has_member(user.data):
self.select_board(data_board.id)
else:
self._on_menu_entry('boards')
def handle_event(self, event):
if event.is_(events.BoardLeft) or event.is_(events.BoardArchived):
return self._on_menu_entry('boards')
elif event.is_(events.NewTemplateRequested):
            return self.boards_manager.create_template_from_board(event.emitter, *event.data)
class MainTask(component.Task):
def __init__(self, app_title, theme, config, card_extensions, services_service):
self.app_title = app_title
self.theme = theme
self._services = services_service
        self.app_banner = config['pub_cfg']['banner']
self.favicon = config['pub_cfg']['favicon']
self.app = services_service(
Kansha,
self.app_title,
self.app_banner,
self.favicon,
self.theme,
card_extensions,
)
self.config = config
def go(self, comp):
user = security.get_user()
while user is None:
# not logged ? Call login component
comp.call(
self._services(
login.Login,
self.app_title,
self.app_banner,
self.favicon,
self.theme,
self.config
)
)
user = security.get_user()
user.update_last_login()
comp.call(self.app.initialization())
# Logout
if user is not None:
security.get_manager().logout()
class WSGIApp(wsgi.WSGIApp):
"""This application uses a HTML5 renderer"""
renderer_factory = xhtml5.Renderer
ConfigSpec = {
'application': {'as_root': 'boolean(default=True)',
'title': 'string(default="")',
'banner': 'string(default="")',
'theme': 'string(default="kansha_flat")',
'favicon': 'string(default="img/favicon.ico")',
'disclaimer': 'string(default="")',
'activity_monitor': "string(default='')"},
'locale': {
'major': 'string(default="en")',
'minor': 'string(default="US")'
}
}
def set_config(self, config_filename, conf, error):
super(WSGIApp, self).set_config(config_filename, conf, error)
conf = configobj.ConfigObj(
conf, configspec=configobj.ConfigObj(self.ConfigSpec), interpolation='Template')
config.validate(config_filename, conf, error)
self._services = services.ServicesRepository(
config_filename, error, conf
)
self.card_extensions = services.CardExtensions(
config_filename, error, conf
)
self.as_root = conf['application']['as_root']
self.app_title = unicode(conf['application']['title'], 'utf-8')
self.app_name = conf['application']['name']
self.theme = conf['application']['theme']
self.application_path = conf['application']['path']
# search_engine engine configuration
self.search_engine = SearchEngine(**conf['search'])
self._services.register('search_engine', self.search_engine)
Card.update_schema(self.card_extensions)
# Make assets_manager available to kansha-admin commands
self.assets_manager = self._services['assets_manager']
# other
self.security = SecurityManager(conf['application']['crypto_key'])
self.debug = conf['application']['debug']
self.default_locale = i18n.Locale(
conf['loca
|
CartoDB/cartoframes
|
cartoframes/data/clients/sql_client.py
|
Python
|
bsd-3-clause
| 11,090
| 0.001623
|
from ...io.managers.context_manager import ContextManager
COLLISION_STRATEGIES = ['fail', 'replace']
class SQLClient:
"""SQLClient class is a client to run SQL queries in a CARTO account.
It also provides basic SQL utilities for analyzing and managing tables.
Args:
credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`):
A :py:class:`Credentials <cartoframes.auth.Credentials>`
instance can be used in place of a `username`|`base_url` / `api_key` combination.
Example:
>>> sql = SQLClient(credentials)
"""
def __init__(self, credentials=None):
self._context_manager = ContextManager(credentials)
def query(self, query, verbose=False):
"""Run a SQL query. It returns a `list` with content of the response.
If the `verbose` param is True it returns the full SQL response in a `dict`.
For more information check the `SQL API
documentation
<https://carto.com/developers/sql-api/reference/#tag/Single-SQL-Statement>`.
Args:
query (str): SQL query.
verbose (bool, optional): flag to return all the response. Default False.
Example:
>>> sql.query('SELECT * FROM table_name')
"""
response = self._context_manager.execute_query(query.strip())
if not verbose:
return response.get('rows')
else:
return response
def execute(self, query):
"""Run a long running query. It returns an object with the
status and information of the job. For more information check the `Batch API
documentation
<https://carto.com/developers/sql-api/reference/#tag/Batch-Queries>`.
Args:
query (str): SQL query.
Example:
>>> sql.execute('DROP TABLE table_name')
"""
return self._context_manager.execute_long_running_query(query.strip())
def distinct(self, table_name, column_name):
"""Get the distict values and their count in a table
for a specific column.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Example:
>>> sql.distinct('table_name', 'column_name')
[('value1', 10), ('value2', 5)]
"""
query = '''
SELECT {0}, COUNT(*) FROM {1}
GROUP BY 1 ORDER BY 2 DESC
'''.format(column_name, table_name)
output = self.query(query)
return [(x.get(column_name), x.get('count')) for x in output]
def count(self, table_name):
"""Get the number of elements of a table.
Args:
table_name (str): name of the table.
Example:
>>> sql.count('table_name')
15
"""
query = 'SELECT COUNT(*) FROM {};'.format(table_name)
output = self.query(query)
return output[0].get('count')
def bounds(self, table_name):
"""Get the bounds of the geometries in a table.
Args:
table_name (str): name of the table containing a "the_geom" column.
Example:
>>> sql.bounds('table_name')
[[-1,-1], [1,1]]
"""
query = '''
SELECT ARRAY[
ARRAY[st_xmin(geom_env), st_ymin(geom_env)],
ARRAY[st_xmax(geom_env), st_ymax(geom_env)]
] bounds FROM (
SELECT ST_Extent(the_geom) geom_env
FROM (SELECT the_geom FROM {}) q
) q;
'''.format(table_name)
output = self.query(query)
return output[0].get('bounds')
def schema(self, table_name, raw=False):
"""Show information abo
|
ut the schema of a table.
Args:
table_name (str): name of the table.
raw (bool, optional): return raw dict data if set to True.
Default False.
Example:
            >>> sql.schema('table_name')
Column name Column type
-------------------------------------
cartodb_id number
the_geom geometry
the_geom_webmercator geometry
column1 string
column2 number
"""
query = 'SELECT * FROM {0} LIMIT 0;'.format(table_name)
output = self.query(query, verbose=True)
fields = output.get('fields')
if raw is True:
return {key: fields[key]['type'] for key in fields}
else:
columns = ['Column name', 'Column type']
rows = [(key, fields[key]['type']) for key in fields]
self._print_table(rows, columns=columns, padding=[10, 5])
return None
def describe(self, table_name, column_name):
"""Show information about a column in a specific table.
It returns the COUNT of the table. If the column type is number
it also returns the AVG, MIN and MAX.
Args:
table_name (str): name of the table.
column_name (str): name of the column.
Example:
>>> sql.describe('table_name', 'column_name')
count 1.00e+03
avg 2.00e+01
min 0.00e+00
max 5.00e+01
type: number
"""
column_type = self._get_column_type(table_name, column_name)
stats = ['COUNT(*)']
if column_type == 'number':
stats.append('AVG({})'.format(column_name))
stats.append('MIN({})'.format(column_name))
stats.append('MAX({})'.format(column_name))
query = '''
SELECT {0}
FROM {1};
'''.format(','.join(stats), table_name)
output = self.query(query, verbose=True)
fields = output.get('rows')[0]
rows = [(key, '{:0.2e}'.format(fields[key])) for key in fields if fields[key] is not None]
self._print_table(rows, padding=[5, 10])
print('type: {}'.format(column_type))
def create_table(self, table_name, columns_types, if_exists='fail', cartodbfy=True):
"""Create a table with a specific table name and columns.
Args:
table_name (str): name of the table.
            columns_types (dict): dictionary with the column names and types.
if_exists (str, optional): collision strategy if the table already exists in CARTO.
Options are 'fail' or 'replace'. Default 'fail'.
cartodbfy (bool, optional): convert the table to CARTO format.
Default True. More info `here
<https://carto.com/developers/sql-api/guides/creating-tables/#create-tables>`.
Example:
>>> sql.create_table('table_name', {'column1': 'text', 'column2': 'integer'})
"""
if not isinstance(columns_types, dict):
raise ValueError('The columns_types parameter should be a dictionary of column names and types.')
if if_exists not in COLLISION_STRATEGIES:
raise ValueError('Please provide a valid if_exists value among {}'.format(', '.join(COLLISION_STRATEGIES)))
columns = ['{0} {1}'.format(cname, ctype) for cname, ctype in columns_types.items()]
schema = self._context_manager.get_schema()
query = '''
BEGIN;
{drop};
{create};
{cartodbfy};
COMMIT;
'''.format(
drop='DROP TABLE IF EXISTS {}'.format(table_name) if if_exists == 'replace' else '',
create='CREATE TABLE {0} ({1})'.format(table_name, ','.join(columns)),
cartodbfy='SELECT CDB_CartoDBFyTable(\'{0}\', \'{1}\')'.format(
schema, table_name) if cartodbfy else ''
)
self.execute(query)
def insert_table(self, table_name, columns_values):
"""Insert a row to the table.
Args:
table_name (str): name of the table.
columns_values (dict): dictionary with the column names and values.
Example:
>>> sql.insert_table('table_name', {'column1': ['value1', 'value2'], 'column2': [1, 2]})
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/io/tests/test_pytables.py
|
Python
|
gpl-2.0
| 175,872
| 0.008284
|
import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Categorical, bdate_range,
date_range, Index, DatetimeIndex, isnull)
from pandas.io.pytables import _tables
try:
_tables()
except ImportError as e:
raise nose.SkipTest(e)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
from numpy.testing.decorators import slow
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(),path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [ create_tempfile(p) for p in path ]
yield filenames
else:
filenames = [ create_tempfile(path) ]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
class TestHDFStore(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestHDFStore, cls).setUpClass()
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def tearDownClass(cls):
super(TestHDFStore, cls).tearDownClass()
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
def test_factory_fun(self):
try:
with get_store(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with get_store(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_context(self):
try:
with HDFStore(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with HDFStore(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(self.path) as tbl:
self.assertEqual(len(tbl), 1)
self.assertEqual(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_conv_read_write(self):
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(self.path,'table',append=True)
result = read_hdf(self.path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(self.path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
        # API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with e
|
timdelbruegger/freecopter
|
src/python3/sensorfusion/fusion_master.py
|
Python
|
mit
| 987
| 0.004053
|
from state.logging_state_provider import LoggingStateProviderWithListeners
from sensorfusion.height_provider import HeightProvider
from sensorfusion.attitude_provider import AttitudeProvider, AttitudeState
class SensorFusionMaster(LoggingStateProviderWithListeners):
"""
    This class is responsible for taking the single sensor fusion results and integrating them into a VehicleState.
- AttitudeProvider (gyro, accelerometer, magnetometer) -> AttitudeState (orientation in air)
- HeightProvider (gps, ultrasonic sensor, barometer, attitude) -> HeightState (height above ground, vertical speed)
"""
def __init__(self):
        self.attitudeProvider = AttitudeProvider()
self.heightProvider = HeightProvider()
def update(self):
# this will trigger the heightProvider via the listener
attitude = self.attitudeProvider.update()
vehicle_state = self.heightProvider.update(attitude)
return vehicle_state
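# Hedged usage sketch (not part of the original module): a hypothetical control
# loop would call update() once per cycle and hand the fused state to the flight
# controller.
#
#     master = SensorFusionMaster()
#     while True:
#         vehicle_state = master.update()
#         # ... feed vehicle_state (attitude + height estimate) to the controller ...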
|
rebaltina/DAT210x
|
Module5/assignment5.py
|
Python
|
mit
| 5,502
| 0.011087
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
def plotDecisionBoundary(model, X, y):
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.6
resolution = 0.0025
colors = ['royalblue','forestgreen','ghostwhite']
  # Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Create a 2D Grid Matrix. The values stored in the matrix
  # are the predictions of the class at said location
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.terrain)
# Plot the test original points as well...
for label in range(len(np.unique(y))):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], label=str(label), alpha=0.8)
p = model.get_params()
plt.axis('tight')
plt.title('K = ' + str(p['n_neighbors']))
#
# TODO: Load up the dataset into a variable called X. Check the .head and
# compare it to the file you loaded in a text editor. Make sure you're
# loading your data properly--don't fail on the 1st step!
#
X=pd.read_csv('c:/Users/User/workspace/DAT210x/Module5/Datasets/wheat.data')
X.head()
#
# TODO: Copy the 'wheat_type' series slice out of X, and into a series
# called 'y'. Then drop the original 'wheat_type' column from the X
#
# .. your code here ..
y=X.wheat_type
X=X.drop(labels=['wheat_type','id'], axis=1)
# TODO: Do a quick, "ordinal" conversion of 'y'. In actuality our
# classification isn't ordinal, but just as an experiment...
#
# .. your code here ..
ordered_y=['kama', 'canadian', 'rosa']
y=y.astype("category",ordered=True,categories=ordered_y).cat.codes
#
# TODO: Basic nan munging. Fill each row's nans with the mean of the feature
#
# .. your code here ..
for column in X:
X[column] = X[column].fillna( X[column].mean() )
# return
# TODO: Split X into training and testing data sets using train_test_split().
# INFO: Use 0.33 test size, and use random_state=1. This is important
# so that your answers are verifiable. In the real world, you wouldn't
# specify a random_state.
#
# .. your code here ..
from sklearn.cross_validation import train_test_split
data_train, data_test, label_train, label_test = train_test_split(X, y, test_size=0.33, random_state=1)
#
# TODO: Create an instance of SKLearn's Normalizer class and then train it
# using its .fit() method against your *training* data.
from sklearn import preprocessing
preprocessor = preprocessing.Normalizer()
preprocessor = preprocessor.fit(data_train)
# NOTE: The reason you only fit against your training data is because in a
# real-world situation, you'll only have your training data to train with!
# In this lab setting, you have both train+test data; but in the wild,
# you'll only have your training data, and then unlabeled data you want to
# apply your models to.
#
# .. your code here ..
#
# TODO: With your trained pre-processor, transform both your training AND
# testing data.
T_data_train = preprocessor.transform(data_train)
T_data_test = preprocessor.transform(data_test)
# NOTE: Any testing data has to be transformed with your preprocessor
# that has been fit against your training data, so that it exists in the same
# feature-space as the original data used to train your models.
#
# .. your code here ..
#
# TODO: Just like your preprocessing transformation, create a PCA
# transformation as well. Fit it against your training data, and then
# project your training and testing features into PCA space using the
# PCA model's .transform() method.
#
# NOTE: This has to be done because the only way to visualize the decision
# boundary in 2D would be if your KNN algo ran in 2D as well:
#
# .. your code here ..
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(T_data_train)
T_pca_train = pca.transform(T_data_train)
T_pca_test = pca.transform(T_data_test)
#
# TODO: Create and train a KNeighborsClassifier. Start with K=9 neighbors.
# NOTE: Be sure train your classifier against the pre-processed, PCA-
# transformed training data above! You do not, of course, need to transform
# your labels.
#
# .. your code here ..
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=9)
knn.fit(T_pca_train, label_train)
# HINT: Ensure your KNeighbors classifier object from earlier is called 'knn'
plotDecisionBoundary(knn, T_pca_train, label_train)
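#
# Illustrative aside (not part of the original assignment): the fit-on-train /
# transform-both discipline described in the NOTEs above can also be expressed
# as a single sklearn Pipeline, which guarantees the Normalizer, PCA and KNN
# steps are only ever fit on the training split. The names 'pipe' and
# 'pipe_accuracy' below are new and purely for illustration.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
pipe = make_pipeline(Normalizer(),
                     PCA(n_components=2),
                     KNeighborsClassifier(n_neighbors=9))
pipe.fit(data_train, label_train)
pipe_accuracy = pipe.score(data_test, label_test)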
#------------------------------------
#
# TODO: Display the accuracy score of your test data/labels, computed by
# your KNeighbors model.
#
# NOTE: You do NOT have to run .predict before calling .score, since
# .score will take care of running your predictions for you automatically.
#
# .. your code here ..
from sklearn.metrics import accuracy_score
predictions = knn.predict(T_pca_test)
accuracy_score(label_test, predictions)
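# As the NOTE above says, the estimator's own .score() method gives the same
# accuracy in a single call by running the predictions internally
# (illustrative equivalent of the accuracy_score line above):
knn.score(T_pca_test, label_test)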
#
# BONUS: Instead of the ordinal conversion, try and get this assignment
# working with a proper Pandas get_dummies for feature encoding. HINT:
# You might have to update some of the plotDecisionBoundary code.
plt.show()
|
arruda/rmr
|
rmr/scripts/skoob_crawler.py
|
Python
|
mit
| 1,068
| 0.011236
|
# -*- coding: utf-8 -*-
import requests
import lxml
from lxml import html
main_url = "http://www.skoob.com.br"
def books_for_author(url=None):
"return the books of a given author"
print "acessing: %s" % url
books_found = []
r = requests.get(url)
root = lxml.html.fromstring(r.content)
all_infos = root.cssselect("div.dados_lista_busca")
print "books in this page:"
for book_infos in all_infos:
        title = book_infos.cssselect("a.l15ab")[0].text_content()
books_found.append(title,)
# print title
next_page = None
try:
next_page = root.cssselect("div.proximo span.l13 a")[0].get("href")
books_found.extend(books_for_author(main_url+next_page))
except IndexError:
pass
return books_found
def run():
"get all books from a given author in saraiva bookstore"
url = main_url+"/livro/lista/tag:sarah%20dessen/tipo:autor"
    books = books_for_author(url)
print "============"
for book in books:
print book
|
ppwwyyxx/tensorflow
|
tensorflow/python/training/saving/functional_saver.py
|
Python
|
apache-2.0
| 10,645
| 0.004039
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import saveable_hook
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
"""Saves and restores checkpoints from the current device."""
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
"""
saveable_objects = list(saveable_objects)
for saveable in saveable_objects:
if not isinstance(saveable, saveable_object.SaveableObject):
raise ValueError(
"Expected a list of SaveableObjects, got %s." % (saveable,))
self._saveable_objects = saveable_objects
def save(self, file_prefix):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
Returns:
An `Operation`, or None when executing eagerly.
"""
tensor_names = []
tensors = []
tensor_slices = []
for saveable in self._saveable_objects:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
with ops.device("cpu:0"):
return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)
def restore(self, file_prefix):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
Returns:
A dictionary mapping from SaveableObject names to restore operations.
"""
restore_specs = []
tensor_structure = []
for saveable in self._saveable_objects:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
for spec in saveable.specs:
saveable_tensor_structure.append(spec.name)
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
with ops.device("cpu:0"):
restored_tensors = io_ops.restore_v2(
file_prefix, tensor_names, tensor_slices, tensor_dtypes)
structured_restored_tensors = nest.pack_sequence_as(
tensor_structure, restored_tensors)
restore_ops = {}
for saveable, restored_tensors in zip(self._saveable_objects,
structured_restored_tensors):
restore_ops[saveable.name] = saveable.restore(
restored_tensors, restored_shapes=None)
return restore_ops
def sharded_filename(filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
class MultiDeviceSaver(object):
"""Saves checkpoints directly from multiple devices.
Note that this is a low-level utility which stores Tensors in the keys
specified by `SaveableObject`s. Higher-level utilities for object-based
checkpointing are built on top of it.
"""
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
Objects extending `SaveableObject` will be saved and restored, and
objects extending `SaveableHook` will be called into at save and
restore time.
"""
self._before_save_callbacks = []
self._after_restore_callbacks = []
saveable_objects = list(saveable_objects)
saveables_by_device = {}
for saveable in saveable_objects:
is_saveable = isinstance(saveable, saveable_object.SaveableObject)
is_hook = isinstance(saveable, saveable_hook.SaveableHook)
if not is_saveable and not is_hook:
raise ValueError(
"Expected a dictionary of SaveableObjects, got {}."
.format(saveable))
if is_hook:
self._before_save_callbacks.append(saveable.before_save)
self._after_restore_callbacks.append(saveable.after_restore)
if is_saveable:
saveables_by_device.setdefault(saveable.device, []).append(saveable)
self._single_device_savers = {
device: _SingleDeviceSaver(saveables)
for device, saveables in saveables_by_device.items()}
def to_proto(self):
"""Serializes to a SaverDef referencing the current graph."""
filename_tensor = array_ops.placeholder(
shape=[], dtype=dtypes.string, name="saver_filename")
save_tensor = self._traced_save(filename_tensor)
restore_op = self._traced_restore(filename_tensor).op
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
version=saver_pb2.SaverDef.V2)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_save(self, file_prefix):
save_op = self.save(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies([save_op]):
return array_ops.identity(file_prefix)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_restore(self, file_prefix):
restore_ops = self.restore(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies(restore_ops.values()):
return array_ops.identity(file_prefix)
def save(self, file_prefix):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
Returns:
An `Operation`, or None when executing eagerly.
"""
for callback in self._before_save_callbacks:
callback()
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><sharded_suffix>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
|
apbard/scipy
|
scipy/optimize/optimize.py
|
Python
|
bsd-3-clause
| 105,325
| 0.000218
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable, xrange
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec
from scipy.linalg import get_blas_funcs
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
""" Decorator that caches the value gradient of function each time it
is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.alltrue(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
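# Illustrative usage sketch (not part of the original source): MemoizeJac is
# meant to wrap an objective that returns ``(value, gradient)`` so both pieces
# can be handed to an optimizer separately without recomputing the function,
# e.g. (starting point chosen arbitrarily):
#
#     fg = MemoizeJac(lambda x: (rosen(x), rosen_der(x)))
#     res = fmin_bfgs(fg, x0=[2.0, 2.0], fprime=fg.derivative)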
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending of the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
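# Illustrative sketch (not part of the original source): because OptimizeResult
# subclasses dict and forwards attribute access to its keys, results can be
# read either way:
#
#     res = OptimizeResult(x=np.zeros(2), success=True, nit=10)
#     res.x is res['x']     # -> True
#     sorted(res.keys())    # -> ['nit', 'success', 'x']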
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(numpy.abs(x))
elif ord == -Inf:
return numpy.amin(numpy.abs(x))
else:
return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
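# Illustrative check (not part of the original source): the Rosenbrock function
# has its global minimum, with value 0, at x = (1, ..., 1), so for example:
#
#     >>> rosen(numpy.ones(5))
#     0.0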
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
    The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
"""
x = atleast_1d(x)
H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1
|
s20121035/rk3288_android5.1_repo
|
cts/tools/utils/buildCts.py
|
Python
|
gpl-3.0
| 16,411
| 0.01304
|
#!/usr/bin/python
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for generating CTS test descriptions and test plans."""
import glob
import os
import re
import shutil
import subprocess
import sys
import xml.dom.minidom as dom
from cts import tools
from multiprocessing import Pool
def GetSubDirectories(root):
"""Return all directories under the given root directory."""
return [x for x in os.listdir(root) if os.path.isdir(os.path.join(root, x))]
def GetMakeFileVars(makefile_path):
"""Extracts variable definitions from the given make file.
Args:
makefile_path: Path to the make file.
Returns:
A dictionary mapping variable names to their assigned value.
"""
result = {}
pattern = re.compile(r'^\s*([^:#=\s]+)\s*:=\s*(.*?[^\\])$', re.MULTILINE + re.DOTALL)
stream = open(makefile_path, 'r')
content = stream.read()
  for match in pattern.finditer(content):
result[match.group(1)] = match.group(2)
stream.close()
return result
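# Illustrative example (not part of the original source): for a make file
# containing the line
#
#   LOCAL_PACKAGE_NAME := CtsExampleTestCases
#
# GetMakeFileVars() returns {'LOCAL_PACKAGE_NAME': 'CtsExampleTestCases'}.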
class CtsBuilder(object):
"""Main class for generating test descriptio
|
ns and test plans."""
def __init__(self, argv):
"""Initialize the CtsBuilder from command line arguments."""
if len(argv) != 6:
print 'Usage: %s <testRoot> <ctsOutputDir> <tempDir> <androidRootDir> <docletPath>' % argv[0]
print ''
print 'testRoot: Directory under which to search for CTS tests.'
print 'ctsOutputDir: Directory in which the CTS repository should be created.'
print 'tempDir: Directory to use for storing temporary files.'
print 'androidRootDir: Root directory of the Android source tree.'
print 'docletPath: Class path where the DescriptionGenerator doclet can be found.'
sys.exit(1)
self.test_root = sys.argv[1]
self.out_dir = sys.argv[2]
self.temp_dir = sys.argv[3]
self.android_root = sys.argv[4]
self.doclet_path = sys.argv[5]
self.test_repository = os.path.join(self.out_dir, 'repository/testcases')
self.plan_repository = os.path.join(self.out_dir, 'repository/plans')
self.definedplans_repository = os.path.join(self.android_root, 'cts/tests/plans')
def GenerateTestDescriptions(self):
"""Generate test descriptions for all packages."""
pool = Pool(processes=2)
# generate test descriptions for android tests
results = []
pool.close()
pool.join()
return sum(map(lambda result: result.get(), results))
def __WritePlan(self, plan, plan_name):
print 'Generating test plan %s' % plan_name
plan.Write(os.path.join(self.plan_repository, plan_name + '.xml'))
def GenerateTestPlans(self):
"""Generate default test plans."""
# TODO: Instead of hard-coding the plans here, use a configuration file,
# such as test_defs.xml
packages = []
descriptions = sorted(glob.glob(os.path.join(self.test_repository, '*.xml')))
for description in descriptions:
doc = tools.XmlFile(description)
packages.append(doc.GetAttr('TestPackage', 'appPackageName'))
# sort the list to give the same sequence based on name
packages.sort()
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
self.__WritePlan(plan, 'CTS')
self.__WritePlan(plan, 'CTS-TF')
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
plan.Exclude('android\.media\.cts\.StreamingMediaPlayerTest.*')
# Test plan to not include media streaming tests
self.__WritePlan(plan, 'CTS-No-Media-Stream')
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
self.__WritePlan(plan, 'SDK')
plan.Exclude(r'android\.signature')
plan.Exclude(r'android\.core.*')
self.__WritePlan(plan, 'Android')
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.tests.*')
plan.Exclude(r'android\.core\.tests\.libcore.\package.\harmony*')
self.__WritePlan(plan, 'Java')
# TODO: remove this once the tests are fixed and merged into Java plan above.
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.tests\.libcore.\package.\harmony*')
self.__WritePlan(plan, 'Harmony')
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.vm-tests-tf')
self.__WritePlan(plan, 'VM-TF')
plan = tools.TestPlan(packages)
plan.Include(r'android\.tests\.appsecurity')
self.__WritePlan(plan, 'AppSecurity')
# hard-coded white list for PDK plan
plan.Exclude('.*')
plan.Include('android\.aadb')
plan.Include('android\.bluetooth')
plan.Include('android\.graphics.*')
plan.Include('android\.hardware')
plan.Include('android\.media')
plan.Exclude('android\.mediastress')
plan.Include('android\.net')
plan.Include('android\.opengl.*')
plan.Include('android\.renderscript')
plan.Include('android\.telephony')
plan.Include('android\.nativemedia.*')
plan.Include('com\.android\.cts\..*')#TODO(stuartscott): Should PDK have all these?
self.__WritePlan(plan, 'PDK')
flaky_tests = BuildCtsFlakyTestList()
# CTS Stable plan
plan = tools.TestPlan(packages)
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-stable')
# CTS Flaky plan - list of tests known to be flaky in lab environment
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.Include(package+'$')
plan.IncludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-flaky')
small_tests = BuildAospSmallSizeTestList()
medium_tests = BuildAospMediumSizeTestList()
new_test_packages = BuildCtsVettedNewPackagesList()
# CTS - sub plan for public, small size tests
plan = tools.TestPlan(packages)
plan.Exclude('.*')
for package, test_list in small_tests.iteritems():
plan.Include(package+'$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-kitkat-small')
# CTS - sub plan for public, medium size tests
plan = tools.TestPlan(packages)
plan.Exclude('.*')
for package, test_list in medium_tests.iteritems():
plan.Include(package+'$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-kitkat-medium')
# CTS - sub plan for hardware tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.hardware$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-hardware')
# CTS - sub plan for media tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.media$')
plan.Include(r'android\.view$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-media')
# CTS - sub plan for mediastress tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.mediastress$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.Exclud
|
tobspr/LUI
|
Demos/B_Frame.py
|
Python
|
mit
| 1,147
| 0.000872
|
from DemoFramework import DemoFramework
from LUIVerticalLayout import LUIVerticalLayout
from LUIFrame import LUIFrame
from LUILabel import LUILabel
from LUIButton import LUIButton
from LUIObject import LUIObject
import random
f = DemoFramework()
f.prepare_demo("LUIFrame")
# Constructor
f.add_constructor_parameter("width", "200")
f.add_constructor_parameter("height", "200")
f.add_constructor_parameter("innerPadding", "5")
f.add_constructor_parameter("scrollable", "False")
f.add_constructor_parameter("style", "UIFrame.Raised")
# Functions
# Events
f.construct_sourcecode("LUIFrame")
# Construct a new frame
frame = LUIFrame(parent=f.get_widget_node())
layout = LUIVerticalLayout(parent=frame, spacing=5)
layout.add(LUILabel(text="This is some frame ..", color=(0.2, 0.6, 1.0, 1.0), font_size=20))
layout.add(LUILabel(text="It can contain arbitrary elements."))
layout.add(LUILabel(text="For example this button:"))
layout.add(LUIButton(text="Fancy button"))
# frame.fit_to_children()
f.set_actions({
"Resize to 300x160
|
": lambda: frame.set_size(300, 160),
|
"Fit to children": lambda: frame.clear_size(),
})
run()
|
fictorial/pygameui
|
pygameui/textfield.py
|
Python
|
mit
| 3,128
| 0.000959
|
import pygame
import view
import label
import callback
class TextField(view.View):
"""Editable single line of text.
There are no fancy keybindings; just backspace.
Signals
on_text_change(text_field, text)
on_return(text_field, text)
"""
def __init__(self, frame, text='', placeholder=''):
view.View.__init__(self, frame)
        self.text = text or ''
        self.placeholder = placeholder
self.label = label.Label(pygame.Rect((0, 0), frame.size),
text or placeholder)
self.label.halign = label.LEFT
self.add_child(self.label)
self.enabled = True
self.max_len = None
self.secure = False
self.on_return = callback.Signal()
self.on_text_change = callback.Signal()
def layout(self):
self.label.topleft = self.padding
r_before = self.label.frame.right
self.label.frame.w = self.frame.w - self.padding[0] * 2
self.label.frame.h = self.frame.h - self.padding[1] * 2
self.label.frame.right = r_before
self._update_text()
view.View.layout(self)
def key_down(self, key, code):
if key == pygame.K_BACKSPACE:
self.text = self.text[0:-1]
elif key == pygame.K_RETURN:
can_submit = True
if self.placeholder and self.text == self.placeholder:
can_submit = False
if can_submit:
self.on_return(self, self.text)
else:
try:
self.text = '%s%s' % (self.text, str(code))
except:
pass
self.on_text_change(self, self.text)
if self.max_len:
self.text = self.text[0:self.max_len]
self._update_text()
self.label.shrink_wrap()
self.label.layout()
if self.label.frame.right > self.frame.w - self.padding[0] * 2:
self.label.frame.right = self.frame.w - self.padding[0] * 2
else:
self.label.frame.left = self.padding[0]
def _update_text(self):
if (len(self.text) == 0 and
self.placeholder is not None and
not self.has_focus()):
self.label.text_color = self.placeholder_text_color
self.label.text = self.placeholder
        elif self.secure:
            self.label.text_color = self.text_color
            self.label.text = '*' * len(self.text)
        else:
            self.label.text_color = self.text_color
            self.label.text = self.text
def draw(self):
if not view.View.draw(self) or not self.has_focus():
return False
if (not self.blink_cursor or
pygame.time.get_ticks() / self.cursor_blink_duration % 2 == 0):
size = self.label.font.size(self.text)
rect = pygame.Rect(
self.label.frame.left + self.label.padding[0] + size[0],
self.label.frame.bottom - self.label.padding[1],
10, 2)
pygame.draw.rect(self.surface, self.text_color, rect)
return True
def __repr__(self):
return self.text
|
alxgu/ansible
|
lib/ansible/modules/remote_management/manageiq/manageiq_alerts.py
|
Python
|
gpl-3.0
| 12,976
| 0.002004
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Red Hat Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_alerts
short_description: Configuration of alerts in ManageIQ
extends_documentation_fragment: manageiq
version_added: '2.5'
author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
description:
- The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
options:
state:
description:
- absent - alert should not exist,
- present - alert should exist,
required: False
choices: ['absent', 'present']
default: 'present'
description:
description:
- The unique alert description in ManageIQ.
- Required when state is "absent" or "present".
resource_type:
description:
- The entity type for the alert in ManageIQ. Required when state is "present".
choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
'ExtManagementSystem', 'MiddlewareServer']
expression_type:
description:
- Expression type.
default: hash
choices: ["hash", "miq"]
expression:
description:
- The alert expression for ManageIQ.
- Can either be in the "Miq Expression" format or the "Hash Expression format".
- Required if state is "present".
enabled:
description:
- Enable or disable the alert. Required if state is "present".
type: bool
options:
description:
- Additional alert options, such as notification type and frequency
'''
EXAMPLES = '''
- name: Add an alert with a "hash expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 01
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: ContainerNode
expression:
eval_method: hostd_log_threshold
mode: internal
options: {}
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Add an alert with a "miq expression" to ManageIQ
manageiq_alerts:
state: present
description: Test Alert 02
options:
notifications:
email:
to: ["example@example.com"]
from: "example@example.com"
resource_type: Vm
expression_type: miq
expression:
and:
- CONTAINS:
tag: Vm.managed-environment
value: prod
- not:
CONTAINS:
tag: Vm.host.managed-environment
value: prod
enabled: true
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
- name: Delete an alert from ManageIQ
manageiq_alerts:
state: absent
description: Test Alert 01
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
validate_certs: False
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQAlert(object):
""" Represent a ManageIQ alert. Can be initialized with both the format
    we receive from the server and the format we get from the user.
"""
def __init__(self, alert):
self.description = alert['description']
self.db = alert['db']
self.enabled = alert['enabled']
self.options = alert['options']
self.hash_expression = None
        self.miq_expression = None
if 'hash_expression' in alert:
self.hash_expression = alert['hash_expression']
if 'miq_expression' in alert:
self.miq_expression = alert['miq_expression']
            if 'exp' in self.miq_expression:
# miq_expression is a field that needs a special case, because
# it's returned surrounded by a dict named exp even though we don't
# send it with that dict.
self.miq_expression = self.miq_expression['exp']
def __eq__(self, other):
""" Compare two ManageIQAlert objects
"""
return self.__dict__ == other.__dict__
class ManageIQAlerts(object):
""" Obj
|
ect to execute alert management operations in manageiq.
"""
def __init__(self, manageiq):
self.manageiq = manageiq
self.module = self.manageiq.module
self.api_url = self.manageiq.api_url
self.client = self.manageiq.client
self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
def get_alerts(self):
""" Get all alerts from ManageIQ
"""
try:
response = self.client.get(self.alerts_url + '?expand=resources')
except Exception as e:
self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
return response.get('resources', [])
def validate_hash_expression(self, expression):
""" Validate a 'hash expression' alert definition
"""
# hash expressions must have the following fields
for key in ['options', 'eval_method', 'mode']:
if key not in expression:
msg = "Hash expression is missing required field {key}".format(key=key)
self.module.fail_json(msg)
def create_alert_dict(self, params):
""" Create a dict representing an alert
"""
if params['expression_type'] == 'hash':
            # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
self.validate_hash_expression(params['expression'])
expression_type = 'hash_expression'
else:
# actually miq_expression, but we call it "expression" for backwards-compatibility
expression_type = 'expression'
        # build the alert
alert = dict(description=params['description'],
db=params['resource_type'],
options=params['options'],
enabled=params['enabled'])
# add the actual expression.
alert.update({expression_type: params['expression']})
return alert
def add_alert(self, alert):
""" Add a new alert to ManageIQ
"""
try:
result = self.client.post(self.alerts_url, action='create', resource=alert)
msg = "Alert {description} created successfully: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Creating alert {description} failed: {error}"
if "Resource expression needs be specified" in str(e):
# Running on an older version of ManageIQ and trying to create a hash expression
msg = msg.format(description=alert['description'],
error="Your version of ManageIQ does not support hash_expression")
else:
msg = msg.format(description=alert['description'], error=e)
self.module.fail_json(msg=msg)
def delete_alert(self, alert):
""" Delete an alert
"""
try:
result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
id=alert['id']),
action="delete")
msg = "Alert {description} deleted: {details}"
msg = msg.format(description=alert['description'], details=result)
return dict(changed=True, msg=msg)
except Exception as e:
msg = "Deleting alert {description} failed: {error}"
msg = msg.format(description=alert['description'], error=e)
self.modu
|
sivakuna-aap/superdesk-core
|
superdesk/io/feed_parsers/wenn_parser.py
|
Python
|
agpl-3.0
| 3,652
| 0.002738
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import datetime
from superdesk.io import register_feed_parser
from superdesk.io.feed_parsers import XMLFeedParser
from superdesk.errors import ParserError
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE
from superdesk.utc import utc
class WENNFeedParser(XMLFeedParser):
"""
Feed Parser for parsing the XML supplied by WENN
"""
NAME = 'wenn'
ATOM_NS = 'http://www.w3.org/2005/Atom'
WENN_MM_NS = 'http://feed.wenn.com/xmlns/2010/03/wenn_mm'
WENN_NM_NS = 'http://feed.wenn.com/xmlns/2010/03/wenn_nm'
    WENN_CM_NS = 'http://feed.wenn.com/xmlns/2010/03/wenn_cm'
GEORSS_NS = 'http://www.georss.org/georss'
def can_parse(self, xml):
return xml.tag == self.qname('feed', self.ATOM_NS) and len(xml.findall(
self.qname('NewsManagement', self.WENN_NM_NS))) > 0
def parse(self, xml, provider=None):
itemList = []
try:
for entry in xml.findall(self.qname('entry', self.ATOM_NS)):
item = {}
self.set_item_defaults(item)
self.parse_content_management(item, entry)
self.parse_news_management(item, entry)
item['body_html'] = self.get_elem_content(entry.find(self.qname('content', self.ATOM_NS)))
itemList.append(item)
return itemList
except Exception as ex:
raise ParserError.wennParserError(ex, provider)
def set_item_defaults(self, item):
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['urgency'] = 5
item['pubstatus'] = 'usable'
item['anpa_category'] = [{'qcode': 'e'}]
item['subject'] = [{'qcode': '01000000', 'name': 'arts, culture and entertainment'}]
def parse_news_management(self, item, entry):
news_mgmt_el = entry.find(self.qname('NewsManagement', self.WENN_NM_NS))
if news_mgmt_el:
item['firstcreated'] = self.datetime(self.get_elem_content(
news_mgmt_el.find(self.qname('published', self.WENN_NM_NS))))
item['versioncreated'] = self.datetime(self.get_elem_content(
news_mgmt_el.find(self.qname('updated', self.WENN_NM_NS))))
item['guid'] = self.get_elem_content(
news_mgmt_el.find(self.qname('original_article_id', self.WENN_NM_NS)))
def parse_content_management(self, item, entry):
content_mgmt_el = entry.find(self.qname('ContentMetadata', self.WENN_CM_NS))
if content_mgmt_el:
item['headline'] = self.get_elem_content(content_mgmt_el.find(self.qname('title', self.WENN_CM_NS)))
item['abstract'] = self.get_elem_content(
content_mgmt_el.find(self.qname('first_line', self.WENN_CM_NS)))
item['keywords'] = [element.attrib.get('value') for element in
content_mgmt_el.findall(self.qname('tags', self.WENN_CM_NS) + '/' +
self.qname('tag', self.WENN_CM_NS))
if element.attrib.get('value')]
def get_elem_content(self, elem):
return elem.text if elem is not None else ''
def datetime(self, string):
return datetime.datetime.strptime(string, '%Y-%m-%dT%H:%M:%S+00:00').replace(tzinfo=utc)
register_feed_parser(WENNFeedParser.NAME, WENNFeedParser())
|
JeffHeard/sondra
|
sondra/files.py
|
Python
|
apache-2.0
| 7,919
| 0.001515
|
from functools import lru_cache
import os
import rethinkdb as r
from sondra.document.schema_parser import ValueHandler
try:
from werkzeug.utils import secure_filename
except ImportError:
import re
def secure_filename(name):
        name = re.sub(r'\s+', '-', name)  # Replace whitespace with a dash
        name = re.sub(r'([a-zA-Z]):\\', '', name)  # Strip a Windows drive prefix
        return re.sub(r'[^a-zA-Z0-9\-.]+', '_', name)  # Replace non-alphanumerics with a single _
def _strip_slashes(p):
p = p[1:] if p.startswith('/') else p
return p[:-1] if p.endswith('/') else p
def _join_components(*paths):
return '/'.join(_strip_slashes(p) for p in paths)
class FileHandler(ValueHandler):
def __init__(self, storage_service, key, content_type='application/octet-stream'):
self._storage_service = storage_service
self._key = key
self._content_type = content_type
def post_save(self, document):
self._storage_service.assoc(document, document.obj[self._key])
def to_json_repr(self, value, document):
if not hasattr(value, 'read'):
return super().to_json_repr(value, document)
else:
return self._storage_service.store(
document=document,
key=self._key,
original_filename=getattr(value, "filename", "uploaded-file.dat"),
content_type=self._content_type,
stream=value
)
def pre_delete(self, document):
self._storage_service.delete_for_document(document)
def to_python_repr(self, value, document):
return self._storage_service.stream(value)
def to_rql_repr(self, value, document):
if not hasattr(value, 'read'):
return super().to_rql_repr(value, document)
else:
return self._storage_service.store(
document=document,
key=self._key,
original_filename=getattr(value, "filename", "uploaded-file.dat"),
content_type=self._content_type,
stream=value
)
class FileStorageDefaults(object):
"""Suite mixin for suite containing defaults for file storage"""
media_url_path = "media"
class FileStorageService(object):
def __init__(self):
self._suite = None
self._media_url = None
self._path_start = None
def _db(self, collection):
return r.db(collection.application.db)
def _conn(self, collection):
return collection.application.connection
@lru_cache()
def _table_name(self, collection):
return "_sondra_files__{collection}".format(collection=collection.name)
@lru_cache()
def _table(self, collection):
db = self._db(collection)
conn = self._conn(collection)
table_name = self._table_name(collection)
table = db.table(table_name)
all_tables = { name for name in db.table_list().run(conn) }
if table_name not in all_tables:
db.table_create(table_name).run(conn)
table.index_create('document').run(conn)
table.index_create('collection').run(conn)
return table
def connect(self, suite):
self._suite = suite
host = "{scheme}://{netloc}".format(
scheme=suite.base_url_scheme, netloc=suite.base_url_netloc)
self._media_url = _join_components(host, suite.media_url_path)
self._path_start = len(self._media_url) + 1
def assoc(self, document, url):
app, coll, pk_ext = url[self._path_start:].split('/', 2)
pk, ext = os.path.splitext(pk_ext)
self._table(document.collection).get(pk).update({"document": document.id}).run(self._conn(document.collection))
def store(self, document, key, original_filename, content_type, stream):
collection = document.collection
if document.id is not None:
self.delete_for_document(document, key)
_, filename = os.path.split(original_filename)
_, extension = os.path.splitext(filename)
result = self._table(collection).insert({
"collection": collection.name,
"document": None,
"key": key,
"original_filename": filename,
"extension": extension,
"content_type": content_type,
}).run(self._conn(collection))
new_filename = "{id}{ext}".format(id=result['generated_keys'][0], ext=extension)
self.store_file(collection, new_filename, stream)
return "{media_url}/{app}/{coll}/{new_filename}".format(
media_url=self._media_url,
app=collection.application.slug,
coll=collection.slug,
new_filename=new_filename
)
def stream_file(self, collection, ident_ext):
raise NotImplementedError("Implement stream_file
|
in a concrete class")
def store_file(self, collection, ident_ext, stream):
raise NotImplementedError("Implement store_stream in an concrete class")
def delete_file(self, collection, ident_ext):
raise NotImplementedError("Implement delete_file in a concrete class")
def delete_from_collection(self, collection, ident):
        self.delete_file(collection, ident)
        self._table(collection).get(ident).delete().run(self._conn(collection))
def delete_for_document(self, document, key=None):
if key is not None:
existing = self._table(document.collection)\
.get_all(document, index='document')\
.filter({'key': key})\
.run(self._conn(document.collection))
for f in existing: # should only be one
self.delete_file(document.collection, f['id'] + f['extension'])
else:
self._table(document.collection)\
.get_all(document, index='document')\
.delete()\
.run(self._conn(document.collection))
def stream(self, url):
app, coll, pk = url[self._path_start:].split('/', 2)
pk, ext = os.path.splitext(pk)
collection = self._suite[app][coll]
record = self._table(collection).get(pk).run(self._conn(collection))
in_stream = self.stream_file(collection, pk + ext)
return {
"content_type": record['content_type'],
"filename": record['original_filename'],
"stream": in_stream
}
class LocalFileStorageDefaults(FileStorageDefaults):
"""Suite mixin for local file storage defaults"""
media_path = os.path.join(os.getcwd(), "_media")
media_path_permissions = 0o755
chunk_size = 16384
class LocalFileStorageService(FileStorageService):
def __init__(self):
super(LocalFileStorageService, self).__init__()
self._root = None
def connect(self, suite):
super(LocalFileStorageService, self).connect(suite)
self._root = suite.media_path \
if suite.media_path.startswith('/') \
else os.path.join(os.getcwd(), suite.media_path)
os.makedirs(self._root, self._suite.media_path_permissions, exist_ok=True)
def _path(self, collection, make=False):
p = os.path.join(self._root, collection.application.slug, collection.slug)
if make:
os.makedirs(p, exist_ok=True)
return p
def stream_file(self, collection, ident_ext):
return open(os.path.join(self._path(collection), ident_ext))
def delete_file(self, collection, ident_ext):
os.unlink(os.path.join(self._path(collection), ident_ext))
def store_file(self, collection, ident_ext, stream):
p = self._path(collection, True)
dest = os.path.join(p, ident_ext)
with open(dest, 'w') as out:
chunk = stream.read(self._suite.chunk_size)
while chunk:
out.write(chunk)
chunk = stream.read(self._suite.chunk_size)
out.flush()
|
rule0x42/education
|
Python/daysBetweenDates.py
|
Python
|
gpl-3.0
| 2,697
| 0
|
# Define a daysBetweenDates procedure that would produce the
# correct output if there was a correct nextDay procedure.
#
# Udacity course work
def isLeapYear(year):
if year % 400 == 0:
return True
if year % 100 == 0:
return False
if year % 4 == 0:
return True
return False
def daysInMonth(year, month):
"""Provides days for each month of the year including leap years"""
days_of_months = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
if isLeapYear(year):
days_of_months[1] = 29
return days_of_months[month - 1]
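# Illustrative example (not part of the original source): February picks up the
# extra leap day, so daysInMonth(2012, 2) returns 29 while daysInMonth(2011, 2)
# returns 28.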
def dateIsBefore(year1, month1, day1, year2, month2, day2):
"""Returns True if year1-month1-day1 is before
year2-month2-day2. Otherwise, returns False."""
if year1 < year2:
return True
if year1 == year2:
if month1 < month2:
return True
if month1 == month2:
            # returning False for equal dates (together with the 'assert not'
            # in daysBetweenDates) gives a result of days = 0
return day1 < day2
return False
def nextDay(year, month, day):
"""Simple version: assume every month has 30 days"""
if day < daysInMonth(year, month):
return year, month, day + 1
else:
if month == 12:
return year + 1, 1, 1
else:
return year, month + 1, 1
def daysBetweenDates(year1, month1, day1, year2, month2, day2):
"""Returns the number of days between year1/month1/day1
and year2/month2/day2. Assumes inputs are valid dates
in Gregorian calendar, and the first date is not after
the second."""
assert not dateIsBefore(year2, month2, day2, year1, month1, day1)
days = 0
while dateIsBefore(year1, month1, day1, year2, month2, day2):
year1, month1, day1 = nextDay(year1, month1, day1)
days += 1
return days
def test():
test_cases = [((2012, 9, 30, 2012, 10, 30), 30),
((2012, 1, 1, 2013, 1, 1), 366),
((2012, 9, 1, 2012, 9, 4), 3),
((2016, 9, 1, 2012, 9, 4), "AssertionError"),
((2012, 10, 1, 2012, 9, 1), "AssertionError"),
((2012, 9, 1, 2012, 9, 1), 0),
((1900, 1, 1, 1999, 12, 31), 36523)]
for (args, answer) in test_cases:
try:
result = daysBetweenDates(*args)
if result != answer:
print "Expected:", answer, "Received:", result
else:
print "Test case passed!"
except AssertionError:
if answer == "AssertionError":
print "Test case passed!"
else:
print "Exception: {0} raised AssertionError!\n".format(args)
test()
|
meirwah/st2contrib
|
packs/hubot/actions/post_result.py
|
Python
|
apache-2.0
| 5,619
| 0.00089
|
import json
import httplib
import requests
import six
import pyaml
from six.moves.urllib.parse import urljoin
from st2actions.runners.pythonrunner import Action
__all__ = [
'PostResultAction'
]
def _serialize(data):
return pyaml.dump(data)
def format_possible_failure_result(result):
'''
Error result as generator by the runner container is of the form
{'message': x, 'traceback': traceback}
Try and pull out these value upfront. Some other runners that could publish
these properties would get them for free.
'''
output = {}
message = result.get('message', None)
if message:
output['message'] = message
traceback = result.get('traceback', None)
if traceback:
output['traceback'] = traceback
return output
def format_default_result(result):
try:
output = json.loads(result)
return _serialize(output)
except ValueError:
return result
def format_localrunner_result(result, do_serialize=True):
output = format_possible_failure_result(result)
# Add in various properties if they have values
stdout = result.get('stdout', None)
if stdout:
try:
output['stdout'] = stdout.strip()
except AttributeError:
output['stdout'] = stdout
stderr = result.get('stderr', None)
if stderr:
output['stderr'] = stderr.strip()
return_code = result.get('return_code', 0)
if return_code != 0:
output['return_code'] = return_code
error = result.get('error', None)
if error:
output['error'] = error
return _serialize(output) if do_serialize else output
def format_remoterunner_result(result):
output = format_possible_failure_result(result)
output.update({k: format_localrunner_result(v, do_serialize=False)
for k, v in six.iteritems(result)})
return _serialize(output)
def format_actionchain_result(result):
output = format_possible_failure_result(result)
return '' if not output else _serialize(output)
def format_mistral_result(result):
return format_default_result(result)
def format_pythonrunner_result(result):
output = format_possible_failure_result(result)
# Add in various properties if they have values
result_ = result.get('result', None)
if result_ is not None:
output['result'] = result_
stdout = result.get('stdout', None)
if stdout:
try:
output['stdout'] = stdout.strip()
except AttributeError:
output['stdout'] = stdout
stderr = result.get('stderr', None)
if stderr:
output['stderr'] = stderr.strip()
exit_code = result.get('exit_code', 0)
if exit_code != 0:
output['exit_code'] = exit_code
return _serialize(output)
def format_httprunner_result(result):
return format_default_result(result)
def format_windowsrunner_result(result):
# same format as pythonrunner
return format_pythonrunner_result(result)
FORMATTERS = {
# localrunner
'local-shell-cmd': format_localrunner_result,
'run-local': format_localrunner_result,
'local-shell-script': format_localrunner_result,
'run-local-script': format_localrunner_result,
# remoterunner
'remote-shell-cmd': format_remoterunner_result,
'run-remote': format_remoterunner_result,
'remote-shell-script': format_remoterunner_result,
'run-remote-script': format_remoterunner_result,
# httprunner
'http-request': format_httprunner_result,
'http-runner': format_httprunner_result,
# mistralrunner
'mistral-v1': format_mistral_result,
'mistral-v2': format_mistral_result,
# actionchainrunner
'action-chain': format_actionchain_result,
# pythonrunner
'run-python': format_pythonrunner_result,
'python-script': format_pythonrunner_result,
# windowsrunner
'windows-cmd': format_windowsrunner_result,
'windows-script': format_windowsrunner_result
}
class PostResultAction(Action):
def run(self, result, channel, user=None, whisper=False):
endpoint = self.config['endpoint']
if not endpoint:
raise ValueError('Missing "endpoint" config option')
url = urljoin(endpoint, "/hubot/st2")
headers = {}
headers['Content-Type'] = 'application/json'
body = {
'channel': channel,
'message': self._get_message(result)
}
if user:
body['user'] = user
if whisper is True:
            body['whisper'] = whisper
        data = json.dumps(body)
self.logger.info(data)
response = requests.post(url=url, headers=headers, data=data)
if response.status_code == httplib.OK:
self.logger.info('Message successfully posted')
else:
self.logger.exception('Failed to post message: %s' % (response.text))
return True
def _get_message(self, data):
envelope = '{message}\nstatus : {status}\nexecution: {execution_id}'.format(**data)
result = self._get_result(data)
if result:
message = '%s\n\nresult :\n--------\n%s' % (envelope, self._get_result(data))
else:
message = envelope
return message
def _get_result(self, data):
result = data.get('data', {'result': {}}).get('result', '{}')
try:
result = json.loads(result)
except ValueError:
            # if json.loads fails then just return the result as-is. Should not happen.
return result
return FORMATTERS.get(data['runner_ref'], format_default_result)(result)
|
SE2Dev/PyCoD
|
__init__.py
|
Python
|
mit
| 158
| 0
|
# <pep8 compliant>
from .xmodel import Model
from .xanim import Anim
from .sanim import SiegeAnim
version = (0, 3, 0) # Version specifier for PyCoD
|
amazinger2013/OpenSesame
|
libqtopensesame/items/sketchpad.py
|
Python
|
gpl-3.0
| 1,289
| 0.013964
|
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.sketchpad import sketchpad as sketchpad_runtime
from libqtopensesame.items.qtplugin import qtplugin
from libqtopensesame.items.feedpad import feedpad
class sketchpad(feedpad, qtplugin, sketchpad_runtime):
"""
desc:
The sketchpad controls are implemented in feedpad.
"""
def __init__(self, name, experiment, string=None):
sketchpad_runtime.__init__(self, name, experiment, string)
qtplugin.__init__(self)
def init_edit_widget(self):
"""
desc:
Initializes the widget.
"""
feedpad.init_edit_widget(self)
self.sketchpad_widget.ui.widget_settings_reset_variables.hide()
|
agustinhenze/nikola.debian
|
docs/sphinx/conf.py
|
Python
|
mit
| 8,386
| 0
|
# -*- coding: utf-8 -*-
#
# Nikola documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 22 17:43:37 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
from __future__ import unicode_literals
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
try:
import sphinxcontrib.gist # NOQA
extensions = ['sphinxcontrib.gist']
except ImportError:
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Nikola'
copyright = '2012-2015, The Nikola Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.6.4'
# The full version, including alpha/beta/rc tags.
release = '7.6.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this director
|
y. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nikoladoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
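# For example (illustration only), an A4 build with a slightly larger base
# font could fill the dict above in as:
#   latex_elements = {'papersize': 'a4paper', 'pointsize': '11pt'}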
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'Nikola.tex', 'Nikola Documentation',
'The Nikola Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'nikola', 'Nikola Documentation',
['The Nikola Contributors'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Nikola', 'Nikola Documentation',
'The Nikola Contributors', 'Nikola', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to disp
|
saltukalakus/elastalert
|
elastalert/kibana.py
|
Python
|
apache-2.0
| 13,457
| 0.001115
|
# -*- coding: utf-8 -*-
import urllib
from util import EAException
dashboard_temp = {'editable': True,
u'failover': False,
u'index': {u'default': u'NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED',
u'interval': u'none',
u'pattern': u'',
u'warm_fields': True},
u'loader': {u'hide': False,
u'load_elasticsearch': True,
u'load_elasticsearch_size': 20,
u'load_gist': True,
u'load_local': True,
u'save_default': True,
u'save_elasticsearch': True,
u'save_gist': False,
u'save_local': True,
u'save_temp': True,
u'save_temp_ttl': u'30d',
u'save_temp_ttl_enable': True},
u'nav': [{u'collapse': False,
u'enable': True,
u'filter_id': 0,
u'notice': False,
u'now': False,
u'refresh_intervals': [u'5s',
u'10s',
u'30s',
u'1m',
u'5m',
u'15m',
u'30m',
u'1h',
u'2h',
u'1d'],
u'status': u'Stable',
u'time_options': [u'5m',
u'15m',
u'1h',
u'6h',
u'12h',
u'24h',
u'2d',
u'7d',
u'30d'],
u'timefield': u'@timestamp',
u'type': u'timepicker'}],
u'panel_hints': True,
u'pulldowns': [{u'collapse': False,
u'enable': True,
u'notice': True,
u'type': u'filtering'}],
u'refresh': False,
u'rows': [{u'collapsable': True,
u'collapse': False,
u'editable': True,
u'height': u'350px',
u'notice': False,
u'panels': [{u'annotate': {u'enable': False,
u'field': u'_type',
u'query': u'*',
u'size': 20,
u'sort': [u'_score', u'desc']},
u'auto_int': True,
u'bars': True,
u'derivative': False,
u'editable': True,
u'fill': 3,
u'grid': {u'max': None, u'min': 0},
|
u'group': [u'default'],
u'interactive': True,
u'interval': u'1m',
u'intervals': [u'auto',
u'1s',
|
u'1m',
u'5m',
u'10m',
u'30m',
u'1h',
u'3h',
u'12h',
u'1d',
u'1w',
u'1M',
u'1y'],
u'legend': True,
u'legend_counts': True,
u'lines': False,
u'linewidth': 3,
u'mode': u'count',
u'options': True,
u'percentage': False,
u'pointradius': 5,
u'points': False,
u'queries': {u'ids': [0], u'mode': u'all'},
u'resolution': 100,
u'scale': 1,
u'show_query': True,
u'span': 12,
u'spyable': True,
u'stack': True,
u'time_field': u'@timestamp',
u'timezone': u'browser',
u'title': u'Events over time',
u'tooltip': {u'query_as_alias': True,
u'value_type': u'cumulative'},
u'type': u'histogram',
u'value_field': None,
u'x-axis': True,
u'y-axis': True,
u'y_format': u'none',
u'zerofill': True,
u'zoomlinks': True}],
u'title': u'Graph'},
{u'collapsable': True,
u'collapse': False,
u'editable': True,
u'height': u'350px',
u'notice': False,
u'panels': [{u'all_fields': False,
u'editable': True,
u'error': False,
u'field_list': True,
u'fields': [],
u'group': [u'default'],
u'header': True,
u'highlight': [],
u'localTime': True,
u'normTimes': True,
u'offset': 0,
u'overflow': u'min-height',
u'pages': 5,
u'paging': True,
u'queries': {u'ids': [0], u'mode': u'all'},
u'size': 100,
u'sort': [u'@timestamp', u'desc'],
u'sortable': True,
u'span': 12,
u'spyable': True,
|
ishikota/PyPokerEngine
|
tests/examples/players/console_player_test.py
|
Python
|
mit
| 2,602
| 0.009608
|
from tests.base_unittest import BaseUnitTest
from examples.players.console_player import ConsoleP
|
layer
class ConsolePlayerTest(BaseUnitTest):
def setUp(self):
self.valid_actions = [\
{'action': 'fold', 'amount': 0},\
{'action': 'call', 'amount': 10},\
{'action': 'raise', 'amount': {'max': 105, 'min': 15}}\
]
self
|
.round_state = {
'dealer_btn': 1,
'street': 'preflop',
'seats': [
{'stack': 85, 'state': 'participating', 'name': u'player1', 'uuid': 'ciglbcevkvoqzguqvnyhcb'},
{'stack': 100, 'state': 'participating', 'name': u'player2', 'uuid': 'zjttlanhlvpqzebrwmieho'}
],
'next_player': 1,
'small_blind_pos': 0,
'big_blind_pos': 1,
'community_card': [],
'pot': {
'main': {'amount': 15},
'side': []
},
"round_count": 3,
"action_histories": {
"preflop": [
{'action': 'SMALLBLIND', 'amount': 5, 'add_amount': 5, "uuid": "ciglbcevkvoqzguqvnyhcb"},
{'action': 'BIGBLIND', 'amount': 10, 'add_amount': 5, "uuid": "zjttlanhlvpqzebrwmieho"}
]
}
}
def test_declare_fold(self):
mock_input = self.__gen_raw_input_mock(['f'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(self.valid_actions, None, self.round_state)
self.eq('fold', action)
self.eq(0, amount)
def test_declare_call(self):
mock_input = self.__gen_raw_input_mock(['c'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(self.valid_actions, None, self.round_state)
self.eq('call', action)
self.eq(10, amount)
def test_declare_valid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '15'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(self.valid_actions, None, self.round_state)
self.eq('raise', action)
self.eq(15, amount)
def test_correct_invalid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '14', '105'])
player = ConsolePlayer(mock_input)
player.set_uuid("dummy")
action, amount = player.declare_action(self.valid_actions, None, self.round_state)
self.eq('raise', action)
self.eq(105, amount)
def __gen_raw_input_mock(self, mock_returns):
counter = []
def raw_input_wrapper(self):
mock_return = mock_returns[len(counter)]
counter.append(0)
return mock_return
return raw_input_wrapper
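# Illustration only (not part of the original test suite): the same kind of
# sequential input mock can be built on an iterator; like the helper above,
# this assumes the mocked callable is invoked with a single positional
# argument per prompt.
def _gen_raw_input_mock_alt(mock_returns):
    answers = iter(mock_returns)
    def raw_input_wrapper(self=None):
        return next(answers)
    return raw_input_wrapper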
|
plenario/plenario
|
config.py
|
Python
|
mit
| 220
| 0.004545
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'd
|
b_r
|
epository')
SQLALCHEMY_TRACK_MODIFICATIONS = False
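# Illustration only (not part of this module): a Flask app would typically
# pick these settings up with something like
#     app.config.from_object('config')
# which requires DATABASE_URL to be exported in the environment beforehand.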
|
hzlf/openbroadcast
|
website/shop/shop/tests/util.py
|
Python
|
gpl-3.0
| 6,686
| 0.001047
|
#-*- coding: utf-8 -*-
from decimal import Decimal
from django.contrib.auth.models import User, AnonymousUser
from django.core.exceptions import ImproperlyConfigured
from django.test.testcases import TestCase
from shop.addressmodel.models import Address, Country
from shop.models.cartmodel import Cart
from shop.util.address import get_shipping_address_from_request, \
assign_address_to_request, get_billing_address_from_request
from shop.util.cart import get_or_create_cart
from shop.util.fields import CurrencyField
from shop.util.loader import load_class
class Mock(object):
pass
class CurrencyFieldTestCase(TestCase):
"""
Tests the currency field defined in the util package
"""
def test_01_currencyfield_has_fixed_format(self):
cf = CurrencyField(max_digits=2, decimal_places=1)
number = cf.format_number(99.99)
#number should *not* end up having only one decimal place
self.assertEqual(Decimal(number), Decimal('99.99'))
def test_02_currencyfield_has_default(self):
cf = CurrencyField()
default = cf.get_default()
self.assertNotEqual(default, None)
self.assertEqual(default, Decimal('0.0'))
def test_03_currencyfield_can_override_default(self):
cf = CurrencyField(default=Decimal('99.99'))
default = cf.get_default()
self.assertNotEqual(default, None)
self.assertEqual(default, Decimal('99.99'))
class CartUtilsTestCase(TestCase):
"""
Tests the cart util functions in the util package
"""
def setUp(self):
self.user = User.objects.create(username="test",
email="test@example.com",
first_name="Test",
last_name="Toto")
self.cart = Cart.objects.create()
self.request = Mock()
setattr(self.request, 'user', None)
setattr(self.request, 'session', None)
def tearDown(self):
self.user.delete()
self.cart.delete()
del self.request
def test_uninteresting_request_returns_none(self):
ret = get_or_create_cart(self.request)
self.assertEqual(ret, None)
def test_passing_user_returns_new_cart(self):
setattr(self.request, 'user', self.user)
ret = get_or_create_cart(self.request)
self.assertNotEqual(ret, None)
self.assertNotEqual(ret, self.cart)
def test_passing_user_returns_proper_cart(self):
self.cart.user = self.user
self.cart.save()
setattr(self.request, 'user', self.user)
ret = get_or_create_cart(self.request)
self.assertNotEqual(ret, None)
self.assertEqual(ret, self.cart)
def test_passing_session_returns_new_cart(self):
setattr(self.request, 'session', {})
ret = get_or_create_cart(self.request)
self.assertNotEqual(ret, None)
self.assertNotEqual(ret, self.cart)
def test_passing_session_returns_proper_cart(self):
setattr(self.request, 'session', {'cart_id': self.cart.id})
ret = get_or_create_cart(self.request)
self.assertNotEqual(ret, None)
self.assertEqual(ret, self.cart)
def test_anonymous_user_is_like_no_user(self):
setattr(self.request, 'user', AnonymousUser())
ret = get_or_create_cart(self.request)
self.assertEqual(ret, None)
class LoaderTestCase(TestCase):
def test_loader_without_a_name_works(self):
class_to_load = 'shop.tests.util.Mock'
res = load_class(class_to_load)
self.assertEqual(res, Mock)
def test_loader_without_a_name_fails(self):
class_to_load = 'shop.tests.IdontExist.IdontExistEither'
self.assertRaises(ImproperlyConfigured, load_class, class_to_load)
def test_loader_without_a_name_fails_for_wrong_classname(self):
class_to_load = 'shop.tests.util.IdontExist'
self.assertRaises(ImproperlyConfigured, load_class, class_to_load)
def test_loader_without_a_name_fails_when_too_short(self):
class_to_load = 'IdontExist'
self.assertRaises(ImproperlyConfigured, load_class, class_to_load)
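# Illustration only (not the real shop.util.loader): a minimal dotted-path
# loader with the behaviour the tests above expect -- ImproperlyConfigured
# for paths that are too short, for missing modules and for missing classes.
def _load_class_sketch(path):
    try:
        module_path, class_name = path.rsplit('.', 1)
    except ValueError:
        raise ImproperlyConfigured('%s is not a dotted module path' % path)
    try:
        module = __import__(module_path, fromlist=[class_name])
        return getattr(module, class_name)
    except (ImportError, AttributeError):
        raise ImproperlyConfigured('Could not load %s' % path)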
class AddressUtilTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(username="test",
email="test@example.com",
first_name="Test",
last_name="Toto")
self.country = Country.objects.create(name="Switzerland")
self.address = Address.objects.create(country=self.country)
self.request = Mock()
setattr(self.request, 'user', None)
setattr(self.request, 'session', {})
#==========================================================================
# Shipping
#==========================================================================
def test_get_shipping_address_from_request_no_preset(self):
# Set the user
setattr
|
(self.request, 'user', self.user)
res = get_shipping_address_from_request(self.request)
self.assertEqual(res, None)
def test_get_shipping_address_from_request_with_preset_and_user(self):
setattr(self.request, 'user', self.user)
assign_address_to_request(self.request, self.address, shipping=True)
res = get_shipping_address_from_request(self.request)
self.assertEqual(res, self.address)
def test_get_shipping_address_from_request_with_preset_an
|
d_session(self):
assign_address_to_request(self.request, self.address, shipping=True)
res = get_shipping_address_from_request(self.request)
self.assertEqual(res, self.address)
#==========================================================================
# Billing
#==========================================================================
def test_get_billing_address_from_request_no_preset(self):
# Set the user
setattr(self.request, 'user', self.user)
res = get_billing_address_from_request(self.request)
self.assertEqual(res, None)
def test_get_billing_address_from_request_with_preset_and_user(self):
setattr(self.request, 'user', self.user)
assign_address_to_request(self.request, self.address, shipping=False)
res = get_billing_address_from_request(self.request)
self.assertEqual(res, self.address)
def test_get_billing_address_from_request_with_preset_and_session(self):
assign_address_to_request(self.request, self.address, shipping=False)
res = get_billing_address_from_request(self.request)
self.assertEqual(res, self.address)
|
NoMod-Programming/PyRobotC
|
examples/InTheZone/FantasticBox - Tank Control.py
|
Python
|
mit
| 3,175
| 0.013543
|
#region config
vex.pragma(config, I2C_Usage, I2C1, i2cSensors)
vex.pragma(config, Sensor, in1, leftLight, sensorLineFollower)
vex.pragma(config, Sensor, in2, middleLight, sensorLineFollower)
vex.pragma(config, Sensor, in3, rightLight, sensorLineFollower)
vex.pragma(config, Sensor, in4, wristPot, sensorPotentiometer)
vex.pragma(config, Sensor, in5, gyro, sensorGyro)
vex.pragma(config, Sensor, dgtl1, rightEncoder, sensorQuadEncoder)
vex.pragma(config, Sensor, dgtl3, leftEncoder, sensorQuadEncoder)
vex.pragma(config, Sensor, dgtl5, extensionEncoder, sensorQuadEncoder)
vex.pragma(config, Sensor, dgtl7, touchSensor, sensorTouch)
vex.pragma(config, Sensor, dgtl8, sonarSensor, sensorSONAR_cm)
vex.pragma(config, Sensor, dgtl11, armEncoder, sensorQuadEncoder)
vex.pragma(config, Sensor, I2C_1, _,sensorQuadEncoderOnI2CPort, _, AutoAssign)
vex.pragma(config, Sensor, I2C_2, _,sensorQuad
|
EncoderOnI2CPort, _, AutoAssign)
vex.pragma(config, Sensor, I2C_3, _,sensorQuadEncoderOnI2CPort, _, AutoAssign)
vex.pragma(c
|
onfig, Sensor, I2C_4, _,sensorQuadEncoderOnI2CPort, _, AutoAssign)
vex.pragma(config, Sensor, I2C_5, _,sensorQuadEncoderOnI2CPort, _, AutoAssign)
vex.pragma(config, Motor, port1, frontRightMotor, tmotorVex393_HBridge, openLoop, reversed)
vex.pragma(config, Motor, port2, rearRightMotor, tmotorVex393_MC29, openLoop, reversed, encoderPort, I2C_1)
vex.pragma(config, Motor, port3, frontLeftMotor, tmotorVex393_MC29, openLoop)
vex.pragma(config, Motor, port4, rearLeftMotor, tmotorVex393_MC29, openLoop, encoderPort, I2C_2)
vex.pragma(config, Motor, port6, clawMotor, tmotorVex393_MC29, openLoop)
vex.pragma(config, Motor, port7, armMotor, tmotorVex393_MC29, openLoop, encoderPort, I2C_3)
vex.pragma(config, Motor, port8, leftExtendMotor, tmotorVex393_MC29, openLoop, encoderPort, I2C_4)
vex.pragma(config, Motor, port9, rightExtendMotor, tmotorVex393_MC29, openLoop)
vex.pragma(config, Motor, port10, wristMotor, tmotorVex393_HBridge, openLoop, encoderPort, I2C_5)
#endregion config
import JoystickDriver.c
import autonRecorder.c
def threshold(number:int,minNumber:int=20) -> int:
"""Threshold a value to a minimum int"""
return number if abs(number) >= minNumber else 0
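# For example (illustration only): threshold(5) returns 0 because 5 is inside
# the default +/-20 deadband, while threshold(64) passes 64 through unchanged.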
def main() -> task:
"""This is the main task."""
    deadband:int = 10
    while (True):
        getJoystickSettings(joystick) # Update joystick in a loop
        motor[frontRightMotor] = motor[rearRightMotor] = threshold(joystick.joy1_y2, deadband)
        motor[frontLeftMotor] = motor[rearLeftMotor] = threshold(joystick.joy1_y1, deadband)
if joy1Btn(6):
motor[armMotor] = 127
elif joy1Btn(8):
motor[armMotor] = -63
else:
motor[armMotor] = 0
if joy1Btn(5):
motor[clawMotor] = 127
elif joy1Btn(7):
motor[clawMotor] = -127
else:
motor[clawMotor] = 0
if joy1Btn(4):
            motor[leftExtendMotor] = motor[rightExtendMotor] = 127
        elif joy1Btn(2):
            motor[leftExtendMotor] = motor[rightExtendMotor] = -127
        else:
            motor[leftExtendMotor] = motor[rightExtendMotor] = 0
if joy1Btn(1):
motor[wristMotor] = 127
elif joy1Btn(3):
motor[wristMotor] = -127
else:
motor[wristMotor] = 0
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/boto/exception.py
|
Python
|
agpl-3.0
| 17,106
| 0.00152
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Exception classes - Subclassing allows you to check for specific errors
"""
import base64
import xml.sax
import boto
from boto import handler
from boto.compat import json, six, StandardError
from boto.resultset import ResultSet
class BotoClientError(StandardError):
"""
General Boto Client error (error accessing AWS)
"""
def __init__(self, reason, *args):
super(BotoClientError, self).__init__(reason, *args)
self.reason = reason
def __repr__(self):
return 'BotoClientError: %s' % self.reason
def __str__(self):
return 'BotoClientError: %s' % self.reason
class SDBPersistenceError(StandardError):
pass
class StoragePermissionsError(BotoClientError):
"""
Permissions error when accessing a bucket or key on a storage service.
"""
pass
class S3PermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on S3.
"""
pass
class GSPermissionsError(StoragePermissionsError):
"""
Permissions error when accessing a bucket or key on GS.
"""
pass
class BotoServerError(StandardError):
def __init__(self, status, reason, body=None, *args):
super(BotoServerError, self).__init__(status, reason, body, *args)
self.status = status
self.reason = reason
self.body = body or ''
self.request_id = None
self.error_code = None
self._error_message = None
self.message = ''
self.box_usage = None
if isinstance(self.body, bytes):
try:
self.body = self.body.decode('utf-8')
except UnicodeDecodeError:
boto.log.debug('Unable to decode body from bytes!')
# Attempt to parse the erro
|
r response. If body isn't present,
# then just ignore the error response.
if self.body:
# Check if it looks like a ``dict``.
if hasattr(self.body, 'items'):
# It's not a string, so trying to parse it will fail.
# But since it's data, we can work with that.
self.request_id = self.body.get('Req
|
uestId', None)
if 'Error' in self.body:
# XML-style
error = self.body.get('Error', {})
self.error_code = error.get('Code', None)
self.message = error.get('Message', None)
else:
# JSON-style.
self.message = self.body.get('message', None)
else:
try:
h = handler.XmlHandlerWrapper(self, self)
h.parseString(self.body)
except (TypeError, xml.sax.SAXParseException) as pe:
# What if it's JSON? Let's try that.
try:
parsed = json.loads(self.body)
if 'RequestId' in parsed:
self.request_id = parsed['RequestId']
if 'Error' in parsed:
if 'Code' in parsed['Error']:
self.error_code = parsed['Error']['Code']
if 'Message' in parsed['Error']:
self.message = parsed['Error']['Message']
except (TypeError, ValueError):
# Remove unparsable message body so we don't include garbage
# in exception. But first, save self.body in self.error_message
# because occasionally we get error messages from Eucalyptus
# that are just text strings that we want to preserve.
self.message = self.body
self.body = None
def __getattr__(self, name):
if name == 'error_message':
return self.message
if name == 'code':
return self.error_code
raise AttributeError
def __setattr__(self, name, value):
if name == 'error_message':
self.message = value
else:
super(BotoServerError, self).__setattr__(name, value)
def __repr__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def __str__(self):
return '%s: %s %s\n%s' % (self.__class__.__name__,
self.status, self.reason, self.body)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ('RequestId', 'RequestID'):
self.request_id = value
elif name == 'Code':
self.error_code = value
elif name == 'Message':
self.message = value
elif name == 'BoxUsage':
self.box_usage = value
return None
def _cleanupParsedProperties(self):
self.request_id = None
self.error_code = None
self.message = None
self.box_usage = None
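# Illustration only (not part of boto): typical handling of a BotoServerError
# raised by a connection call; the fields parsed above are what callers most
# often inspect.
def _describe_server_error(err):
    """Return a one-line, human-readable summary of a BotoServerError."""
    return '%s %s: %s (request id: %s)' % (
        err.status, err.reason, err.error_code or err.error_message,
        err.request_id)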
class ConsoleOutput(object):
def __init__(self, parent=None):
self.parent = parent
self.instance_id = None
self.timestamp = None
self.comment = None
self.output = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'instanceId':
self.instance_id = value
elif name == 'output':
self.output = base64.b64decode(value)
else:
setattr(self, name, value)
class StorageCreateError(BotoServerError):
"""
Error creating a bucket or key on a storage service.
"""
def __init__(self, status, reason, body=None):
self.bucket = None
super(StorageCreateError, self).__init__(status, reason, body)
def endElement(self, name, value, connection):
if name == 'BucketName':
self.bucket = value
else:
return super(StorageCreateError, self).endElement(name, value, connection)
class S3CreateError(StorageCreateError):
"""
Error creating a bucket or key on S3.
"""
pass
class GSCreateError(StorageCreateError):
"""
Error creating a bucket or key on GS.
"""
pass
class StorageCopyError(BotoServerError):
"""
Error copying a key on a storage service.
"""
pass
class S3CopyError(StorageCopyError):
"""
Error copying a key on S3.
"""
pass
class GSCopyError(StorageCopyError):
"""
Error copying a key on GS.
"""
pass
class SQSError(BotoServerError):
"""
General Error on Simple Queue Service.
"""
def __init__(self, status, reason, body=None):
self.detail = None
self.type = None
super(SQSError, self).__init__(status, reason, body)
def startElement(self, name, attrs, connection):
return super(SQSError, self).startElement(name, attrs, co
|
apache/climate
|
obs4MIPs/obs4MIPs_process.py
|
Python
|
apache-2.0
| 19,825
| 0.015687
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import cdms2
import cdtime
import cmor
import sys
import getopt
import factory
import numpy
from factory.formats import import_equation
from Toolbox.ESGFresources import *
from Toolbox.ESGFexcel import *
from Toolbox.CMORresources import CMORTable
# ************************************************************************
# process()
#
# Convert to obs4MIPS file format.
# ************************************************************************
def process( rc ):
'''
Convert netcdf/matlab/grads files into CMIP5 format.
'''
    # pdb.set_trace()
# ----------------------------
# Loop yearly on file list.
# ----------------------------
file_template = rc['file_template'].split(",");
if( len(file_template) == 2 ):
template_parameter = file_template[1]
rc['file_template'] = file_template[0]
else:
template_parameter = 'years'
for year in rc[template_parameter].split(","):
if(year == ''):
files= os.popen( "ls " + rc['file_template'] ).readlines()
else:
# ------------------------------------------------
# Use string formating for path with same argument
# ------------------------------------------------
try:
tmplFile = rc['file_template'] % (year)
except:
tmplFile = rc['file_template'].format(year)
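            # For example (illustration only): a template such as 'tas_%s.nc'
            # is filled by the old-style operator above, while 'tas_{}.nc'
            # falls back to str.format -- both give 'tas_1998.nc' for 1998.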
if( not os.path.isfile( tmplFile) ) :
print("**** Warning {} not found\n".format(tmplFile))
continue
files= os.popen( "ls " + tmplFile).readlines()
if( files == [] ):
print("No file found: Check your resource file")
return -1
# ------------------------------------------------
# Get the right handler to manage this file format
# ------------------------------------------------
Handler = factory.HandlerFormats(files[0].strip())
# -----------------------------------
# Take care of cmor initialization.
# -----------------------------------
cmor.setup(inpath=rc['inpath'],
netcdf_file_action = cmor.CMOR_REPLACE)
cmor.dataset(experiment_id = rc['experiment_id'],
institution = rc['institution' ],
calendar = rc['calendar' ],
institute_id = rc['institute_id' ],
model_id = rc['model_id' ],
source = rc['source' ],
contact = rc['contact' ],
references = rc['references' ])
# -----------------------------------------
# add extra Global Attributes for obs4MIPs.
# -----------------------------------------
cmor.set_cur_dataset_attribute( 'instrument', rc['instrument' ])
cmor.set_cur_dataset_attribute( 'mip_specs', rc['mip_specs' ])
cmor.set_cur_dataset_attribute( 'data_structure', rc['
|
data_structure'])
cmor.set_cur_dataset_attribute( 'source_type', rc['source_type' ])
cmor.set_cur_dataset_attribute( 'source_id'
|
, rc['source_id' ])
cmor.set_cur_dataset_attribute( 'realm', rc['realm' ])
cmor.set_cur_dataset_attribute( 'obs_project', rc['obs_project' ])
cmor.set_cur_dataset_attribute( 'processing_version',
rc['processing_version'] )
cmor.set_cur_dataset_attribute( 'processing_level',
rc['processing_level'] )
cmor.load_table(rc['table'])
# ---------------------------------------------------------------------
# We loop on each file found, a new cmor file will be create on each
# iteration. If you want to aggregate, you need to use Grads ctl file
# or NeCDF list of file.
# ---------------------------------------------------------------------
for file in files:
fnm=file.strip() # Get rid of \n
aVariable = eval(rc['original_var'])
nbVariable = len(aVariable)
# -----------------------------------------------------
            # ECMWF needs synoptic time 00z and 12z in the filename.
# We extract it from the first file in the list.
# -----------------------------------------------------
if( rc['source_fn'] == 'SYNOPTIC' ):
index = fnm.find('z.')
rc['SYNOPTIC'] = fnm[index-2:index]
# -----------------------
# Work on all variables
# -------------------------
for j in arange(nbVariable):
# ----------------------------------------------------
# Fetch the variable directly or excute equation.
# ----------------------------------------------------
try:
variable=aVariable[j]
Handler.open(fnm, variable=variable)
rc['cvrt_original_var'] = aVariable[j]
print("Working on variable {} ".format(variable))
except:
if( aVariable[j] != 'equation' ) :
print("Variable {} can't open".format(variable))
continue
else:
print("Executing {} ".format(eval(rc['equation'])[j]))
# pdb.set_trace()
rc['cvrt_original_units'] = eval(rc['original_units'])[j]
rc['cvrt_cmor_var'] = eval(rc['cmor_var'])[j]
rc['cvrt_equation'] = eval(rc['equation'])[j]
rc['cvrt_level'] = eval(rc['level'])[j]
data=Handler.getData()
# ----------------------------------------------------------
# Evaluate equation if needed. Usually used to change units
# ----------------------------------------------------------
if( rc['cvrt_equation'][0] == '@' ):
fncName = rc['cvrt_equation'][1:]
fnc = import_equation( "equations.%s" % fncName )
data[:]= fnc(Handler)
else:
data[:]=eval(rc['cvrt_equation'])
# -------------------------------------------------------------
# Save filled value in case data type is changed in createAxes
# -------------------------------------------------------------
fill_value = data.fill_value
# ---------------------------------------------
# Extract latitude/longitude
# ---------------------------------------------
lonvals=Handler.getLongitude()
latvals=Handler.getLatitude()
# ---------------------
# Create cmor time axis
# ----------------------
(rel_time, rel_time_bnds) = createTime(Handler, rc)
|
SoftwareHeritage/swh-web-ui
|
swh/web/tests/api/views/test_release.py
|
Python
|
agpl-3.0
| 3,819
| 0.000786
|
# Copyright (C) 2015-2019 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from datetime import datetime
from hypothesis import given
from swh.model.hashutil import hash_to_bytes, hash_to_hex
from swh.model.model import (
ObjectType,
Person,
Release,
Timestamp,
TimestampWithTimezone,
)
from swh.web.common.utils import reverse
from swh.web.tests.data import random_sha1
from swh.web.tests.strategies import content, directory, release
from swh.web.tests.utils import check_api_get_responses, check_http_get_response
@given(release())
def test_api_release(api_client, archive_data, release):
url = reverse("api-1-release", url_args={"sha1_git": release})
rv = check_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(release)
target_revision = expected_release["target"]
target_url = reverse(
"api-1-revision",
url_args={"sha1_git": target_revision},
request=rv.wsgi_request,
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
@given(content(), directory(), release())
def test_api_release_target_type_not_a_revision(
api_client, archive_data, content, directory, release
):
for target_type, target in (
(ObjectType.CONTENT, content),
(ObjectType.DIRECTORY, directory),
(ObjectType.RELEASE, release),
):
if target_type == ObjectType.CONTENT:
target = target["sha1_git"]
sample_release = Release(
author=Person(
email=b"author@company.org",
fullname=b"author <author@company.org>",
name=b"author",
),
date=TimestampWithTimezone(
timestamp=Timestamp(
seconds=int(datetime.now().timestamp()), microseconds
|
=0
),
offset=0,
negative_utc=False,
),
message=b"sample release message",
name=b"sample release",
synthetic=False,
target=hash_to_bytes(target),
target_type=target_type,
)
archive_data.release_add([sample_release])
new_release_id = hash_to_hex(sample_release.id)
url = reverse("api-1-release", url_args={"sha1_git": new_release_id})
rv = chec
|
k_api_get_responses(api_client, url, status_code=200)
expected_release = archive_data.release_get(new_release_id)
if target_type == ObjectType.CONTENT:
url_args = {"q": "sha1_git:%s" % target}
else:
url_args = {"sha1_git": target}
target_url = reverse(
"api-1-%s" % target_type.value, url_args=url_args, request=rv.wsgi_request
)
expected_release["target_url"] = target_url
assert rv.data == expected_release
def test_api_release_not_found(api_client):
unknown_release_ = random_sha1()
url = reverse("api-1-release", url_args={"sha1_git": unknown_release_})
rv = check_api_get_responses(api_client, url, status_code=404)
assert rv.data == {
"exception": "NotFoundExc",
"reason": "Release with sha1_git %s not found." % unknown_release_,
}
@given(release())
def test_api_release_uppercase(api_client, release):
url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release.upper()}
)
resp = check_http_get_response(api_client, url, status_code=302)
redirect_url = reverse(
"api-1-release-uppercase-checksum", url_args={"sha1_git": release}
)
assert resp["location"] == redirect_url
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/File.py
|
Python
|
gpl-2.0
| 4,640
| 0.003017
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for more fancy file handles.
Classes:
UndoHandle File object decorator with support for undo-like operations.
StringHandle Wraps a file object around a string.
SGMLStripper Object that strips SGML. This is now considered OBSOLETE, and
is likely to be deprecated in a future release of Biopython,
and later removed.
"""
import StringIO
class UndoHandle:
"""A Python handle that adds functionality for saving lines.
Saves lines in a LIFO fashion.
Added methods:
saveline Save a line to be returned next time.
peekline Peek at the next line without consuming it.
"""
def __init__(self, handle):
self._handle = handle
self._saved = []
def __iter__(self):
return self
def next(self):
next = self.readline()
if not next:
raise StopIteration
return next
def readlines(self, *args, **keywds):
lines = self._saved + self._handle.readlines(*args,**keywds)
self._saved = []
return lines
def readline(self, *args, **keywds):
if self._saved:
line = self._saved.pop(0)
else:
line = self._handle.readline(*args,**keywds)
return line
def read(self, size=-1):
if size == -1:
saved = "".join(self._saved)
self._saved[:] = []
else:
saved = ''
while size > 0 and self._saved:
if len(self._saved[0]) <= size:
size = size - len(self._saved[0])
saved = saved + self._saved.pop(0)
else:
saved = saved + self._saved[0][:size]
self._saved[0] = self._saved[0][size:]
size = 0
return saved + self._handle.read(size)
def saveline(self, line):
if line:
self._saved = [line] + self._saved
def peekline(self):
if self._saved:
line = self._saved[0]
else:
line = self._handle.readline()
self.saveline(line)
return line
def tell(self):
lengths = map(len, self._saved)
sum = reduce(lambda x, y: x+y, lengths, 0)
return self._handle.tell() - sum
def seek(self, *args):
self._saved = []
self._handle.seek(*args)
def __getattr__(self, attr):
return getattr(self._handle, attr)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._handle.close()
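# Illustration only (not part of Biopython): peekline/saveline let a parser
# look ahead without consuming input, e.g.
#   handle = UndoHandle(StringHandle(">seq1\nACGT\n"))
#   handle.peekline()           # '>seq1\n', still available to readline()
#   line = handle.readline()    # '>seq1\n'
#   handle.saveline(line)       # push it back for the next reader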
# I could make this faster by using cStringIO.
# However, cStringIO (in v1.52) does not implement the
# readlines method.
StringHandle = StringIO.StringIO
try:
import sgmllib
except ImportError:
#This isn't available on Python 3, but we don't care much as SGMLStripper
#is obsolete
pass
else:
class SGMLStripper:
"""Object to strip SGML tags (OBSOLETE)."""
class MyParser(sgmllib.SGMLParser):
def __init__(self):
sgmllib.SGMLParser.__init__(self)
self.data = ''
def handle_data(self, data):
self.data = self.data + data
def __init__(self):
import warnings
warnings.warn("This class is obsolete, and likely to be deprecated and later removed in a future version of Biopython", PendingDeprecationWarning)
self._parser = SGMLStripper.MyParser()
def strip(self, str):
"""S.strip(str) -> string
|
Strip the SGML tags from str.
"""
if not str: # empty string, don't do anything.
return ''
# I need to make sure that I don't return an empty string if
# the buffer is not empty. This can happen if there's a newline
# character embedded within a tag. Thus, I'll first check to
# see if the last character is a newline. If it is, and it's stripped
# away, I'll add it back.
i
|
s_newline = str[-1] in ['\n', '\r']
self._parser.data = '' # clear the parser's data (don't reset)
self._parser.feed(str)
if self._parser.data:
str = self._parser.data
elif is_newline:
str = '\n'
else:
str = ''
return str
|
zwadar/pyqode.core
|
examples/modes/right_margin.py
|
Python
|
mit
| 640
| 0.007813
|
"""
Minimal example showing the use of the RightMarginMode.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from pyqode.qt import QtWidgets
from pyqode.cor
|
e.api import CodeEdit
from pyqode.core.backend import server
from pyqode.core.modes import RightMarginMode
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
editor = CodeEdit()
editor.backend.start(server.__file__)
editor.resize(800, 600)
margin = editor.modes.append(RightMarginMode())
margin.position = 4
editor.file.open(__file__)
editor.show()
app.exec_()
|
editor.close()
del editor
del app
|
ubirch/aws-tools
|
virtual-env/lib/python2.7/site-packages/boto/sqs/message.py
|
Python
|
apache-2.0
| 9,892
| 0.002932
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
SQS Message
A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS
Message are here:
http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
So, at its simplest level a Message just needs to allow a developer to store bytes in it and get the bytes
back out. However, to allow messages to have richer semantics, the Message class must support the
following interfaces:
The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
boto Queue object and represents the queue that the message will be stored in. The default value for
this parameter is None.
The constructor for the Message class must accept a keyword parameter "body" which represents the
content or body of the message. The format of this parameter will depend on the behavior of the
particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the
user the body passed to the constructor should be a dict-like object that can be used to populate
the initial state of the message.
The Message class must provide
|
an encode method that accepts a value of the same type as the body
parameter of the constructor and returns a string of characters that are able to be stored in an
SQS message body (see rules above).
The Message class must provide a decode method that accepts a string of characters that c
|
an be
stored (and probably were stored!) in an SQS message and return an object of a type that is consistent
with the "body" parameter accepted on the class constructor.
The Message class must provide a __len__ method that will return the size of the encoded message
that would be stored in SQS based on the current state of the Message object.
The Message class must provide a get_body method that will return the body of the message in the
same format accepted in the constructor of the class.
The Message class must provide a set_body method that accepts a message body in the same format
accepted by the constructor of the class. This method should alter the internal state of the
Message object to reflect the state represented in the message body parameter.
The Message class must provide a get_body_encoded method that returns the current body of the message
in the format in which it would be stored in SQS.
"""
import base64
import boto
from boto.compat import StringIO
from boto.compat import six
from boto.sqs.attributes import Attributes
from boto.sqs.messageattributes import MessageAttributes
from boto.exception import SQSDecodeError
class RawMessage(object):
"""
Base class for SQS messages. RawMessage does not encode the message
in any way. Whatever you store in the body of the message is what
will be written to SQS and whatever is returned from SQS is stored
directly into the body of the message.
"""
def __init__(self, queue=None, body=''):
self.queue = queue
self.set_body(body)
self.id = None
self.receipt_handle = None
self.md5 = None
self.attributes = Attributes(self)
self.message_attributes = MessageAttributes(self)
self.md5_message_attributes = None
def __len__(self):
return len(self.encode(self._body))
def startElement(self, name, attrs, connection):
if name == 'Attribute':
return self.attributes
if name == 'MessageAttribute':
return self.message_attributes
return None
def endElement(self, name, value, connection):
if name == 'Body':
self.set_body(value)
elif name == 'MessageId':
self.id = value
elif name == 'ReceiptHandle':
self.receipt_handle = value
elif name == 'MD5OfBody':
self.md5 = value
elif name == 'MD5OfMessageAttributes':
self.md5_message_attributes = value
else:
setattr(self, name, value)
def endNode(self, connection):
self.set_body(self.decode(self.get_body()))
def encode(self, value):
"""Transform body object into serialized byte array format."""
return value
def decode(self, value):
"""Transform seralized byte array into any object."""
return value
def set_body(self, body):
"""Override the current body for this object, using decoded format."""
self._body = body
def get_body(self):
return self._body
def get_body_encoded(self):
"""
This method is really a semi-private method used by the Queue.write
method when writing the contents of the message to SQS.
You probably shouldn't need to call this method in the normal course of events.
"""
return self.encode(self.get_body())
def delete(self):
if self.queue:
return self.queue.delete_message(self)
def change_visibility(self, visibility_timeout):
if self.queue:
self.queue.connection.change_message_visibility(self.queue,
self.receipt_handle,
visibility_timeout)
class Message(RawMessage):
"""
The default Message class used for SQS queues. This class automatically
encodes/decodes the message body using Base64 encoding to avoid any
illegal characters in the message body. See:
https://forums.aws.amazon.com/thread.jspa?threadID=13067
for details on why this is a good idea. The encode/decode is meant to
be transparent to the end-user.
"""
def encode(self, value):
if not isinstance(value, six.binary_type):
value = value.encode('utf-8')
return base64.b64encode(value).decode('utf-8')
def decode(self, value):
try:
value = base64.b64decode(value.encode('utf-8')).decode('utf-8')
except:
boto.log.warning('Unable to decode message')
return value
return value
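# Illustration only (not part of boto): a subclass following the interface
# described in the module docstring, storing a dict body as JSON text.
import json as _json  # assumption: the stdlib json module is acceptable here
class _ExampleJSONMessage(RawMessage):
    def encode(self, value):
        return _json.dumps(value)
    def decode(self, value):
        try:
            return _json.loads(value)
        except ValueError:
            raise SQSDecodeError('Unable to decode JSON body', self)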
class MHMessage(Message):
"""
The MHMessage class provides a message that provides RFC821-like
headers like this:
HeaderName: HeaderValue
The encoding/decoding of this is handled automatically and after
the message body has been read, the message instance can be treated
like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
"""
def __init__(self, queue=None, body=None, xml_attrs=None):
if body is None or body == '':
body = {}
super(MHMessage, self).__init__(queue, body)
def decode(self, value):
try:
msg = {}
fp = StringIO(value)
line = fp.readline()
while line:
delim = line.find(':')
key = line[0:delim]
value = line[delim+1:].strip()
msg[key.strip()] = value.strip()
line = fp.readline()
except:
raise SQSDecodeError('Unable to de
|
rampantpixels/rpmalloc-benchmark
|
configure.py
|
Python
|
unlicense
| 14,222
| 0.037126
|
#!/usr/bin/env python
"""Ninja build configurator for foundation library"""
import sys
import os
import copy
sys.path.insert(0, os.path.join('build', 'ninja'))
import generator
generator = generator.Generator(project = 'rpmalloc', variables = {'bundleidentifier': 'com.rampantpixels.rpmalloc.$(binname)', 'nowarning': True})
target = generator.target
writer = generator.writer
toolchain = generator.toolchain
variables = {'defines': ['NDEBUG=1'], 'cflags': ['-fno-builtin-malloc']}
def merge_variables(a, b):
merged = copy.deepcopy(a)
for k, v in b.items():
if k in merged:
merged[k] = list(merged[k]) + list(v)
else:
merged[k] = v
return merged
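# For example (illustration only):
#   merge_variables({'defines': ['A']}, {'defines': ['B'], 'cflags': ['-O2']})
#   returns {'defines': ['A', 'B'], 'cflags': ['-O2']}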
includepaths = ['test', 'benchmark']
test_lib = generator.lib(module = 'test', sources = ['thread.c', 'timer.c'], includepaths = includepaths, variables = variables)
benchmark_lib = generator.lib(module = 'benchmark', sources = ['main.c'], includepaths = includepaths, variables = variables)
#Build one binary per benchmark
generator.bin(module = 'rpmalloc', sources = ['benchmark.c', 'rpmalloc.c'], binname = 'benchmark-rpmalloc', basepath = 'benchmark', implicit_deps = [benchmark_lib, test_lib], libs = ['benchmark', 'test'], includepaths = includepaths, variables = variables)
if target.is_android():
resources = [os.path.join('all', 'android', item) for item in [
'AndroidManifest.xml', os.path.join('layout', 'main.xml'), os.path.join('values', 'strings.xml'),
os.path.join('drawable-ldpi', 'icon.png'), os.path.join('drawable-mdpi', 'icon.png'), os.path.join('drawable-hdpi', 'icon.png'),
os.path.join('drawable-xhdpi', 'icon.png'), os.path.join('drawable-xxhdpi', 'icon.png'), os.path.join('drawable-xxxhdpi', 'icon.png')
]]
appsources = [os.path.join('test', 'all', 'android', 'java', 'com', 'rampantpixels', 'foundation', 'test', item) for item in [
'TestActivity.java'
]]
generator.app(module = '', sources = appsources, binname = 'benchmark-rpmalloc', basepath = '', implicit_deps = [benchmark_lib, test_lib], libs = ['benchmark', 'test'], resources = resources, includepaths = includepaths, variables = variables)
generator.bin(module = 'crt', sources = ['benchmark.c'], binname = 'benchmark-crt', basepath = 'benchmark', implicit_deps = [benchmark_lib, test_lib], libs = ['benchmark', 'test'], includepaths = includepaths, variables = {'defines': ['NDEBUG=1']})
if not target.is_android():
generator.bin(module = 'nedmalloc', sources = ['benchmark.c', 'nedmalloc.c'], binname = 'benchmark-nedmalloc', basepath = 'benchmark', implicit_deps = [benchmark_lib, test_lib], libs = ['benchmark', 'test'], includepaths = includepaths, variables = variables)
platform_includepaths = [os.path.join('benchmark', 'ptmalloc3')]
if target.is_windows():
platform_includepaths += [os.path.join('benchmark', 'ptmalloc3', 'sysdeps', 'windows')]
else:
platform_includepaths += [os.path.join('benchmark', 'ptmalloc3', 'sysdeps', 'pthread')]
if not target.is_android():
generator.bin(module = 'ptmalloc3', sources = ['benchmark.c', 'ptmalloc3.c', 'malloc.c'], binname = 'benchmark-ptmalloc3', basepath = 'benchmark', implicit_deps = [benchmark_lib, test_lib], libs = ['benchmark', 'test'], includepaths = includepaths + platform_includepaths, variables = variables)
hoardincludepaths = [
os.path.join('benchmark', 'hoard', 'include'),
os.path.join('benchmark', 'hoard', 'include', 'hoard'),
os.path.join('benchmark', 'hoard', 'include', 'util'),
os.path.join('benchmark', 'hoard', 'include', 'superblocks'),
os.path.join('benchmark', 'hoard'),
os.path.join('benchmark', 'hoard', 'Heap-Layers')
]
hoardsources = ['source/libhoard.cpp']
if target.is_macos() or target.is_ios():
hoardsources += ['Heap-Layers/wrappers/macwrapper.cpp']
elif target.is_windows():
hoardsources += ['Heap-Layers/wrappers/winwrapper.cpp']
else:
hoardsources += ['Heap-Layers/wrappers/gnuwrapper.cpp']
if target.is_macos() or target.is_ios():
hoardsources += ['source/mactls.cpp']
elif target.is_windows():
hoardsources += ['source/wintls.cpp']
else:
hoardsources += ['source/unixtls.cpp']
if not target.is_android():
hoard_variables = merge_variables({'runtime': 'c++'}, variables)
hoard_lib = generator.lib(module = 'hoard', sources = hoardsources, basepath = 'benchmark', includepaths = includepaths + hoardincludepaths, variables = hoard_variables)
hoard_depend_libs = ['hoard', 'be
|
nchmark', 'test']
generator.bin(module = 'hoard', sources = ['benchmark.c'], binname = 'benchmark-hoard', basepath = 'benchmark', implicit_deps = [hoard_lib, benchmark_lib, test_lib], libs = hoard_depend_libs, includepaths = includepaths, variables = hoard_variables)
gperftoolsincludepaths = [
os.path.join('benchmark', 'gperftools', 'src'),
os.path.join('benchmark',
|
'gperftools', 'src', 'base'),
os.path.join('benchmark', 'gperftools', 'src', target.get())
]
gperftoolsbasesources = [
'dynamic_annotations.c', 'linuxthreads.cc', 'logging.cc', 'low_level_alloc.cc', 'spinlock.cc',
'spinlock_internal.cc', 'sysinfo.cc'
]
if not target.is_windows():
gperftoolsbasesources += ['thread_lister.c']
gperftoolsbasesources = [os.path.join('src', 'base', path) for path in gperftoolsbasesources]
gperftoolssources = [
'central_freelist.cc', 'common.cc', 'internal_logging.cc',
'malloc_extension.cc', 'malloc_hook.cc', 'memfs_malloc.cc',
'page_heap.cc', 'sampler.cc', 'stack_trace_table.cc',
'static_vars.cc', 'span.cc', 'symbolize.cc', 'tcmalloc.cc', 'thread_cache.cc'
]
if not target.is_windows():
gperftoolssources += ['maybe_threads.cc', 'system-alloc.cc']
if target.is_windows():
gperftoolssources += [os.path.join('windows', 'port.cc'), os.path.join('windows', 'system-alloc.cc')]
gperftoolssources = [os.path.join('src', path) for path in gperftoolssources]
if not target.is_android():
gperf_variables = merge_variables({'runtime': 'c++', 'defines': ['NO_TCMALLOC_SAMPLES', 'NO_HEAP_CHECK'], 'nowarning': True}, variables)
gperftools_lib = generator.lib(module = 'gperftools', sources = gperftoolsbasesources + gperftoolssources, basepath = 'benchmark', includepaths = includepaths + gperftoolsincludepaths, variables = gperf_variables)
gperftools_depend_libs = ['gperftools', 'benchmark', 'test']
generator.bin(module = 'gperftools', sources = ['benchmark.c'], binname = 'benchmark-tcmalloc', basepath = 'benchmark', implicit_deps = [gperftools_lib, benchmark_lib, test_lib], libs = gperftools_depend_libs, includepaths = includepaths, variables = gperf_variables)
jemallocincludepaths = [
os.path.join('benchmark', 'jemalloc', 'include'),
os.path.join('benchmark', 'jemalloc', 'include', 'jemalloc'),
os.path.join('benchmark', 'jemalloc', 'include', 'jemalloc', 'internal')
]
jemallocsources = [
'arena.c', 'background_thread.c', 'base.c', 'bin.c', 'bitmap.c', 'ckh.c', 'ctl.c', 'div.c', 'extent.c',
'extent_dss.c', 'extent_mmap.c', 'hash.c', 'hook.c', 'jemalloc.c', 'large.c', 'log.c', 'malloc_io.c',
'mutex.c', 'mutex_pool.c', 'nstime.c', 'pages.c', 'prng.c', 'prof.c', 'rtree.c', 'safety_check.c',
'sc.c', 'stats.c', 'sz.c', 'tcache.c', 'test_hooks.c', 'ticker.c', 'tsd.c', 'witness.c'
]
jemallocsources = [os.path.join('src', path) for path in jemallocsources]
if not target.is_windows() and not target.is_android():
je_variables = merge_variables({'defines': ['JEMALLOC_NO_RENAME']}, variables)
jemalloc_lib = generator.lib(module = 'jemalloc', sources = jemallocsources, basepath = 'benchmark', includepaths = includepaths + jemallocincludepaths, variables = je_variables)
jemalloc_depend_libs = ['jemalloc', 'benchmark', 'test']
generator.bin(module = 'jemalloc', sources = ['benchmark.c'], binname = 'benchmark-jemalloc', basepath = 'benchmark', implicit_deps = [jemalloc_lib, benchmark_lib, test_lib], libs = jemalloc_depend_libs, includepaths = includepaths, variables = je_variables)
snmallocincludepaths = [
os.path.join('benchmark', 'snmalloc', 'src'),
]
snmallocsources = [os.path.join('src', 'override', 'malloc.cc')]
snvariables = merge_variables({'defines': ['SNMALLOC_STATIC_LIBRARY=1', 'SNMALLOC_STATIC_LIBRARY_PREFIX=sn_'], 'cflags': ['-mcx16'], 'runtime': 'c++'}, variables)
snmalloc_lib = generator.lib(module = 'snmalloc', sources = snmallocsources, basepath = 'benc
|
acutesoftware/virtual-AI-simulator
|
vais/z_prototypes/game_rpg_simulation1.py
|
Python
|
mit
| 3,330
| 0.007808
|
# game_rpg_simulation.py written by Duncan Murray 30/3/2015
import os
import random
def main():
"""
Prototype to see how an RPG simulation might be used
in the AIKIF framework.
The idea is to build a simple character and run a simulation
    to see how it succeeds in a random world against another player's
character
character
stats
world
locations
"""
character1 = Character('Albogh', str=4,int=7,sta=50)
character2 = Character('Zoltor', str=6,int=6,sta=70)
print('PLAYER1 [start]:', character1)
print('PLAYER2 [start]:', character2)
b = Battle(character1, character2)
print(b)
print('PLAYER1 [end]:', character1)
print('PLAYER2 [end]:', character2)
class Character():
"""
Character class to manage how a character is built
"""
def __init__(self, name, str=5, int=5, sta=5):
self.name = name
self.str = str
self.int = int
self.sta = sta
self.status = 'Alive'
self.exp = 0
self.gold = 10
self.backpack = ['torch', 'appl
|
e']
def __str__(self):
res = ''
res += 'Character : ' + self.name + '\n'
res += 'Statistics : STA=' + str(self.sta) + '
|
, INT=' + str(self.int) + ', STR=' + str(self.str) + '\n'
res += 'Status : ' + self.status + '\n'
res += 'Carrying : '
for i in self.backpack:
res += i + ', '
res += str(self.gold) + ' Gold'
return res
class Battle():
"""
manages a fight between 2 rpg characters
"""
def __init__(self, char1, char2):
self.c1 = char1
self.c2 = char2
self.status = 'Start...'
self.fight()
def __str__(self):
res = 'Battle Status : ' + self.status + '\n'
res += 'Character 1 = ' + self.c1.name + '\n'
res += 'Character 2 = ' + self.c2.name + '\n'
return res
def fight(self, moves=10):
"""
runs a series of fights
"""
for i in range(1, moves):
# player 1
result, dmg = self.calc_move(self.c1, self.c2)
print (self.c1.name + ' ' + result + ' for ' + str(dmg))
self.c1.sta = self.c1.sta - dmg
if self.is_character_dead(self.c1):
print(self.c1.name + ' has died')
return
# player 2
result, dmg = self.calc_move(self.c2, self.c1)
print (self.c2.name + ' ' + result + ' for ' + str(dmg))
self.c2.sta = self.c2.sta - dmg
if self.is_character_dead(self.c2):
print(self.c2.name + ' has died')
return
def calc_move(self, c1, c2):
chance_hit = random.randint(2,c1.int)
amount_dmg = random.randint(2,c1.str+3) * (c1.int/2)
# print('chance_hit =',chance_hit , 'amount_dmg = ',amount_dmg )
if chance_hit > 6:
return 'Crit', amount_dmg
elif chance_hit < 3:
return 'Miss', 0
else:
return 'Hit', amount_dmg
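    # For example (illustration only): with c1.int == 7 the hit roll above is
    # uniform over 2..7, so a crit (roll of 7) lands about 1 time in 6, a
    # miss (roll of 2) about 1 time in 6, and the rest are ordinary hits.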
def is_character_dead(self, c):
"""
check to see if a character is dead
"""
if c.sta < 1:
return True
else:
return False
main()
|
johnctitus/troposphere
|
examples/VPC_With_VPN_Connection.py
|
Python
|
bsd-2-clause
| 6,123
| 0
|
# Converted from VPC_With_VPN_Connection.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Join, Output
from troposphere import Parameter, Ref, Tags, Template
from troposphere.ec2 import PortRange
from troposphere.ec2 import NetworkAcl
from troposphere.ec2 import Route
from troposphere.ec2 import VPCGatewayAttachment
from troposphere.ec2 import SubnetRouteTableAssociation
from troposphere.ec2 import Subnet
from troposphere.ec2 import CustomerGateway
from troposphere.ec2 import VPNConnectionRoute
from troposphere.ec2 import RouteTable
from troposphere.ec2 import VPC
from troposphere.ec2 import NetworkAclEntry
from troposphere.ec2 import VPNGateway
from troposphere.ec2 import SubnetNetworkAclAssociation
from troposphere.ec2 import VPNConnection
t = Template()
t.add_version("2010-09-09")
t.add_description("""\
AWS CloudFormation Sample Template VPC_With_VPN_Connection.template: \
Sample template showing how to create a private subnet with a VPN connection \
using static routing to an existing VPN endpoint. NOTE: The VPNConnection \
created will define the configuration you need for the tunnels to your VPN \
endpoint - you can get the VPN Gateway configuration from the AWS Management \
console. You will be billed for the AWS resources used if you creat
|
e a stack \
from this template.""")
VPNAddress = t.add_parameter(Parameter(
"VPNAddress",
Type="String",
Description="IP Address of your VPN device",
|
MinLength="7",
AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})",
MaxLength="15",
ConstraintDescription="must be a valid IP address of the form x.x.x.x",
))
OnPremiseCIDR = t.add_parameter(Parameter(
"OnPremiseCIDR",
ConstraintDescription=(
"must be a valid IP CIDR range of the form x.x.x.x/x."),
Description="IP Address range for your existing infrastructure",
Default="10.0.0.0/16",
MinLength="9",
AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
MaxLength="18",
Type="String",
))
VPCCIDR = t.add_parameter(Parameter(
"VPCCIDR",
ConstraintDescription=(
"must be a valid IP CIDR range of the form x.x.x.x/x."),
Description="IP Address range for the VPN connected VPC",
Default="10.1.0.0/16",
MinLength="9",
AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
MaxLength="18",
Type="String",
))
SubnetCIDR = t.add_parameter(Parameter(
"SubnetCIDR",
ConstraintDescription=(
"must be a valid IP CIDR range of the form x.x.x.x/x."),
Description="IP Address range for the VPN connected Subnet",
Default="10.1.0.0/24",
MinLength="9",
AllowedPattern=r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
MaxLength="18",
Type="String",
))
PrivateNetworkAcl = t.add_resource(NetworkAcl(
"PrivateNetworkAcl",
VpcId=Ref("VPC"),
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="Private",
)
))
PrivateRoute = t.add_resource(Route(
"PrivateRoute",
GatewayId=Ref("VPNGateway"),
DestinationCidrBlock="0.0.0.0/0",
RouteTableId=Ref("PrivateRouteTable"),
))
VPNGatewayAttachment = t.add_resource(VPCGatewayAttachment(
"VPNGatewayAttachment",
VpcId=Ref("VPC"),
VpnGatewayId=Ref("VPNGateway"),
))
PrivateSubnetRouteTableAssociation = t.add_resource(
SubnetRouteTableAssociation(
"PrivateSubnetRouteTableAssociation",
SubnetId=Ref("PrivateSubnet"),
RouteTableId=Ref("PrivateRouteTable"),
)
)
PrivateSubnet = t.add_resource(Subnet(
"PrivateSubnet",
VpcId=Ref("VPC"),
CidrBlock=Ref(SubnetCIDR),
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected Subnet",
)
))
CustomerGateway = t.add_resource(CustomerGateway(
"CustomerGateway",
BgpAsn="65000",
IpAddress=Ref(VPNAddress),
Type="ipsec.1",
Tags=Tags(
Application=Ref("AWS::StackName"),
VPN=Join("", ["Gateway to ", Ref(VPNAddress)]),
)
))
VPNConnectionRoute = t.add_resource(VPNConnectionRoute(
"VPNConnectionRoute",
VpnConnectionId=Ref("VPNConnection"),
DestinationCidrBlock=Ref(OnPremiseCIDR),
))
PrivateRouteTable = t.add_resource(RouteTable(
"PrivateRouteTable",
VpcId=Ref("VPC"),
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected Subnet",
)
))
VPC = t.add_resource(VPC(
"VPC",
EnableDnsSupport="true",
CidrBlock=Ref(VPCCIDR),
EnableDnsHostnames="true",
Tags=Tags(
Application=Ref("AWS::StackName"),
Network="VPN Connected VPC",
)
))
OutBoundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
"OutBoundPrivateNetworkAclEntry",
NetworkAclId=Ref(PrivateNetworkAcl),
RuleNumber="100",
Protocol="6",
PortRange=PortRange(To="65535", From="0"),
Egress="true",
RuleAction="allow",
CidrBlock="0.0.0.0/0",
))
VPNGateway = t.add_resource(VPNGateway(
"VPNGateway",
Type="ipsec.1",
Tags=Tags(
Application=Ref("AWS::StackName"),
)
))
PrivateSubnetNetworkAclAssociation = t.add_resource(
SubnetNetworkAclAssociation(
"PrivateSubnetNetworkAclAssociation",
SubnetId=Ref(PrivateSubnet),
NetworkAclId=Ref(PrivateNetworkAcl),
)
)
VPNConnection = t.add_resource(VPNConnection(
"VPNConnection",
CustomerGatewayId=Ref(CustomerGateway),
StaticRoutesOnly="true",
Type="ipsec.1",
VpnGatewayId=Ref(VPNGateway),
))
InboundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
"InboundPrivateNetworkAclEntry",
NetworkAclId=Ref(PrivateNetworkAcl),
RuleNumber="100",
Protocol="6",
PortRange=PortRange(To="65535", From="0"),
Egress="false",
RuleAction="allow",
CidrBlock="0.0.0.0/0",
))
PrivateSubnet = t.add_output(Output(
"PrivateSubnet",
Description="SubnetId of the VPN connected subnet",
Value=Ref(PrivateSubnet),
))
VPCId = t.add_output(Output(
"VPCId",
Description="VPCId of the newly created VPC",
Value=Ref(VPC),
))
print(t.to_json())
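# Editor's sketch: the JSON printed above is a complete CloudFormation template.
# One illustrative way to use it (file name and parameter value are placeholders):
#   python VPC_With_VPN_Connection.py > vpc_with_vpn.json
#   aws cloudformation create-stack --stack-name vpn-vpc \
#       --template-body file://vpc_with_vpn.json \
#       --parameters ParameterKey=VPNAddress,ParameterValue=<your VPN device IP>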
|
StegSchreck/RatS
|
RatS/base/base_ratings_parser.py
|
Python
|
agpl-3.0
| 7,162
| 0.002932
|
import os
import sys
import time
from bs4 import BeautifulSoup
from progressbar import ProgressBar
class RatingsParser:
def __init__(self, site, args):
|
self.site = site
self.args = args
if not self.site.CREDENTIALS_VALID:
return
self.m
|
ovies = []
self.movies_count = 0
self.site.open_url_with_521_retry(self.site.MY_RATINGS_URL)
self.exports_folder = os.path.abspath(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, "RatS", "exports"
)
)
if not os.path.exists(self.exports_folder):
os.makedirs(self.exports_folder)
self.progress_bar = None
def parse(self):
iteration = 0
while True:
iteration += 1
try:
self._parse_ratings()
break
except AttributeError as e:
if iteration > 10:
raise e
time.sleep(iteration * 2)
continue
self.site.browser_handler.kill()
return self.movies
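    # Editor's note (assumption about intent): the AttributeError retry loop above
    # appears to guard against ratings pages that the (presumably Selenium-driven)
    # browser has not finished rendering yet; each failed attempt sleeps
    # iteration * 2 seconds, so up to ten retries (roughly 110 s in total) are
    # made before the exception is re-raised.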
def _parse_ratings(self):
movie_ratings_page = BeautifulSoup(self.site.browser.page_source, "html.parser")
time.sleep(1)
pages_count = self._retrieve_pages_count_and_movies_count(movie_ratings_page)
if self.args and self.args.verbose and self.args.verbose >= 3:
sys.stdout.write(
"\r\n ================================================== \r\n"
)
sys.stdout.write(self.site.browser.current_url)
sys.stdout.write(
f"\r\n ===== {self.site.site_displayname}: getting page count: {pages_count} \r\n"
)
sys.stdout.write(
f"\r\n ===== {self.site.site_displayname}: getting movies count: {self.movies_count} \r\n"
)
sys.stdout.write(
"\r\n ================================================== \r\n"
)
sys.stdout.flush()
sys.stdout.write(
f"\r===== {self.site.site_displayname}: Parsing {pages_count} pages"
f" with {self.movies_count} movies in total\r\n"
)
sys.stdout.flush()
for page_number in range(1, pages_count + 1):
self.site.open_url_with_521_retry(self._get_ratings_page(page_number))
movie_listing_page = BeautifulSoup(
self.site.browser.page_source, "html.parser"
)
self._parse_movie_listing_page(movie_listing_page)
def _retrieve_pages_count_and_movies_count(self, movie_ratings_page):
pages_count = self._get_pages_count(movie_ratings_page)
self.movies_count = self._get_movies_count(movie_ratings_page)
return pages_count
@staticmethod
def _get_pages_count(movie_ratings_page):
raise NotImplementedError("This is not the implementation you are looking for.")
@staticmethod
def _get_movies_count(movie_ratings_page):
raise NotImplementedError("This is not the implementation you are looking for.")
def _get_ratings_page(self, page_number):
raise NotImplementedError("This is not the implementation you are looking for.")
def _parse_movie_listing_page(self, movie_listing_page):
movies_tiles = self._get_movie_tiles(movie_listing_page)
for movie_tile in movies_tiles:
movie = self._parse_movie_tile(movie_tile)
if movie:
self.movies.append(movie)
self.print_progress(movie)
def print_progress(self, movie):
if self.args and self.args.verbose and self.args.verbose >= 2:
sys.stdout.write(
f"\r===== {self.site.site_displayname}: [{len(self.movies)}/{self.movies_count}] parsed {movie} \r\n"
)
sys.stdout.flush()
elif self.args and self.args.verbose and self.args.verbose >= 1:
sys.stdout.write(
f"\r===== {self.site.site_displayname}: [{len(self.movies)}/{self.movies_count}]"
f" parsed {movie['title']} ({movie['year']}) \r\n"
)
sys.stdout.flush()
else:
self._print_progress_bar()
def _print_progress_bar(self):
if not self.progress_bar:
self.progress_bar = ProgressBar(
max_value=self.movies_count, redirect_stdout=True
)
self.progress_bar.update(len(self.movies))
if self.movies_count == len(self.movies):
self.progress_bar.finish()
@staticmethod
def _get_movie_tiles(movie_listing_page):
raise NotImplementedError("This is not the implementation you are looking for.")
def _parse_movie_tile(self, movie_tile):
movie = dict()
movie["title"] = self._get_movie_title(movie_tile)
movie[self.site.site_name.lower()] = dict()
movie[self.site.site_name.lower()]["id"] = self._get_movie_id(movie_tile)
movie[self.site.site_name.lower()]["url"] = self._get_movie_url(movie_tile)
self._go_to_movie_details_page(movie)
time.sleep(1)
iteration = 0
while True:
iteration += 1
try:
self.parse_movie_details_page(movie)
break
except AttributeError as e:
if iteration > 10:
raise e
time.sleep(iteration * 1)
continue
return movie
def _go_to_movie_details_page(self, movie):
self.site.open_url_with_521_retry(movie[self.site.site_name.lower()]["url"])
@staticmethod
def _get_movie_title(movie_tile):
raise NotImplementedError("This is not the implementation you are looking for.")
@staticmethod
def _get_movie_id(movie_tile):
raise NotImplementedError("This is not the implementation you are looking for.")
@staticmethod
def _get_movie_url(movie_tile):
raise NotImplementedError("This is not the implementation you are looking for.")
def parse_movie_details_page(self, movie):
raise NotImplementedError("This is not the implementation you are looking for.")
def _parse_external_links(self, movie, movie_details_page):
external_links = self._get_external_links(movie_details_page)
for link in external_links:
if "imdb.com" in link["href"] and "find?" not in link["href"]:
movie["imdb"] = dict()
movie["imdb"]["url"] = (
link["href"].strip("/").replace("http://", "https://")
)
movie["imdb"]["id"] = movie["imdb"]["url"].split("/")[-1]
elif "themoviedb.org" in link["href"]:
movie["tmdb"] = dict()
movie["tmdb"]["url"] = (
link["href"].strip("/").replace("http://", "https://")
)
movie["tmdb"]["id"] = movie["tmdb"]["url"].split("/")[-1]
@staticmethod
def _get_external_links(movie_details_page):
raise NotImplementedError("This is not the implementation you are looking for.")
|
athoik/enigma2
|
lib/python/Plugins/Extensions/DVDBurn/TitleList.py
|
Python
|
gpl-2.0
| 17,688
| 0.025328
|
import DVDProject, TitleList, TitleCutter, TitleProperties, ProjectSettings, DVDToolbox, Process
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Components.Task import job_manager
from Components.ActionMap import HelpableActionMap, ActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.MultiContent import MultiContentEntryText
from Components.Label import MultiColorLabel
from enigma import gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
class TitleList(Screen, HelpableScreen):
skin = """
<screen name="TitleList" position="center,center" size="560,470" title="DVD Tool" >
<ePixmap pixmap="buttons/re
|
d.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label"
|
position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="title_label" render="Label" position="10,48" size="540,38" font="Regular;18" transparent="1" />
<widget source="error_label" render="Label" position="10,48" size="540,296" zPosition="3" font="Regular;20" transparent="1" />
<widget source="titles" render="Listbox" scrollbarMode="showOnDemand" position="10,86" size="546,296" zPosition="3" transparent="1" >
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (0, 0), size = (360, 20), font = 0, flags = RT_HALIGN_LEFT, text = 1), # index 1 Title,
MultiContentEntryText(pos = (0, 20), size = (360, 17), font = 1, flags = RT_HALIGN_LEFT, text = 2), # index 2 description,
MultiContentEntryText(pos = (366, 6), size = (152, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 3), # index 3 channel,
MultiContentEntryText(pos = (366, 20), size = (102, 17), font = 1, flags = RT_HALIGN_RIGHT, text = 4), # index 4 begin time,
MultiContentEntryText(pos = (470, 20), size = (48, 20), font = 1, flags = RT_HALIGN_RIGHT, text = 5), # index 5 duration,
],
"fonts": [gFont("Regular", 20), gFont("Regular", 14)],
"itemHeight": 37
}
</convert>
</widget>
<ePixmap pixmap="div-h.png" position="0,390" zPosition="10" size="560,2" />
<ePixmap pixmap="buttons/key_menu.png" position="10,394" size="35,25" alphatest="on" />
<widget source="hint" render="Label" position="50,396" size="540,22" font="Regular;18" halign="left" />
<widget name="medium_label" position="10,420" size="540,22" font="Regular;18" halign="left" foregroundColors="#FFFFFF,#FFFF00,#FF0000" />
<widget source="space_bar_single" render="Progress" position="10,446" size="270,24" borderWidth="1" zPosition="2" backgroundColor="#254f7497" />
<widget source="space_label_single" render="Label" position="10,449" size="270,22" zPosition="3" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
<widget source="space_bar_dual" render="Progress" position="10,446" size="540,24" borderWidth="1" backgroundColor="#254f7497" />
<widget source="space_label_dual" render="Label" position="10,449" size="540,22" zPosition="2" font="Regular;18" halign="center" transparent="1" foregroundColor="#000000" />
</screen>"""
def __init__(self, session, project = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
self["titleactions"] = HelpableActionMap(self, "DVDTitleList",
{
"addTitle": (self.addTitle, _("Add a new title"), _("Add title")),
"titleProperties": (self.titleProperties, _("Properties of current title"), _("Title properties")),
"removeCurrentTitle": (self.removeCurrentTitle, _("Remove currently selected title"), _("Remove title")),
"settings": (self.settings, _("Collection settings"), _("Settings")),
"burnProject": (self.askBurnProject, _("Burn DVD"), _("Burn DVD")),
})
self["MovieSelectionActions"] = HelpableActionMap(self, "MovieSelectionActions",
{
"contextMenu": (self.showMenu, _("menu")),
})
self["actions"] = ActionMap(["OkCancelActions"],
{
"cancel": self.leave
})
self["key_red"] = StaticText()
self["key_green"] = StaticText(_("Add title"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Settings"))
self["title_label"] = StaticText()
self["error_label"] = StaticText()
self["space_label_single"] = StaticText()
self["space_label_dual"] = StaticText()
self["hint"] = StaticText(_("Advanced options"))
self["medium_label"] = MultiColorLabel()
self["space_bar_single"] = Progress()
self["space_bar_dual"] = Progress()
self["titles"] = List([])
self.previous_size = 0
if project is not None:
self.project = project
else:
self.newProject()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("DVD titlelist"))
def checkBackgroundJobs(self):
for job in job_manager.getPendingJobs():
print "type(job):", type(job)
print "Process.DVDJob:", Process.DVDJob
if type(job) == Process.DVDJob:
self.backgroundJob = job
return
self.backgroundJob = None
def showMenu(self):
menu = []
self.checkBackgroundJobs()
if self.backgroundJob:
j = self.backgroundJob
menu.append(("%s: %s (%d%%)" % (j.getStatustext(), j.name, int(100*j.progress/float(j.end))), self.showBackgroundJob))
menu.append((_("DVD media toolbox"), self.toolbox))
if self.project.settings.output.getValue() == "dvd":
if len(self["titles"].list):
menu.append((_("Burn DVD"), self.burnProject))
elif self.project.settings.output.getValue() == "iso":
menu.append((_("Create DVD-ISO"), self.burnProject))
menu.append((_("Burn existing image to DVD"), self.selectImage))
if len(self["titles"].list):
menu.append((_("Preview menu"), self.previewMenu))
menu.append((_("Edit chapters of current title"), self.editTitle))
menu.append((_("Reset and renumerate title names"), self.resetTitles))
menu.append((_("Exit"), self.leave))
self.session.openWithCallback(self.menuCallback, ChoiceBox, title="", list=menu)
def menuCallback(self, choice):
if choice:
choice[1]()
def showBackgroundJob(self):
job_manager.in_background = False
self.session.openWithCallback(self.JobViewCB, JobView, self.backgroundJob)
self.backgroundJob = None
def titleProperties(self):
if self.getCurrentTitle():
self.session.openWithCallback(self.updateTitleList, TitleProperties.TitleProperties, self, self.project, self["titles"].getIndex())
def selectImage(self):
self.session.openWithCallback(self.burnISO, ProjectSettings.FileBrowser, "image", self.project.settings)
def newProject(self):
self.project = DVDProject.DVDProject()
if self.loadTemplate():
self.project.session = self.session
self.settingsCB()
def addTitle(self):
from Screens.MovieSelection import MovieSelection
from Components.ActionMap import HelpableActionMap
class DVDMovieSelection(MovieSelection):
skin = """<screen name="DVDMovieSelection" position="center,center" size="560,445" title="Select a movie">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="but
|
Azure/azure-sdk-for-python
|
sdk/peering/azure-mgmt-peering/azure/mgmt/peering/aio/operations/_peering_service_prefixes_operations.py
|
Python
|
mit
| 11,684
| 0.004622
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PeeringServicePrefixesOperations:
"""PeeringServicePrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.peering.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
**kwargs
) -> "_models.PeeringServicePrefix":
"""Gets the peering service prefix.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param peering_service_name: The peering service name.
:type peering_service_name: str
:param prefix_name: The prefix name.
:type prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix, or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeeringServicePrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'peeringServiceName': self._serialize.url("peering_service_name", peering_service_name, 'str'),
'prefixName': self._serialize.url("prefix_name", prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeeringServicePrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Peering/peeringServices/{peeringServiceName}/prefixes/{prefixName}'} # type: ignore
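    # Editor's sketch (class and attribute names assumed -- verify against the
    # installed azure-mgmt-peering package): callers normally reach this operation
    # through the async management client rather than this class directly, e.g.
    #   async with PeeringManagementClient(credential, subscription_id) as client:
    #       prefix = await client.peering_service_prefixes.get(
    #           resource_group_name, peering_service_name, prefix_name)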
async def create_or_update(
self,
resource_group_name: str,
peering_service_name: str,
prefix_name: str,
peering_service_prefix: "_models.PeeringServicePrefix",
**kwargs
) -> "_models.PeeringServicePrefix":
|
"""Creates or updates the peering prefix.
:param resource_group_name: The resource group nam
|
e.
:type resource_group_name: str
:param peering_service_name: The peering service name.
:type peering_service_name: str
:param prefix_name: The prefix name.
:type prefix_name: str
        :param peering_service_prefix: The IP prefix for a peering.
:type peering_service_prefix: ~azure.mgmt.peering.models.PeeringServicePrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeeringServicePrefix, or the result of cls(response)
:rtype: ~azure.mgmt.peering.models.PeeringServicePrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeeringServicePrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'peeringServiceName': self._serialize.url("peering_service_name", peering_service_name, 'str'),
'prefixName': self._serialize.url("prefix_name", prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_service_prefix, 'PeeringServicePrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise H
|
UNH-CORE/RVAT-Re-dep
|
pyrvatrd/processing.py
|
Python
|
mit
| 35,800
| 0.002207
|
# -*- coding: utf-8 -*-
"""This module contains classes and functions for processing data."""
from __future__ import division, print_function
import numpy as np
from pxl import timeseries as ts
from pxl.timeseries import calc_uncertainty, calc_exp_uncertainty
import matplotlib.pyplot as plt
from scipy.io import loadmat
import multiprocessing as mp
import scipy.stats
from numpy import nanmean, nanstd
from scipy.signal import decimate
from pxl import fdiff
import progressbar
import json
import os
import sys
import pandas as pd
if sys.version_info[0] == 3:
from urllib.request import urlretrieve
else:
from urllib import urlretrieve
# Dict for runs corresponding to each height
wakeruns = {0.0 : np.arange(0, 45),
0.125 : np.arange(45, 90),
0.25 : np.arange(90, 135),
0.375 : np.arange(135, 180),
0.5 : np.arange(180, 225),
0.625 : np.arange(225, 270)}
# Constants
H = 1.0
D = 1.0
A = D*H
R = D/2
rho = 1000.0
nu = 1e-6
chord = 0.14
# Directory constants
raw_data_dir = os.path.join("Data", "Raw")
processed_data_dir = os.path.join("Data", "Processed")
def calc_b_vec(vel):
"""Calculates the systematic error of a Vectrino measurement (in m/s)
from their published specs. Returns half the +/- value as b."""
return 0.5*(0.005*np.abs(vel) + 0.001)
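# Editor's worked example (illustrative numbers only): for a measured velocity of
# 1.0 m/s the spec term is 0.005*1.0 + 0.001 = 0.006 m/s, so calc_b_vec(1.0)
# returns half of that, 0.003 m/s, as the systematic uncertainty b.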
def calc_tare_torque(rpm):
"""Returns tare torque array given RPM array."""
return 0.000474675989476*rpm + 0.876750155952
times = {0.3 : (20.0, 80.0),
0.4 : (20.0, 60.0),
0.5 : (20.0, 50.0),
0.6 : (20.0, 45.0),
0.7 : (20.0, 38.0),
0.8 : (18.0, 34.0),
0.9 : (16.0, 32.0),
1.0 : (15.0, 30.0),
1.1 : (15.0, 28.0),
1.2 : (14.0, 27.0),
1.3 : (13.0, 23.0),
1.4 : (12.0, 20.0)}
class Run(object):
"""Object that represents a single turbine tow"""
def __init__(self, section, nrun):
self.section =
|
section
nrun = int(nrun)
section_raw_dir = os.path.join("Data", "Raw", section)
if nrun < 0:
runs = []
for f in os.listdir(section_raw_dir):
try:
runs.append(
|
int(f))
except ValueError:
pass
self.nrun = sorted(runs)[nrun]
else:
self.nrun = nrun
self.raw_dir = os.path.join(section_raw_dir, str(self.nrun))
self.loaded = False
self.t2found = False
self.not_loadable = False
self.wake_calculated = False
# Do all processing
self.load()
if self.loaded:
self.subtract_tare_drag()
self.add_tare_torque()
self.calc_perf_instantaneous()
self.make_trimmed()
self.filter_wake()
self.calc_wake_instantaneous()
self.calc_perf_per_rev()
self.calc_perf_stats()
self.calc_wake_stats()
self.calc_perf_uncertainty()
self.calc_perf_exp_uncertainty()
self.calc_wake_per_rev()
self.calc_wake_uncertainty()
self.calc_wake_exp_uncertainty()
def load(self):
"""Loads the data from the run into memory."""
self.loaded = True
try:
with open("Config/raw_data_urls.json") as f:
raw_data_urls = json.load(f)
except IOError:
raw_data_urls = {}
# Load metadata if it exists
fpath_metadata = os.path.join(self.raw_dir, "metadata.json")
if os.path.isfile(fpath_metadata):
self.load_metadata()
elif make_remote_name(fpath_metadata) in raw_data_urls:
self.download_raw("metadata.json")
self.load_metadata()
else:
self.loaded = False
# Load NI data if it exists
fpath_nidata = os.path.join(self.raw_dir, "nidata.mat")
if os.path.isfile(fpath_nidata):
self.load_nidata()
elif make_remote_name(fpath_nidata) in raw_data_urls:
self.download_raw("nidata.mat")
self.load_nidata()
else:
self.loaded = False
# Load ACS data if it exists
fpath_acsdata = os.path.join(self.raw_dir, "acsdata.mat")
if os.path.isfile(fpath_acsdata):
self.load_acsdata()
elif make_remote_name(fpath_acsdata) in raw_data_urls:
self.download_raw("acsdata.mat")
self.load_acsdata()
else:
self.loaded = False
# Load Vectrino data if it exists
fpath_vecdata = os.path.join(self.raw_dir, "vecdata.mat")
if os.path.isfile(fpath_vecdata):
self.load_vecdata()
elif make_remote_name(fpath_vecdata) in raw_data_urls:
self.download_raw("vecdata.mat")
self.load_vecdata()
else:
self.loaded = False
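    # Editor's note: each raw file above follows the same pattern -- use the local
    # copy under Data/Raw/<section>/<run>/ if it exists, otherwise download it when
    # a matching entry is present in Config/raw_data_urls.json, and mark the run as
    # not loaded if neither source is available.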
def load_metadata(self):
"""Loads run metadata."""
with open(os.path.join(self.raw_dir, "metadata.json")) as f:
self.metadata = json.load(f)
self.tow_speed_nom = np.round(self.metadata["Tow speed (m/s)"], decimals=1)
self.tsr_nom = self.metadata["Tip speed ratio"]
self.y_R = self.metadata["Vectrino y/R"]
self.z_H = self.metadata["Vectrino z/H"]
def load_nidata(self):
nidata = loadmat(os.path.join(self.raw_dir, "nidata.mat"), squeeze_me=True)
self.time_ni = nidata["t"]
self.sr_ni = (1.0/(self.time_ni[1] - self.time_ni[0]))
if "carriage_pos" in nidata:
self.lin_enc = True
self.carriage_pos = nidata["carriage_pos"]
self.tow_speed_ni = fdiff.second_order_diff(self.carriage_pos, self.time_ni)
self.tow_speed_ni = ts.smooth(self.tow_speed_ni, 8)
self.tow_speed_ref = self.tow_speed_ni
else:
self.lin_enc = False
self.tow_speed_ref = self.tow_speed_nom
self.torque = nidata["torque_trans"]
self.torque_arm = nidata["torque_arm"]
self.drag = nidata["drag_left"] + nidata["drag_right"]
# Remove offsets from drag, not torque
t0 = 2
self.drag = self.drag - np.mean(self.drag[0:self.sr_ni*t0])
# Compute RPM and omega
self.angle = nidata["turbine_angle"]
self.rpm_ni = fdiff.second_order_diff(self.angle, self.time_ni)/6.0
self.rpm_ni = ts.smooth(self.rpm_ni, 8)
self.omega_ni = self.rpm_ni*2*np.pi/60.0
self.omega = self.omega_ni
self.tow_speed = self.tow_speed_ref
def load_acsdata(self):
fpath = os.path.join(self.raw_dir, "acsdata.mat")
acsdata = loadmat(fpath, squeeze_me=True)
self.tow_speed_acs = acsdata["carriage_vel"]
self.rpm_acs = acsdata["turbine_rpm"]
self.rpm_acs = ts.sigmafilter(self.rpm_acs, 3, 3)
self.omega_acs = self.rpm_acs*2*np.pi/60.0
self.time_acs = acsdata["t"]
if len(self.time_acs) != len(self.omega_acs):
newlen = np.min((len(self.time_acs), len(self.omega_acs)))
self.time_acs = self.time_acs[:newlen]
self.omega_acs = self.omega_acs[:newlen]
self.omega_acs_interp = np.interp(self.time_ni, self.time_acs, self.omega_acs)
self.rpm_acs_interp = self.omega_acs_interp*60.0/(2*np.pi)
def load_vecdata(self):
try:
vecdata = loadmat(self.raw_dir + "/" + "vecdata.mat",
squeeze_me=True)
self.sr_vec = 200.0
self.time_vec = vecdata["t"]
self.u = vecdata["u"]
self.v = vecdata["v"]
self.w = vecdata["w"]
except IOError:
self.vecdata = None
def download_raw(self, name):
download_raw(self.section, self.nrun, name)
def subtract_tare_drag(self):
df = pd.read_csv(os.path.join("Data", "Processed", "Tare drag.csv"))
self.tare_drag = df.tare_drag[df.tow_speed==self.tow_speed_nom].values[0]
self.drag = self.drag - self.tare_drag
def add_tare_torque(self):
# Choose reference RPM, using NI for all except Perf-0.4
if self.section == "Perf-0.4":
|
armink/rt-thread
|
tools/buildbot.py
|
Python
|
apache-2.0
| 1,963
| 0.001528
|
import os
import sys
def usage():
print('%s all -- build all bsp' % os.path.basename(sys.argv[0]))
print('%s clean -- clean all bsp' % os.path.basename(sys.argv[0]))
    print('%s project -- update all project files' % os.path.basename(sys.argv[0]))
BSP_ROOT = os.path.join("..", "bsp")
if len(sys.argv) != 2:
usage()
sys.exit(0)
# get command options
command = ''
if sys.argv[1] == 'all':
command = ' '
elif sys.argv[1] == 'clean':
command = ' -c'
elif sys.argv[1] == 'project':
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'template.Uv2')):
print('prepare MDK3 project file on ' + project_dir)
command = ' --target=mdk -s'
os.system('scons --directory=' + project_dir + command)
if os.path.isfile(os.path.join(project_dir, 'template.uvproj')):
print('prepare MDK4 project file on ' + project_dir)
command = ' --target=mdk4 -s'
|
os.system('scons --directory=' + project_dir + command)
if os.path.isfile(os.path.join(project_dir, 'template.uvprojx')):
print('prepare MDK5 project file on ' + project_dir)
command = ' --target=mdk5 -s'
os.system('scons --directory=' + project_dir + command)
if os.path.isfile(os.path.join(project_dir, 'template.ewp')):
print('prepare IAR project file on ' + project_dir)
command =
|
' --target=iar -s'
os.system('scons --directory=' + project_dir + command)
sys.exit(0)
else:
usage()
sys.exit(0)
projects = os.listdir(BSP_ROOT)
for item in projects:
project_dir = os.path.join(BSP_ROOT, item)
if os.path.isfile(os.path.join(project_dir, 'SConstruct')):
if os.system('scons --directory=' + project_dir + command) != 0:
print('build failed!!')
break
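# Editor's note: typical invocations, per usage() above, are
#   python buildbot.py all      -- run scons for every bsp that has a SConstruct
#   python buildbot.py clean    -- same, but passing -c to scons
#   python buildbot.py project  -- regenerate MDK/IAR project files from templates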
|
andrei-karalionak/ggrc-core
|
test/integration/ggrc_workflows/converters/test_workflow_export_csv.py
|
Python
|
apache-2.0
| 11,531
| 0.002168
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for workflow object exports."""
from os.path import abspath, dirname, join
from flask.json import dumps
from ggrc.app import app
from ggrc_workflows.models import Workflow
from integration.ggrc import TestCase
from integration.ggrc_workflows.generator import WorkflowsGenerator
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'test_csvs/')
class TestExportEmptyTemplate(TestCase):
"""Test empty export for all workflow object types."""
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
def test_single_object_export(self):
"""Test empty exports for workflow only."""
data = [{"object_name": "Workflow", "fields": "all"}]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
def test_multiple_objects(self):
"""Test empty exports for all workflow object in one query."""
data = [
{"object_name": "Workflow", "fields": "all"},
{"object_name": "TaskGroup", "fields": "all"},
{"object_name": "TaskGroupTask", "fields": "all"},
{"object_name": "Cycle", "fields": "all"},
{"object_name": "CycleTaskGroup", "fields": "all"},
{"object_name": "CycleTaskGroupObjectTask", "fields": "all"},
]
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Workflow,", response.data)
self.assertIn("Task Group,", response.data)
self.assertIn("Task,", response.data)
self.assertIn("Cycle,", response.data)
self.assertIn("Cycle Task Group,", response.data)
self.assertIn("Cycle Task Group Object Task,", response.data)
class TestExportMultipleObjects(TestCase):
""" Test data is found in the google sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=2035742544
"""
@classmethod
def setUpClass(cls): # pylint: disable=C0103
TestCase.clear_data()
cls.tc = app.test_client()
cls.tc.get("/login")
cls.import_file("workflow_big_sheet.csv")
@classmethod
def import_file(cls, filename, dry_run=False):
data = {"file": (open(join(CSV_DIR, filename)), filename)}
headers = {
"X-test-only": "true" if dry_run else "false",
"X-requested-by": "gGRC",
}
cls.tc.post("/_service/import_csv",
data=data, headers=headers)
def activate(self):
""" activate workflows just once after the class has been initialized
This should be in setUpClass method, but we can't access the server
context from there."""
gen = WorkflowsGenerator()
# generate cycle for the only one time wf
wf1 = Workflow.query.filter_by(status="Draft", slug="wf-1").first()
if wf1:
gen.generate_cycle(wf1)
workflows = Workflow.query.filter_by(status="Draft").all()
for wf in workflows:
gen.activate_workflow(wf)
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC",
"X-export-view": "blocks",
}
self.activate()
def export_csv(self, data):
response = self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
self.assert200(response)
return response
def test_workflow_task_group_mapping(self):
""" test workflow and task group mappings """
data = [
{
"object_name": "Workflow", # wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 1 for wf and 1 on each tg
self.assertIn("tg-1", response)
self.assertIn("tg-6", response)
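  # Editor's note on the export payload used throughout these tests: each block is
  # a dict with object_name, filters and fields; a filter expression whose op is
  # "relevant" and whose object_name is "__previous__" refers back to the result
  # set of an earlier block in the same request, selected by its zero-based index
  # in "ids".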
def test_tg_task(self):
""" test task group and task mappings """
data = [
{
"object_name": "TaskGroupTask", # task-1, task-7
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "TaskGroup",
"slugs": ["tg-1"],
},
},
"fields": "all",
}, {
"object_name": "TaskGroup", # tg-1, tg-2
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("tg-1")) # 2 for tasks and 1 for tg
self.assertIn("task-1", response)
self.assertIn("task-7", response)
def test_workflow_cycle_mapping(self):
""" test workflo
|
w and cycle mappings """
data = [
{
"object_name": "Cycle", # cycle with title wf-1
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "Workflow",
"slugs": ["wf-1"],
},
},
"fields": "all",
}, {
"object_name": "Workflow", # wf-1
"filters": {
|
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["0"],
},
},
"fields": "all",
      }, {
        "object_name": "Cycle", # should be the same cycle as in the first block
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
# Task mapped to any of the two task groups, 3 tasks
"object_name": "CycleTaskGroupObjectTask",
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["2"],
},
},
"fields": "all",
}, {
"object_name": "CycleTaskGroup", # two cycle groups
"filters": {
"expression": {
"op": {"name": "relevant"},
"object_name": "__previous__",
"ids": ["4"],
},
},
"fields": "all",
},
]
response = self.export_csv(data).data
self.assertEqual(3, response.count("wf-1")) # 2 for cycles and 1 for wf
# 3rd block = 2, 5th block = 3, 6th block = 2.
self.assertEqual(7, response.count("CYCLEGROUP-"))
self.assertEqual(9, response.count("CYCLE-"))
self.assertEqual(3, response.count("CYCLETASK-"))
def test_cycle_taks_objects(self):
""" test cycle task and var
|
buriburisuri/ebgan
|
mnist_ebgan_generate.py
|
Python
|
mit
| 943
| 0.003181
|
import sugartensor as tf
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from model import *
__author__ = 'namju.kim@kakaobrain.com'
# set log level to debug
tf.sg_verbosity(10)
#
# hyper parameters
#
batch_size = 100
# random uniform seed
z = tf.random_uniform((batch_size, z_dim))
# generator
gen = generator(z)
#
# draw samples
#
with tf.Session() as sess:
tf.sg_init(sess)
# restore parameters
tf.s
|
g_restore(sess, tf.train.latest_checkpoint('asset/train'), category='generator')
# run generator
imgs = sess.run(gen.sg_squeeze())
# plot result
_, ax = plt.subplots(10, 10, sharex=True, sharey=True)
for i in range(10):
for j in range(10):
ax[i][j].imshow(imgs[i * 10 + j], 'gray'
|
)
ax[i][j].set_axis_off()
plt.savefig('asset/train/sample.png', dpi=600)
tf.sg_info('Sample image saved to "asset/train/sample.png"')
plt.close()
|
lehoanganh/kcsdb
|
chef-repo/.chef/murder-kcsd/dist/BitTornado/BT1/PiecePicker.py
|
Python
|
apache-2.0
| 11,915
| 0.004784
|
# Written by Bram Cohen
# see LICENSE.txt for license information
from random import randrange, shuffle
from BitTornado.clock import clock
try:
True
except:
True = 1
False = 0
class PiecePicker:
def __init__(self, numpieces,
rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
priority_step = 20):
self.rarest_first_cutoff = rarest_first_cutoff
self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step
self.priority_step = priority_step
self.cutoff = rarest_first_priority_cutoff
self.numpieces = numpieces
self.started = []
self.totalcount = 0
self.numhaves = [0] * numpieces
self.priority = [1] * numpieces
self.removed_partials = {}
self.crosscount = [numpieces]
self.crosscount2 = [numpieces]
self.has = [0] * numpieces
self.numgot = 0
self.done = False
self.seed_connections = {}
self.past_ips = {}
self.seed_time = None
self.sup
|
erseed = False
self.seeds_connected = 0
self._init_interests()
def _init_interests(self):
self.interests = [[] for x in xrange(self.priority_step)]
self.level_in_interests = [self.p
|
riority_step] * self.numpieces
interests = range(self.numpieces)
shuffle(interests)
self.pos_in_interests = [0] * self.numpieces
for i in xrange(self.numpieces):
self.pos_in_interests[interests[i]] = i
self.interests.append(interests)
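    # Editor's note on the structures above: interests[level] lists the pieces
    # currently at that level, where level = numhaves[piece] +
    # priority_step * priority[piece], and pos_in_interests[piece] records the
    # piece's position inside its level list so that _shift_over can move it to a
    # neighbouring level with a swap-and-pop removal and a randomised re-insertion.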
def got_have(self, piece):
self.totalcount+=1
numint = self.numhaves[piece]
self.numhaves[piece] += 1
self.crosscount[numint] -= 1
if numint+1==len(self.crosscount):
self.crosscount.append(0)
self.crosscount[numint+1] += 1
if not self.done:
numintplus = numint+self.has[piece]
self.crosscount2[numintplus] -= 1
if numintplus+1 == len(self.crosscount2):
self.crosscount2.append(0)
self.crosscount2[numintplus+1] += 1
numint = self.level_in_interests[piece]
self.level_in_interests[piece] += 1
if self.superseed:
self.seed_got_haves[piece] += 1
numint = self.level_in_interests[piece]
self.level_in_interests[piece] += 1
elif self.has[piece] or self.priority[piece] == -1:
return
if numint == len(self.interests) - 1:
self.interests.append([])
self._shift_over(piece, self.interests[numint], self.interests[numint + 1])
def lost_have(self, piece):
self.totalcount-=1
numint = self.numhaves[piece]
self.numhaves[piece] -= 1
self.crosscount[numint] -= 1
self.crosscount[numint-1] += 1
if not self.done:
numintplus = numint+self.has[piece]
self.crosscount2[numintplus] -= 1
self.crosscount2[numintplus-1] += 1
numint = self.level_in_interests[piece]
self.level_in_interests[piece] -= 1
if self.superseed:
numint = self.level_in_interests[piece]
self.level_in_interests[piece] -= 1
elif self.has[piece] or self.priority[piece] == -1:
return
self._shift_over(piece, self.interests[numint], self.interests[numint - 1])
def _shift_over(self, piece, l1, l2):
assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0)
parray = self.pos_in_interests
p = parray[piece]
assert l1[p] == piece
q = l1[-1]
l1[p] = q
parray[q] = p
del l1[-1]
newp = randrange(len(l2)+1)
if newp == len(l2):
parray[piece] = len(l2)
l2.append(piece)
else:
old = l2[newp]
parray[old] = len(l2)
l2.append(old)
l2[newp] = piece
parray[piece] = newp
def got_seed(self):
self.seeds_connected += 1
self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
def became_seed(self):
self.got_seed()
self.totalcount -= self.numpieces
self.numhaves = [i-1 for i in self.numhaves]
if self.superseed or not self.done:
self.level_in_interests = [i-1 for i in self.level_in_interests]
if self.interests:
del self.interests[0]
del self.crosscount[0]
if not self.done:
del self.crosscount2[0]
def lost_seed(self):
self.seeds_connected -= 1
self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
def requested(self, piece):
if piece not in self.started:
self.started.append(piece)
def _remove_from_interests(self, piece, keep_partial = False):
l = self.interests[self.level_in_interests[piece]]
p = self.pos_in_interests[piece]
assert l[p] == piece
q = l[-1]
l[p] = q
self.pos_in_interests[q] = p
del l[-1]
try:
self.started.remove(piece)
if keep_partial:
self.removed_partials[piece] = 1
except ValueError:
pass
def complete(self, piece):
assert not self.has[piece]
self.has[piece] = 1
self.numgot += 1
if self.numgot == self.numpieces:
self.done = True
self.crosscount2 = self.crosscount
else:
numhaves = self.numhaves[piece]
self.crosscount2[numhaves] -= 1
if numhaves+1 == len(self.crosscount2):
self.crosscount2.append(0)
self.crosscount2[numhaves+1] += 1
self._remove_from_interests(piece)
def next(self, haves, wantfunc, complete_first = False):
cutoff = self.numgot < self.rarest_first_cutoff
complete_first = (complete_first or cutoff) and not haves.complete()
best = None
bestnum = 2 ** 30
for i in self.started:
if haves[i] and wantfunc(i):
if self.level_in_interests[i] < bestnum:
best = i
bestnum = self.level_in_interests[i]
if best is not None:
if complete_first or (cutoff and len(self.interests) > self.cutoff):
return best
if haves.complete():
r = [ (0, min(bestnum,len(self.interests))) ]
elif cutoff and len(self.interests) > self.cutoff:
r = [ (self.cutoff, min(bestnum,len(self.interests))),
(0, self.cutoff) ]
else:
r = [ (0, min(bestnum,len(self.interests))) ]
for lo,hi in r:
for i in xrange(lo,hi):
for j in self.interests[i]:
if haves[j] and wantfunc(j):
return j
if best is not None:
return best
return None
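    # Editor's summary (simplified): next() prefers finishing already-started
    # pieces early in the download (or when complete_first is requested); otherwise
    # it walks the interests buckets from rarer towards more common levels and
    # returns the first piece this peer has that the caller still wants, falling
    # back to the best started piece if nothing else qualifies.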
def am_I_complete(self):
return self.done
def bump(self, piece):
l = self.interests[self.level_in_interests[piece]]
pos = self.pos_in_interests[piece]
del l[pos]
l.append(piece)
for i in range(pos,len(l)):
self.pos_in_interests[l[i]] = i
try:
self.started.remove(piece)
except:
pass
def set_priority(self, piece, p):
if self.superseed:
return False # don't muck with this if you're a superseed
oldp = self.priority[piece]
if oldp == p:
return False
self.priority[piece] = p
if p == -1:
# when setting priority -1,
# make sure to cancel any downloads for this piece
if not self.has[piece]:
self._remove_from_interests(piece, True)
return True
if oldp == -1:
level = self.numhaves[piece] + (self.priority_step * p)
self.level_in_interests[piece] = level
if self.has[piece]:
return True
while len(self.interests) < level+1:
self.interests.appen
|