text stringlengths 4 1.02M | meta dict |
|---|---|
import json
import os
class Curbd(object):
    """Binds a config-file location to the service that loads it into Consul.

    Derives both the key prefix and the on-disk JSON path from the given
    options object (expects .program, .config and .environment attributes).
    """
    def __init__(self, service, options):
        self.service = service
        # Keys are stored under "<program>/<config>/..." in the KV store.
        self.key_prefix = "{}/{}/".format(options.program, options.config)
        # Config files live in a sibling checkout: ../curbd-config/<env>/<program>/<config>.json
        relative = "../curbd-config/{}/{}/{}.json".format(
            options.environment, options.program, options.config)
        self.path = os.path.normpath(relative)

    def populate(self):
        """Load the JSON file and push its flattened keys via the service."""
        self.service.populate_json(self.path, self.key_prefix)
class CurbdService():
    """Flattens JSON configuration files into a Consul key/value store."""

    def __init__(self, consul_conn, dry_run=False):
        # consul_conn: a Consul client exposing kv.put(key, value).
        # dry_run: when True, keys are only printed, never written.
        self.consul_conn = consul_conn
        self.dry_run = dry_run

    def populate_json(self, path, key_prefix):
        """Load the JSON file at `path` and store its keys under `key_prefix`."""
        with open(path) as json_file:
            self.__populate_json(json.load(json_file), key_prefix)

    def __populate_json(self, json_data, key_prefix):
        """Recursively flatten `json_data` into KV pairs.

        Nested dicts extend the key path with "/"; list values are joined
        into a single newline-separated string.
        """
        for key, value in json_data.items():
            if isinstance(value, dict):
                # Descend into nested objects, extending the key path.
                self.__populate_json(value, key_prefix + key + "/")
                continue
            if isinstance(value, list):
                value = "\n".join(value)
            # Echo every key for operator visibility, even in dry-run mode.
            print(key_prefix + key, value)
            if not self.dry_run:
                self.consul_conn.kv.put(key_prefix + key, value)
| {
"content_hash": "8773bbceae5262201c37c6b5af87fc20",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 82,
"avg_line_length": 32.861111111111114,
"alnum_prop": 0.5350803043110736,
"repo_name": "ridecharge/curbd",
"id": "c976d8e2802902895674ba01001c8b65201d20f8",
"size": "1183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curbd/curbd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "264"
},
{
"name": "Python",
"bytes": "4505"
}
],
"symlink_target": ""
} |
import pywikibot
def replace():
    """Relabel entries on mg.wiktionary from language code 'gau' to 'xtg'.

    Only pages whose title also exists in the fr.wiktionary "gaulois"
    category are touched; for those, section markers (=gau=) and template
    arguments (|gau}}) are rewritten and the page is saved.
    """
    print('fetching fr_words')
    # Use a set: membership is tested once per candidate page in the loop
    # below, and a set lookup is O(1) versus O(n) for the original list.
    fr_words = {page.title() for page in pywikibot.Category(
        pywikibot.Site('fr', 'wiktionary'), "gaulois").articles()}
    print('done fetching fr_words')
    for page in pywikibot.Category(pywikibot.Site('mg', 'wiktionary'),
                                   "gadaba an'i Mudhili").articles():
        title = page.title()
        pywikibot.output('>>>> %s <<<<<' % title)
        if title not in fr_words:
            continue
        content = page.get()
        new_content = content
        new_content = new_content.replace("=gau=", '=xtg=')
        new_content = new_content.replace("|gau}}", '|xtg}}')
        # Show the pending change before saving it.
        pywikibot.showDiff(content, new_content)
        page.put(
            new_content,
            "manitsy kaody ho an'ny teny gôloà (gau --> xtg)")


if __name__ == '__main__':
    replace()
| {
"content_hash": "69e5e930148a50547e8ce45d3c0be2a1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 33.96153846153846,
"alnum_prop": 0.5503963759909399,
"repo_name": "radomd92/botjagwar",
"id": "ebc841fe9615663c0c819368c1d368b03d335b3d",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "replace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "28427"
},
{
"name": "Python",
"bytes": "657399"
},
{
"name": "Shell",
"bytes": "3709"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Eloquent'
copyright = u'2015, Sébastien Eustace'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'pastie'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE: this default is overridden to 'sphinx_rtd_theme' for local (non-RTD)
# builds at the bottom of this file.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Eloquentdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'Eloquent.tex', u'Eloquent Documentation',
   u'Sébastien Eustace', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'eloquent', u'Eloquent Documentation',
     [u'Sébastien Eustace'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Eloquent', u'Eloquent Documentation',
   u'Sébastien Eustace', 'Eloquent', 'The Eloquent ORM provides a simple yet beautiful ActiveRecord implementation.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Read the Docs integration --------------------------------------------
# The RTD build environment exports READTHEDOCS='True'.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    # Local builds: select the Read the Docs theme explicitly and add our
    # stylesheet overrides on top of it.
    html_theme = 'sphinx_rtd_theme'
    def setup(app):
        # NOTE(review): app.add_stylesheet is the old-style API (renamed to
        # add_css_file in Sphinx 1.8) -- confirm the Sphinx version in use.
        app.add_stylesheet('theme_overrides.css')
else:
    # Builds on Read the Docs: html_theme keeps its earlier value; the RTD
    # CSS plus our overrides are injected through html_context instead.
    html_context = {
        'css_files': [
            'https://media.readthedocs.org/css/sphinx_rtd_theme.css',
            'https://media.readthedocs.org/css/readthedocs-doc-embed.css',
            '_static/theme_overrides.css',
        ],
    }
| {
"content_hash": "63826739d30d98ca733db8159c740408",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 117,
"avg_line_length": 31.5,
"alnum_prop": 0.7002302193141888,
"repo_name": "sdispater/eloquent",
"id": "2938df1a18fbad1bac3f2f7f55709bf815b729fa",
"size": "8678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "741617"
}
],
"symlink_target": ""
} |
"""
Mimics the core API with the new deserializer
"""
from __future__ import absolute_import
from typing import IO, Any # pylint:disable=W0611
try:
# Python 2
from StringIO import StringIO as BytesIO
except ImportError:
# Python 3+
from io import BytesIO
from ..utils import java_data_fd
from .api import ObjectTransformer # pylint:disable=W0611
from .core import JavaStreamParser
from .transformers import DefaultObjectTransformer, NumpyArrayTransformer
# ------------------------------------------------------------------------------
# Module version, kept as a tuple for programmatic comparison
__version_info__ = (0, 4, 3)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format (consumed by docutils/Sphinx tooling)
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
def load(file_object, *transformers, **kwargs):
    # type: (IO[bytes], ObjectTransformer, Any) -> Any
    """
    Deserializes Java primitive data and objects serialized using
    ObjectOutputStream from a file-like object.

    :param file_object: A file-like object
    :param transformers: Custom transformers to use
    :return: The deserialized object (None if nothing was parsed, the single
        object if exactly one was found, else the list of parsed objects)
    """
    # Check the file format and uncompress the stream if necessary
    stream = java_data_fd(file_object)

    # Make sure a DefaultObjectTransformer is always part of the chain
    active_transformers = list(transformers)
    if not any(isinstance(candidate, DefaultObjectTransformer)
               for candidate in active_transformers):
        active_transformers.append(DefaultObjectTransformer())

    if kwargs.get("use_numpy_arrays", False):
        # Optional numpy-backed array handling, enabled by keyword flag
        active_transformers.append(NumpyArrayTransformer())

    # Parse the stream content
    results = JavaStreamParser(stream, active_transformers).run()

    if not results:
        # Nothing was parsed, but no error occurred
        return None
    if len(results) == 1:
        # A single object: unwrap it
        return results[0]
    # More than one object: return them all
    return results
def loads(data, *transformers, **kwargs):
    # type: (bytes, ObjectTransformer, Any) -> Any
    """
    Deserializes Java objects and primitive data serialized using
    ObjectOutputStream from bytes.

    :param data: A Java data string
    :param transformers: Custom transformers to use
    :param ignore_remaining_data: If True, don't log an error when unused
        trailing bytes are remaining
    :return: The deserialized object
    """
    # Wrap the bytes in an in-memory stream and delegate to load()
    buffer_fd = BytesIO(data)
    return load(buffer_fd, *transformers, **kwargs)
| {
"content_hash": "0be92312d3f046744e810150a9096550",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 80,
"avg_line_length": 30.88235294117647,
"alnum_prop": 0.6407619047619048,
"repo_name": "tcalmant/python-javaobj",
"id": "2076ccd4b259e1fdec83acae8cd6269f0d5c98be",
"size": "2648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "javaobj/v2/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "17316"
},
{
"name": "Python",
"bytes": "206117"
}
],
"symlink_target": ""
} |
'''
Created on Jan 12, 2018
# Copyright 2017, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@author: John Phan
'''
import argparse
from datetime import date
import json
import logging
import sys
from pprint import pprint
from bq_wrapper import fetch_paged_results, query_bq_table
import gcs_wrapper_gcloud as wrapper
from isbcgc_cloudsql_model import ISBCGC_database_helper
from util import create_log
def check_for_valid_index_files(bam_data, bai_dict, log):
    """Determine which metadata records need their index filename updated.

    Compares each record's recorded index file (record['bai']) against the
    actual file URIs present in the bucket (bai_dict keys). When the recorded
    name is absent but the alternate naming convention (*.bam.bai <-> *.bai)
    exists in the bucket, the record is scheduled for update; when neither
    form exists, it is reported as missing.

    :param bam_data: dict of table name -> list of record dicts with keys
        'id', 'bucket', 'path', 'bam', 'bai'
    :param bai_dict: dict whose keys are existing 'gs://...' index file URIs
    :param log: logger used for warnings about unusable index names
    :return: (update_list, missing_list) — per-table dicts; update entries
        carry {'id', 'index_file'}, missing entries carry {'id'}
    """
    update_list = {}
    missing_list = {}
    for table in bam_data:
        update_list[table] = []
        missing_list[table] = []
        for record in bam_data[table]:
            bai_name = record['bai']
            # BUGFIX: upstream only filters NULL file_name_key, so the index
            # field can still be None/empty; the original crashed on
            # None.endswith(...) here. Treat it as an invalid name instead.
            if not bai_name:
                log.warning('invalid index file name: {}'.format(bai_name))
                continue
            bai_uri = 'gs://{}{}/{}'.format(
                record['bucket'], record['path'], bai_name)
            if bai_uri in bai_dict:
                # Recorded index file already exists in the bucket.
                continue
            # Try the alternate index naming convention. Order matters:
            # '.bam.bai' must be tested before the shorter '.bai' suffix.
            if bai_name.endswith('.bam.bai'):
                alt_name = bai_name[:-len('.bam.bai')] + '.bai'
            elif bai_name.endswith('.bai'):
                alt_name = bai_name[:-len('.bai')] + '.bam.bai'
            else:
                log.warning('invalid index file name: {}'.format(bai_name))
                continue
            alt_uri = 'gs://{}{}/{}'.format(
                record['bucket'], record['path'], alt_name)
            if alt_uri in bai_dict:
                # The other naming convention exists: record needs updating.
                update_list[table].append({
                    'id': record['id'],
                    'index_file': alt_name
                })
            else:
                # Neither form of the index file exists in the bucket.
                missing_list[table].append({
                    'id': record['id']
                })
    return (update_list, missing_list)
def process_gs_uri(path, log):
    """Split a 'gs://bucket/path/file.ext' URI into its components.

    :param path: the gs:// URI to split
    :param log: logger used to report malformed URIs
    :return: (bucket, path, filename) tuple; (False, False, False) when the
        URI has fewer than bucket + one trailing component
    """
    parts = path.split('/')
    if len(parts) < 4:
        log.warning('invalid path: {}'.format(path))
        return (False, False, False)
    bucket, middle, filename = parts[2], parts[3:-1], parts[-1]
    # Re-join everything between bucket and filename, with a leading slash.
    return (bucket, '/' + '/'.join(middle), filename)
def load_bai_file_list(bai_file_path):
    """Read the bucket file listing into a dict keyed by URI.

    The dict values are always True; only key membership is used downstream,
    giving O(1) lookups.
    """
    with open(bai_file_path, 'r') as listing:
        return {line.strip(): True for line in listing}
def get_bambai_from_database(config, log):
    """Query the cloud SQL metadata tables for BAM records.

    For every BAM-format row with a non-NULL file_name_key, collect id,
    bucket, path, bam filename and index filename; also collect the set of
    buckets those files live in.

    :return: (bam_data, used_buckets) — bam_data maps table name to a list
        of record dicts; used_buckets maps bucket name to True
    """
    log.info('\tbegin get_bambai_from_database()')
    tables = (
        'CCLE_metadata_data_HG19',
        'TARGET_metadata_data_HG19',
        'TARGET_metadata_data_HG38',
        'TCGA_metadata_data_HG19',
        'TCGA_metadata_data_HG38',
    )
    sql_template = (
        'select metadata_data_id, file_name_key, index_file_name '
        'from {} where data_format = "BAM"'
    )
    bam_data = {}
    used_buckets = {}
    for table in tables:
        bam_data[table] = []
        rows = ISBCGC_database_helper.select(
            config,
            sql_template.format(table),
            log,
            []
        )
        log.info('\t\tfound {} rows for {}'.format(len(rows), table))
        # Count rows we cannot use because file_name_key is NULL/empty.
        null_count = 0
        for row in rows:
            if not row[1]:
                null_count += 1
                continue
            # Split the gs:// URI into its components.
            (bucket, path, filename) = process_gs_uri(row[1], log)
            if not bucket:
                log.warning('\t\tskipping invalid path: {}'.format(row[1]))
                continue
            bam_data[table].append({
                'id': row[0],
                'bucket': bucket,
                'path': path,
                'bam': filename,
                'bai': row[2]
            })
            # Remember every bucket that holds bam/bai files.
            used_buckets[bucket] = True
        log.info('\t\t{} records are NULL in either file_name_key or index_file_name'.format(null_count))
    log.info('\tend get_bambai_from_database()')
    return (bam_data, used_buckets)
def parse_args():
    """Parse command-line options.

    :return: (config_path, bai_file_path, sql_file_path) tuple
    """
    parser = argparse.ArgumentParser(description='update_bambai_filenames.py')
    parser.add_argument('-c', '--config', type=str, required=True,
                        help='config file path')
    parser.add_argument('-b', '--bai-file', type=str, required=True,
                        help='file with list of bam/bai files')
    parser.add_argument('-s', '--sql-file', type=str, required=True,
                        help='file to output sql update commands')
    parsed = parser.parse_args()
    return (parsed.config, parsed.bai_file, parsed.sql_file)
def main(config_file_path, bai_file_path, sql_file_path):
    """Drive the index-filename repair workflow.

    Loads the JSON config, reads the bucket file listing, queries cloud SQL
    for BAM records, diffs the recorded index filenames against the bucket
    contents, and writes SQL UPDATE statements for mismatches to
    sql_file_path. Missing index files are only logged, not written out.
    """
    # Per-run log directory, e.g. 2018_01_12_update_bambai_filenames/
    log_dir = str(date.today()).replace('-', '_') + '_update_bambai_filenames/'
    log_name = create_log(log_dir, 'update_bambai_filenames')
    log = logging.getLogger(log_name)
    log.info('begin update_bambai_filenames.py')
    # open config file
    with open(config_file_path) as config_file:
        config = json.load(config_file)
    # load list of bam/bai files into dict (keys are gs:// URIs)
    bai_dict = load_bai_file_list(bai_file_path)
    log.info('{} elements in bai_dict'.format(len(bai_dict)))
    # query database for bam/bai files
    (bam_data, used_buckets) = get_bambai_from_database(config, log)
    for table in bam_data:
        log.info('found {} non-NULL entries in table {}'.format(
            len(bam_data[table]), table
        ))
    log.info('used buckets: ')
    for bucket in used_buckets:
        log.info(' - {}'.format(bucket))
    # check and update filenames for consistency
    (update_list, missing_list) = check_for_valid_index_files(bam_data, bai_dict, log)
    # write sql update statements to sql file
    with open(sql_file_path, 'w') as f:
        for table in update_list:
            log.info('found {} index filenames that need updating in table {}'.format(
                len(update_list[table]), table
            ))
            log.info('{} index files missing (in cloud SQL table {}, but not in bucket)'.format(
                len(missing_list[table]), table
            ))
            for update_item in update_list[table]:
                # NOTE(review): values are interpolated directly into SQL;
                # acceptable only because they come from our own database and
                # bucket listing, not from user input.
                f.write("update {} set index_file_name = '{}' where metadata_data_id = {};\n".format(
                    table, update_item['index_file'], update_item['id']
                ))
    log.info('end update_bambai_filenames.py')
# Script entry point: parse CLI arguments, then run the update workflow.
if __name__ == '__main__':
    (
        config_file_path,
        bai_file_path,
        sql_file_path
    ) = parse_args()
    main(config_file_path, bai_file_path, sql_file_path)
| {
"content_hash": "54953f1662287b1c005609785de88487",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 103,
"avg_line_length": 32.83274021352313,
"alnum_prop": 0.50845436809018,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "d9edd7a8efe886a307269267827d90239a3cf6b8",
"size": "9226",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gdc/main/update_bambai_filenames.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
from MiniREST.RESTServer import RESTServer, responseCodes, responseTypes
class MyServer(RESTServer):
    """Example REST server extending MiniREST's RESTServer.

    Fix: the original used bare string literals as pseudo-comments; those are
    evaluated-and-discarded expression statements (only the first string of a
    suite is a docstring), so they are rewritten here as real comments.
    Behavior is unchanged.
    """

    def __init__(self, *args, **kwargs):
        # Accept any arguments (e.g. bind address/port) and forward them to
        # the superclass constructor.
        super(MyServer, self).__init__(*args, **kwargs)
        # Register a callback function. This one will respond to
        # http://ip/hello. Only one level of routing: http://ip/hello/meow
        # routes to the same callback.
        self.registerFunction('hello', self.hello, token=True)

    def hello(self, env, start_response, post):
        """Callback for /hello.

        'env' contains the raw request data; the superclass processes data
        sent via RESTClient into the 'post' parameter.
        """
        name = post['name']
        # Start a response: HTTP 200, type plaintext.
        start_response(responseCodes[200], responseTypes['plaintext'])
        # Content of our message.
        return "Hello %s" % name
"""Create an instance of your server."""
my_server = MyServer(token="secret")
"""
You can also pass in a port range and it will start on the first available port
SSL and tokens are optional.
my_server = MyServer(bind='0.0.0.0', port=8000, SSLKey="ssl.key", SSLCert="ssl.crt", token="secret_auth_token")
my_server = MyServer(bind='0.0.0.0', portRange=[8000,10000], SSLKey="ssl.key", SSLCert="ssl.crt", token="secret_auth_token")
"""
"""Start in blocking / non-blocking mode"""
my_server.start(block=True)
| {
"content_hash": "7a8c4307a3f5f87efb2f7216c729fced",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 124,
"avg_line_length": 44.23529411764706,
"alnum_prop": 0.6575797872340425,
"repo_name": "amplify-education/minirest",
"id": "3a4e532c6126df5f79d01e360e701cc8d44a1f22",
"size": "1526",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24291"
},
{
"name": "Shell",
"bytes": "749"
}
],
"symlink_target": ""
} |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# For compatibility with different OSes
# Edit PYTHONPATH to be able to import common_functions
import sys
sys.path.append("/usr/lib/ambari-agent/lib/")
########################################################
import os
import string
from ambari_commons import subprocess32
import logging
import shutil
import platform
import fnmatch
import ConfigParser
import optparse
import shlex
import datetime
import tempfile
import glob
import pwd
import re
from AmbariConfig import AmbariConfig
from ambari_commons.constants import AGENT_TMP_DIR
from ambari_commons import OSCheck, OSConst
from ambari_commons.constants import AMBARI_SUDO_BINARY
from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl
logger = logging.getLogger()
# Shell command templates; {0}/{1} placeholders are filled via str.format.
USER_ERASE_CMD = "userdel -rf {0}"
GROUP_ERASE_CMD = "groupdel {0}"
PROC_KILL_CMD = "kill -9 {0}"
ALT_DISP_CMD = "alternatives --display {0}"
ALT_ERASE_CMD = "alternatives --remove {0} {1}"
RUN_HOST_CHECKS_CMD = '/var/lib/ambari-agent/cache/custom_actions/scripts/check_host.py ACTIONEXECUTE {0} /var/lib/ambari-agent/cache/custom_actions {1} INFO {2}'
# Per-OS-family repository configuration directories.
REPO_PATH_RHEL = "/etc/yum.repos.d"
REPO_PATH_SUSE = "/etc/zypp/repos.d/"
REPO_PATH_UBUNTU = "/etc/apt/sources.list.d"
# Sections named here are skipped by do_cleanup; empty by default.
SKIP_LIST = []
# File names for host-check results and this tool's own output.
TMP_HOST_CHECK_FILE_NAME = "tmp_hostcheck.result"
HOST_CHECK_FILE_NAME = "hostcheck.result"
HOST_CHECK_CUSTOM_ACTIONS_FILE = "hostcheck_custom_actions.result"
OUTPUT_FILE_NAME = "hostcleanup.result"
# INI section and key names used inside the host-check result file.
PACKAGE_SECTION = "packages"
PACKAGE_KEY = "pkg_list"
USER_SECTION = "users"
USER_KEY = "usr_list"
USER_HOMEDIR_KEY = "usr_homedir_list"
USER_HOMEDIR_SECTION = "usr_homedir"
REPO_SECTION = "repositories"
REPOS_KEY = "repo_list"
DIR_SECTION = "directories"
ADDITIONAL_DIRS = "additional_directories"
DIR_KEY = "dir_list"
# Cache file masks, per sub-folder, consumed by do_clear_cache.
CACHE_FILES_PATTERN = {
  'alerts': ['*.json']
}
PROCESS_SECTION = "processes"
PROCESS_KEY = "proc_list"
PROCESS_OWNER_KEY = "proc_owner_list"
PROCESS_IDENTIFIER_KEY = "proc_identifier"
ALT_SECTION = "alternatives"
ALT_KEYS = ["symlink_list", "target_list"]
HADOOP_GROUP = "hadoop"
# Folders scanned when deleting files owned by removed users.
FOLDER_LIST = ["/tmp"]
# Additional path patterns to find existing directory
DIRNAME_PATTERNS = [
  "/tmp/hadoop-", "/tmp/hsperfdata_"
]
# resources that should not be cleaned
REPOSITORY_BLACK_LIST = ["ambari.repo"]
PACKAGES_BLACK_LIST = ["ambari-server", "ambari-agent"]
USER_BLACK_LIST = ["root"]
def get_erase_cmd():
  """Return the package-removal command template for the local OS family.

  The returned string contains a {0} placeholder for the package name.
  Raises Exception when the OS family is not supported.
  """
  if OSCheck.is_redhat_family():
    return "yum erase -y {0}"
  if OSCheck.is_suse_family():
    return "zypper -n -q remove {0}"
  if OSCheck.is_ubuntu_family():
    return "/usr/bin/apt-get -y -q remove {0}"
  raise Exception("Unsupported OS family '{0}', cannot remove package. ".format(OSCheck.get_os_family()))
class HostCleanup:
  def resolve_ambari_config(self):
    """Load the Ambari agent configuration.

    Reads AmbariConfig.getConfigFile() when it exists; otherwise logs a
    warning and falls back to the default AmbariConfig values.
    NOTE(review): if AmbariConfig() itself raises, `config` is never bound
    and the final `return config` raises NameError -- confirm intended.
    """
    try:
      config = AmbariConfig()
      if os.path.exists(AmbariConfig.getConfigFile()):
        config.read(AmbariConfig.getConfigFile())
      else:
        raise Exception("No config found, use default")
    except Exception, err:
      logger.warn(err)
    return config
def get_additional_dirs(self):
resultList = []
dirList = set()
for patern in DIRNAME_PATTERNS:
dirList.add(os.path.dirname(patern))
for folder in dirList:
for dirs in os.walk(folder):
for dir in dirs:
for patern in DIRNAME_PATTERNS:
if patern in dir:
resultList.append(dir)
return resultList
def do_cleanup(self, argMap=None):
if argMap:
packageList = argMap.get(PACKAGE_SECTION)
userList = argMap.get(USER_SECTION)
homeDirList = argMap.get(USER_HOMEDIR_SECTION)
dirList = argMap.get(DIR_SECTION)
repoList = argMap.get(REPO_SECTION)
proc_map = argMap.get(PROCESS_SECTION)
procList = proc_map.get(PROCESS_KEY)
procUserList = proc_map.get(PROCESS_OWNER_KEY)
procIdentifierList = proc_map.get(PROCESS_IDENTIFIER_KEY)
alt_map = argMap.get(ALT_SECTION)
additionalDirList = self.get_additional_dirs()
if userList and not USER_SECTION in SKIP_LIST:
userIds = self.get_user_ids(userList)
if procList and not PROCESS_SECTION in SKIP_LIST:
logger.info("\n" + "Killing pid's: " + str(procList) + "\n")
self.do_kill_processes(procList)
if procIdentifierList and not PROCESS_SECTION in SKIP_LIST:
self.do_kill_processes_by_identifier(procIdentifierList)
if procUserList and not PROCESS_SECTION in SKIP_LIST:
logger.info("\n" + "Killing pids owned by: " + str(procUserList) + "\n")
self.do_kill_processes_by_users(procUserList)
if packageList and not PACKAGE_SECTION in SKIP_LIST:
logger.info("Deleting packages: " + str(packageList) + "\n")
self.do_erase_packages(packageList)
if userList and not USER_SECTION in SKIP_LIST:
logger.info("\n" + "Deleting users: " + str(userList))
self.do_delete_users(userList)
self.do_erase_dir_silent(homeDirList)
self.do_delete_by_owner(userIds, FOLDER_LIST)
if dirList and not DIR_SECTION in SKIP_LIST:
logger.info("\n" + "Deleting directories: " + str(dirList))
self.do_erase_dir_silent(dirList)
if additionalDirList and not ADDITIONAL_DIRS in SKIP_LIST:
logger.info("\n" + "Deleting additional directories: " + str(dirList))
self.do_erase_dir_silent(additionalDirList)
if repoList and not REPO_SECTION in SKIP_LIST:
repoFiles = self.find_repo_files_for_repos(repoList)
logger.info("\n" + "Deleting repo files: " + str(repoFiles))
self.do_erase_files_silent(repoFiles)
if alt_map and not ALT_SECTION in SKIP_LIST:
logger.info("\n" + "Erasing alternatives: " + str(alt_map) + "\n")
self.do_erase_alternatives(alt_map)
return 0
  def read_host_check_file(self, config_file_path):
    """Parse a hostcheck.result INI file into a property map.

    Returns a dict keyed by section name (packages, processes, users,
    usr_homedir, repositories, directories, alternatives); the processes and
    alternatives entries are nested dicts of their keys. Returns None when
    the file is missing or unreadable. Each section is parsed independently:
    a failure in one section only logs a warning and skips that section.
    """
    propertyMap = {}
    # First make sure the file can be opened at all.
    try:
      with open(config_file_path, 'r'):
        pass
    except Exception, e:
      logger.error("Host check result not found at: " + str(config_file_path))
      return None
    try:
      config = ConfigParser.RawConfigParser()
      config.read(config_file_path)
    except Exception, e:
      logger.error("Cannot read host check result: " + str(e))
      return None
    # Initialize map from file
    try:
      if config.has_option(PACKAGE_SECTION, PACKAGE_KEY):
        propertyMap[PACKAGE_SECTION] = config.get(PACKAGE_SECTION, PACKAGE_KEY).split(',')
    except:
      logger.warn("Cannot read package list: " + str(sys.exc_info()[0]))
    try:
      # The processes section is stored as a nested dict of its three keys.
      proc_map = {}
      if config.has_option(PROCESS_SECTION, PROCESS_KEY):
        proc_map[PROCESS_KEY] = config.get(PROCESS_SECTION, PROCESS_KEY).split(',')
      if config.has_option(PROCESS_SECTION, PROCESS_OWNER_KEY):
        proc_map[PROCESS_OWNER_KEY] = config.get(PROCESS_SECTION, PROCESS_OWNER_KEY).split(',')
      if config.has_option(PROCESS_SECTION, PROCESS_IDENTIFIER_KEY):
        proc_map[PROCESS_IDENTIFIER_KEY] = config.get(PROCESS_SECTION, PROCESS_IDENTIFIER_KEY).split(',')
      if proc_map:
        propertyMap[PROCESS_SECTION] = proc_map
    except:
      logger.warn("Cannot read process list: " + str(sys.exc_info()))
    try:
      if config.has_option(USER_SECTION, USER_KEY):
        propertyMap[USER_SECTION] = config.get(USER_SECTION, USER_KEY).split(',')
    except:
      logger.warn("Cannot read user list: " + str(sys.exc_info()[0]))
    try:
      if config.has_option(USER_SECTION, USER_HOMEDIR_KEY):
        propertyMap[USER_HOMEDIR_SECTION] = config.get(USER_SECTION, USER_HOMEDIR_KEY).split(',')
    except:
      logger.warn("Cannot read user homedir list: " + str(sys.exc_info()[0]))
    try:
      if config.has_option(REPO_SECTION, REPOS_KEY):
        propertyMap[REPO_SECTION] = config.get(REPO_SECTION, REPOS_KEY).split(',')
    except:
      logger.warn("Cannot read repositories list: " + str(sys.exc_info()[0]))
    try:
      if config.has_option(DIR_SECTION, DIR_KEY):
        propertyMap[DIR_SECTION] = config.get(DIR_SECTION, DIR_KEY).split(',')
    except:
      logger.warn("Cannot read dir list: " + str(sys.exc_info()[0]))
    try:
      # Alternatives are stored as a nested dict of symlink and target lists.
      alt_map = {}
      if config.has_option(ALT_SECTION, ALT_KEYS[0]):
        alt_map[ALT_KEYS[0]] = config.get(ALT_SECTION, ALT_KEYS[0]).split(',')
      if config.has_option(ALT_SECTION, ALT_KEYS[1]):
        alt_map[ALT_KEYS[1]] = config.get(ALT_SECTION, ALT_KEYS[1]).split(',')
      if alt_map:
        propertyMap[ALT_SECTION] = alt_map
    except:
      logger.warn("Cannot read alternates list: " + str(sys.exc_info()[0]))
    return propertyMap
def get_alternatives_desc(self, alt_name):
    """Return the 'priority' lines of ``alternatives --display <alt_name>``.

    The display command is piped through grep; None is returned when the
    pipeline cannot be run.
    """
    output = None
    display_cmd = ALT_DISP_CMD.format(alt_name)
    try:
        display_proc = subprocess32.Popen(shlex.split(display_cmd),
                                          stdout=subprocess32.PIPE)
        grep_proc = subprocess32.Popen(["grep", "priority"],
                                       stdin=display_proc.stdout,
                                       stdout=subprocess32.PIPE)
        # Close our handle so display_proc sees SIGPIPE if grep exits early.
        display_proc.stdout.close()
        output = grep_proc.communicate()[0]
        logger.debug('alternatives --display ' + alt_name + '\n, out = ' + output)
    except:
        logger.warn('Cannot process alternative named: ' + alt_name + ',' + \
                    'error: ' + str(sys.exc_info()[0]))
    return output
def do_clear_cache(self, cache_root, dir_map=None):
    """
    Clear cache dir according to provided root directory

    cache_root - root dir for cache directory
    dir_map - should be used only for recursive calls

    NOTE(review): the return value is asymmetric by design - recursive
    calls (dir_map is not None) return the accumulated file list, while
    the root call deletes the files and implicitly returns None.
    """
    global CACHE_FILES_PATTERN  # read-only access; global is not strictly needed
    # Root call walks the module-level pattern tree; recursive calls walk
    # the sub-tree that was passed in.
    file_map = CACHE_FILES_PATTERN if dir_map is None else dir_map
    remList = []
    # Build remove list according to masks
    for folder in file_map:
        if isinstance(file_map[folder], list):  # here is list of file masks/files
            for mask in file_map[folder]:
                remList += self.get_files_in_dir("%s/%s" % (cache_root, folder), mask)
        elif isinstance(file_map[folder], dict):  # here described sub-folder
            remList += self.do_clear_cache("%s/%s" % (cache_root, folder), file_map[folder])
    if dir_map is not None:  # push result list back as this is call from stack
        return remList
    else:  # root call, so we have final list
        self.do_erase_files_silent(remList)
# Alternatives exist as a stack of symlinks under /var/lib/alternatives/$name
# Script expects names of the alternatives as input
# We find all the symlinks using command, #] alternatives --display $name
# and delete them using command, #] alternatives --remove $name $path.
def do_erase_alternatives(self, alt_map):
    """Remove each named alternative and then erase its config directories."""
    if not alt_map:
        return 0
    for alt_name in (alt_map.get(ALT_KEYS[0]) or []):
        if not alt_name:
            continue
        out = self.get_alternatives_desc(alt_name)
        if not out:
            logger.warn('No alternatives found for: ' + alt_name)
            continue
        # Each non-empty line describes one registered path for the alternative.
        for entry in out.split('\n'):
            if not entry:
                continue
            alt_path = entry.split()[0]
            logger.debug('Erasing alternative named: ' + alt_name + ', ' \
                         'path: ' + alt_path)
            command = ALT_ERASE_CMD.format(alt_name, alt_path)
            (returncode, stdoutdata, stderrdata) = self.run_os_command(command)
            if returncode != 0:
                logger.warn('Failed to remove alternative: ' + alt_name +
                            ", path: " + alt_path + ", error: " + stderrdata)
    # Remove directories - configs
    dir_list = alt_map.get(ALT_KEYS[1])
    if dir_list:
        self.do_erase_dir_silent(dir_list)
    return 0
def do_kill_processes(self, pidList):
    """Issue the configured kill command for every pid in pidList."""
    for pid in (pidList or []):
        if not pid:
            continue
        command = PROC_KILL_CMD.format(pid)
        (returncode, stdoutdata, stderrdata) = self.run_os_command(command)
        if returncode != 0:
            logger.error("Unable to kill process with pid: " + str(pid) + ", " + str(stderrdata))
    return 0
def getProcsByUsers(self, users, pidList):
    """Append to pidList every /proc pid owned by a user in `users`.

    Users in USER_BLACK_LIST are never matched; unreadable /proc entries
    are skipped silently (debug-logged).
    """
    logger.debug("User list: "+str(users))
    numeric_pids = [entry for entry in os.listdir('/proc') if entry.isdigit()]
    logger.debug("All pids under /proc: "+str(numeric_pids));
    for pid in numeric_pids:
        logger.debug("Checking " + str(pid))
        try:
            with open(os.path.join('/proc', pid, 'status'), 'r') as status_file:
                for line in status_file:
                    if not line.startswith('Uid:'):
                        continue
                    # Second field of the Uid: line is the real uid.
                    owner = pwd.getpwuid(int(line.split()[1])).pw_name
                    logger.debug("User: "+owner);
                    if owner in users and owner not in USER_BLACK_LIST:
                        logger.info(owner + " started process " + str(pid))
                        pidList.append(int(pid))
        except:
            logger.debug(str(sys.exc_info()))
def do_kill_processes_by_users(self, userList):
    """Kill every process owned by one of the given users."""
    victims = []
    self.getProcsByUsers(userList, victims)
    logger.info("Killing pids: "+ str(victims) + " owned by " + str(userList))
    return self.do_kill_processes(victims)
def do_kill_processes_by_identifier(self, identifierList):
    """Kill processes whose `ps auxww` line contains any given identifier."""
    matched_pids = []
    (returncode, stdoutdata, stderrdata) = self.run_os_command("ps auxww")
    multispace = re.compile("\s\s+")
    if returncode == 0 and stdoutdata:
        for line in stdoutdata.split('\n'):
            line = line.strip()
            for identifier in identifierList:
                identifier = identifier.strip()
                if identifier not in line:
                    continue
                logger.debug("Found " + line + " for " + identifier);
                # Collapse runs of whitespace so the pid is always token #1.
                line = multispace.sub(" ", line)
                tokens = line.split(' ')
                logger.debug(tokens)
                logger.debug(len(tokens))
                if len(tokens) > 1:
                    pid = str(tokens[1]).strip()
                    if pid and pid not in matched_pids:
                        logger.info("Adding pid: "+str(pid) + " for " + identifier)
                        matched_pids.append(pid)
    return self.do_kill_processes(matched_pids)
def get_files_in_dir(self, dirPath, filemask = None):
    """Return regular files directly under dirPath, optionally filtered.

    filemask is an fnmatch pattern tested against the full path; symlinks
    and sub-directories are skipped; a missing or empty dirPath yields [].
    """
    matches = []
    if not dirPath or not os.path.exists(dirPath):
        return matches
    for entry in os.listdir(dirPath):
        path = dirPath + os.sep + entry
        if os.path.islink(path) or os.path.isdir(path):
            continue
        if filemask is None or fnmatch.fnmatch(path, filemask):
            matches.append(path)
    return matches
def find_repo_files_for_repos(self, repoNames):
    """Return repository definition files that reference any of repoNames.

    A file matches when some line starts with "[<name>]" or "name=<name>".
    Returns [] on unsupported OS families.
    """
    repoFiles = []
    osType = OSCheck.get_os_family()
    markers = []
    for repoName in repoNames:
        if len(repoName.strip()) > 0:
            markers.append("[" + repoName + "]")
            markers.append("name=" + repoName)
    if not markers:
        return repoFiles
    # Repo file location depends on the package manager family.
    if OSCheck.is_suse_family():
        fileList = self.get_files_in_dir(REPO_PATH_SUSE)
    elif OSCheck.is_redhat_family():
        fileList = self.get_files_in_dir(REPO_PATH_RHEL)
    elif OSCheck.is_ubuntu_family():
        fileList = self.get_files_in_dir(REPO_PATH_UBUNTU)
    else:
        logger.warn("Unsupported OS type, cannot get repository location.")
        return []
    for filePath in (fileList or []):
        with open(filePath, 'r') as repo_file:
            line = repo_file.readline()
            while (line != ""):
                for marker in markers:
                    if line.find(marker) == 0 and filePath not in repoFiles:
                        repoFiles.append(filePath)
                        break;
                line = repo_file.readline()
    return repoFiles
def do_erase_packages(self, packageList):
    """Remove the given packages via the platform's package manager."""
    if not packageList:
        return 0
    packages = ' '.join(packageList)
    logger.debug("Erasing packages: " + packages)
    if packages:
        command = get_erase_cmd().format(packages)
        if command != '':
            logger.debug('Executing: ' + str(command))
            (returncode, stdoutdata, stderrdata) = self.run_os_command(command)
            if returncode != 0:
                logger.warn("Erasing packages failed: " + stderrdata)
            else:
                logger.info("Erased packages successfully.\n" + stdoutdata)
    return 0
def do_erase_dir_silent(self, pathList):
    """Delete each entry of pathList (glob patterns allowed).

    Directories are removed recursively; files and broken symlinks are
    delegated to do_erase_files_silent; failures are only logged.
    """
    for pattern in (pathList or []):
        expanded = glob.glob(pattern)
        logger.debug("Resolved {0} to {1}".format(pattern, ','.join(expanded)))
        for path in expanded:
            if not path:
                continue
            if os.path.exists(path):
                if os.path.isdir(path):
                    try:
                        shutil.rmtree(path)
                    except:
                        logger.warn("Failed to remove dir {0} , error: {1}".format(path, str(sys.exc_info()[0])))
                else:
                    logger.info("{0} is a file, deleting file".format(path))
                    self.do_erase_files_silent([path])
            elif os.path.islink(path):
                # exists() is False for dangling symlinks, so handle them here.
                logger.info("Deleting broken symbolic link {0}".format(path))
                self.do_erase_files_silent([path])
            else:
                logger.info("Path doesn't exists: {0}".format(path))
    return 0
def do_erase_files_silent(self, pathList):
    """Unlink every file in pathList; failures and missing paths are only logged."""
    for path in (pathList or []):
        if path and ( os.path.exists(path) or os.path.islink(path) ):
            try:
                os.remove(path)
            except:
                logger.warn("Failed to delete file: {0}, error: {1}".format(path, str(sys.exc_info()[0])))
        else:
            logger.info("File doesn't exists: {0}".format(path))
    return 0
def do_delete_group(self):
    """Delete the hadoop OS group, logging the outcome either way."""
    command = GROUP_ERASE_CMD.format(HADOOP_GROUP)
    (returncode, stdoutdata, stderrdata) = self.run_os_command(command)
    if returncode == 0:
        logger.info("Successfully deleted group: " + HADOOP_GROUP)
    else:
        logger.warn("Cannot delete group : " + HADOOP_GROUP + ", " + stderrdata)
def do_delete_by_owner(self, userIds, folders):
    """Erase every direct child of each folder whose owner uid is in userIds."""
    for folder in folders:
        for name in os.listdir(folder):
            candidate = os.path.join(folder, name)
            try:
                info = os.stat(candidate)
            except OSError:
                info = None
                logger.warn("Cannot stat file, skipping: " + candidate)
            if info and info.st_uid in userIds:
                self.do_erase_dir_silent([candidate])
                logger.info("Deleting file/folder: " + candidate)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def get_user_ids(self, userList):
    """Windows variant: uid lookup is not implemented, so report no ids."""
    # No user ids to check in Windows for now
    return []
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def get_user_ids(self, userList):
    """Map each user name in userList to its numeric uid, skipping unknowns."""
    from pwd import getpwnam
    uids = []
    for user in (userList or []):
        if not user:
            continue
        try:
            uids.append(getpwnam(user).pw_uid)
        except Exception:
            logger.warn("Cannot find user : " + user)
    return uids
def do_delete_users(self, userList):
    """Delete each OS user in userList, then drop the hadoop group."""
    if not userList:
        return 0
    for user in userList:
        if not user:
            continue
        command = USER_ERASE_CMD.format(user)
        (returncode, stdoutdata, stderrdata) = self.run_os_command(command)
        if returncode == 0:
            logger.info("Successfully deleted user: " + user)
        else:
            logger.warn("Cannot delete user : " + user + ", " + stderrdata)
    self.do_delete_group()
    return 0
def is_current_user_root(self):
    """Return True when the current real uid is root (0)."""
    return 0 == os.getuid()
# Run command as sudoer by default, if root no issues
def run_os_command(self, cmd, runWithSudo=True):
    """Run a shell command, by default wrapped in ambari-sudo.

    Returns a (returncode, stdout, stderr) tuple from the finished process.
    """
    if runWithSudo:
        cmd = "/var/lib/ambari-agent/"+AMBARI_SUDO_BINARY + " " + cmd
    logger.info('Executing command: ' + str(cmd))
    # Accept either a pre-split argv list or a single command string.
    argv = shlex.split(cmd) if type(cmd) == str else cmd
    process = subprocess32.Popen(argv,
                                 stdout=subprocess32.PIPE,
                                 stdin=subprocess32.PIPE,
                                 stderr=subprocess32.PIPE
                                 )
    (stdoutdata, stderrdata) = process.communicate()
    return process.returncode, stdoutdata, stderrdata
def run_check_hosts(self):
    """Run the BEFORE_CLEANUP_HOST_CHECKS custom action, logging failures.

    Two temp files are created and deliberately left behind (delete=False):
    one with the command parameters and one to receive the check output.
    """
    check_params = '{"commandParams": {"check_execute_list": "*BEFORE_CLEANUP_HOST_CHECKS*"}}'
    with tempfile.NamedTemporaryFile(delete=False) as config_json_file:
        config_json_file.write(check_params)
    with tempfile.NamedTemporaryFile(delete=False) as tmp_output_file:
        tmp_output_file.write('{}')
    check_cmd = RUN_HOST_CHECKS_CMD.format(config_json_file.name, tmp_output_file.name, AGENT_TMP_DIR)
    (returncode, stdoutdata, stderrdata) = self.run_os_command(check_cmd)
    if returncode != 0:
        logger.warn('Failed to run host checks,\nstderr:\n ' + stderrdata + '\n\nstdout:\n' + stdoutdata)
# Copy file and save with file.# (timestamp)
def backup_file(filePath):
    """Copy filePath to filePath.<YYYYmmddHHMMSS> when it exists.

    Failures are only logged; 0 is returned in every case.
    """
    if filePath is not None and os.path.exists(filePath):
        timestamp = datetime.datetime.now()
        format = '%Y%m%d%H%M%S'
        try:
            shutil.copyfile(filePath, filePath + "." + timestamp.strftime(format))
        except Exception as e:
            # BUG FIX: was `str(filePath, e)`, which itself raises TypeError
            # and masks the original error; format each argument separately.
            # (Also `except (Exception), e` is py2-only; `as` works on 2.6+.)
            logger.warn('Could not backup file "%s": %s' % (str(filePath), str(e)))
    return 0
def get_YN_input(prompt, default):
    """Prompt for a yes/no answer; `default` is returned on empty input."""
    yes = {'yes', 'ye', 'y'}
    no = {'no', 'n'}
    return get_choice_string_input(prompt, default, yes, no)
def get_choice_string_input(prompt, default, firstChoice, secondChoice):
choice = raw_input(prompt).lower()
if choice in firstChoice:
return True
elif choice in secondChoice:
return False
elif choice is "": # Just enter pressed
return default
else:
print "input not recognized, please try again: "
return get_choice_string_input(prompt, default, firstChoice, secondChoice)
pass
def main():
    """Entry point: parse options, confirm destructive steps, run host
    checks if needed, then clean up everything listed in the host check
    files and the agent cache. Must run as root."""
    h = HostCleanup()
    config = h.resolve_ambari_config()
    # Default input/output paths are derived from the agent prefix directory.
    hostCheckFileDir = config.get('agent', 'prefix')
    hostCheckFilePath = os.path.join(hostCheckFileDir, HOST_CHECK_FILE_NAME)
    hostCheckCustomActionsFilePath = os.path.join(hostCheckFileDir, HOST_CHECK_CUSTOM_ACTIONS_FILE)
    hostCheckFilesPaths = hostCheckFilePath + "," + hostCheckCustomActionsFilePath
    hostCheckResultPath = os.path.join(hostCheckFileDir, OUTPUT_FILE_NAME)
    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="output verbosity.")
    parser.add_option("-f", "--file", dest="inputfiles",
                      default=hostCheckFilesPaths,
                      help="host check result file to read.", metavar="FILE")
    parser.add_option("-o", "--out", dest="outputfile",
                      default=hostCheckResultPath,
                      help="log file to store results.", metavar="FILE")
    parser.add_option("-k", "--skip", dest="skip",
                      help="(packages|users|directories|repositories|processes|alternatives)." + \
                           " Use , as separator.")
    parser.add_option("-s", "--silent",
                      action="store_true", dest="silent", default=False,
                      help="Silently accepts default prompt values")
    (options, args) = parser.parse_args()
    # set output file
    # Preserve any previous run's log before the FileHandler truncates it.
    backup_file(options.outputfile)
    global logger
    logger = logging.getLogger('HostCleanup')
    handler = logging.FileHandler(options.outputfile)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    # set verbose
    if options.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    # --skip overrides the module-level skip list used by do_cleanup.
    if options.skip is not None:
        global SKIP_LIST
        SKIP_LIST = options.skip.split(',')
    is_root = h.is_current_user_root()
    if not is_root:
        raise RuntimeError('HostCleanup needs to be run as root.')
    # Interactive safety prompt: deleting users is destructive and on by default.
    if not options.silent:
        if "users" not in SKIP_LIST:
            delete_users = get_YN_input('You have elected to remove all users as well. If it is not intended then use '
                                        'option --skip \"users\". Do you want to continue [y/n] (y)', True)
            if not delete_users:
                print 'Exiting. Use option --skip="users" to skip deleting users'
                sys.exit(1)
    hostcheckfile, hostcheckfileca = options.inputfiles.split(",")
    # Manage non UI install
    # Without the custom-actions result file the checks are (re)run first.
    if not os.path.exists(hostcheckfileca):
        if options.silent:
            print 'Host Check results not found. There is no {0}. Running host checks.'.format(hostcheckfileca)
            h.run_check_hosts()
        else:
            run_check_hosts_input = get_YN_input('Host Check results not found. There is no {0}. Do you want to run host checks [y/n] (y)'.format(hostcheckfileca), True)
            if run_check_hosts_input:
                h.run_check_hosts()
    # Concatenate both host-check files into one temp file for parsing.
    with open(TMP_HOST_CHECK_FILE_NAME, "wb") as tmp_f:
        with open(hostcheckfile, "rb") as f1:
            with open(hostcheckfileca, "rb") as f2:
                tmp_f.write(f1.read())
                tmp_f.write(f2.read())
    propMap = h.read_host_check_file(TMP_HOST_CHECK_FILE_NAME)
    if propMap:
        h.do_cleanup(propMap)
    if os.path.exists(config.get('agent', 'cache_dir')):
        h.do_clear_cache(config.get('agent', 'cache_dir'))
    logger.info('Clean-up completed. The output is at %s' % (str(options.outputfile)))

if __name__ == '__main__':
    main()
| {
"content_hash": "8a09d76013d3f3fa917c80794e2bb925",
"timestamp": "",
"source": "github",
"line_count": 709,
"max_line_length": 163,
"avg_line_length": 36.85331452750353,
"alnum_prop": 0.6299513950017223,
"repo_name": "sekikn/ambari",
"id": "07224aeb3ca7ca9326a4b62fe96101556092919a",
"size": "26152",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "ambari-agent/src/main/python/ambari_agent/HostCleanup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22734"
},
{
"name": "C",
"bytes": "109499"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "CSS",
"bytes": "616806"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "Dockerfile",
"bytes": "8117"
},
{
"name": "HTML",
"bytes": "3725781"
},
{
"name": "Handlebars",
"bytes": "1594385"
},
{
"name": "Java",
"bytes": "26670585"
},
{
"name": "JavaScript",
"bytes": "14647486"
},
{
"name": "Jinja",
"bytes": "147938"
},
{
"name": "Less",
"bytes": "303080"
},
{
"name": "Makefile",
"bytes": "2407"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "298247"
},
{
"name": "PowerShell",
"bytes": "2047735"
},
{
"name": "Python",
"bytes": "7226684"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Shell",
"bytes": "350773"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim Script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "1133"
}
],
"symlink_target": ""
} |
def patch():
    """
    Disable creation of foreign key constraints, unsupported from Google Cloud SQL
    """
    from django.db.models import get_apps, get_models
    from django.db.models.fields.related import RelatedField
    for app in get_apps():
        for model in get_models(app):
            meta = model._meta
            # Django >= 1.8 exposes get_fields(); older versions use .fields.
            if hasattr(meta, 'get_fields'):
                fields = meta.get_fields(include_parents=False)
            else:
                fields = meta.fields
            for field in fields:
                if isinstance(field, RelatedField):
                    field.db_constraint = False
| {
"content_hash": "e2efd8b985d8ecf21ee26bb08046ef30",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 40.857142857142854,
"alnum_prop": 0.6241258741258742,
"repo_name": "trik/djangae",
"id": "5311d37a42df04de377f67bb7d12e2eaa672fc4c",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangae/patches/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "277"
},
{
"name": "Python",
"bytes": "624663"
},
{
"name": "Shell",
"bytes": "368"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import json
import logging
import requests
from flask import jsonify, make_response
from flask.ext.httpauth import HTTPBasicAuth
from flask.ext.restful import Resource, reqparse
from flask.ext.restful.representations.json import output_json
# Patch flask-restful's JSON representation so responses are emitted as
# UTF-8 without escaping non-ASCII characters.
output_json.func_globals['settings'] = {'ensure_ascii': False,
                                        'encoding': 'utf8'}
# Reuse the application's root logger; HTTP basic auth protects the API below.
logger = logging.getLogger('__main__')
auth = HTTPBasicAuth()
@auth.get_password
def get_password(username):
    """Return the basic-auth password for `username`, or None to reject."""
    # NOTE(review): credential is hard-coded in source; consider moving it
    # to configuration or a secrets store.
    if username == 'user':
        return 'text2features'
    return None
@auth.error_handler
def unauthorized():
    """Reject unauthenticated requests with a JSON error body.

    A 403 is returned instead of 401 to prevent browsers from displaying
    the default auth dialog.
    """
    body = jsonify({'message': 'Unauthorized access'})
    return make_response(body, 403)
class JoshuaAPI(Resource):
    """Accept a JSON body with a 'content' field and return the translation
    produced by the linked joshua container."""
    decorators = [auth.login_required]

    def __init__(self):
        # flask-restful creates a fresh Resource per request, so parsing the
        # request body here gives per-request state.
        self.reqparse = reqparse.RequestParser()
        self.reqparse.add_argument('content', type=unicode, location='json')
        args = self.reqparse.parse_args()  # setup the request parameters
        self.content = args['content']
        self.result = {}
        super(JoshuaAPI, self).__init__()

    def post(self):
        """Translate the posted content; respond with the result and HTTP 201."""
        logger.info('Started processing content.')
        # BUG FIX: the return value of call_joshua() was discarded, so the
        # endpoint always answered with the empty dict set in __init__.
        self.result = self.call_joshua()
        logger.info('Finished processing content')
        return self.result, 201

    def call_joshua(self):
        """Send the content to the joshua container; return {} if unreachable."""
        # BUG FIX: was self.arabic_content, which is never set (__init__
        # stores the parsed field as self.content) and raised AttributeError.
        joshua_payload = json.dumps({'text': self.content})
        joshua_headers = {'Content-Type': 'application/json'}
        try:
            # Docker-style linked-container environment variable.
            joshua_ip = os.environ['JOSHUA_PORT_5009_TCP_ADDR']
            joshua_url = 'http://{}:{}/joshua/translate'.format(joshua_ip,
                                                                '5009')
            logger.info('Sending to joshua.')
            joshua_r = requests.get(joshua_url, data=joshua_payload,
                                    headers=joshua_headers).json()
            joshua_r = joshua_r.replace('\n', '')
            logger.info(joshua_r)
        except KeyError:
            logger.warning('Unable to reach joshua container. Returning nothing.')
            joshua_r = {}
        return joshua_r
| {
"content_hash": "9843143cb905180e34515938578c8e64",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 82,
"avg_line_length": 33.89230769230769,
"alnum_prop": 0.613708579210168,
"repo_name": "chilland/hermes",
"id": "4b612d53eadf16f076da8074cae91930d4020d85",
"size": "2222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/joshua.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34156"
},
{
"name": "Shell",
"bytes": "444"
}
],
"symlink_target": ""
} |
"""A scraper for downloading checklists from eBird.
This scraper creates checklists for recent observations for a given region
using the eBird API. Additional information for each checklist is also
scraped from the checklist web page.
"""
import json
import os
import re
from scrapy import log
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import BaseSpider
from checklists_scrapers.spiders import DOWNLOAD_FORMAT, DOWNLOAD_LANGUAGE
from checklists_scrapers.spiders.utils import remove_whitespace, select_keys, dedup, \
save_json_data
class JSONParser(object):
    """Extract checklists from JSON data returned from the eBird API."""

    # Keys of an API observation record that identify its location.
    location_keys = [
        'locID',
        'locName',
        'subnational1Name',
        'subnational2Name',
        'countryName',
        'lat',
        'lng',
    ]

    # Keys that together identify a unique checklist: submitter name,
    # observation date and submission id, plus the location keys above.
    checklist_keys = [
        'firstName',
        'lastName',
        'obsDt',
        'subID',
    ] + location_keys

    def __init__(self, response):
        """Initialize the parser with a JSON encoded response.

        Args:
            response (str): an encoded string containing the JSON data returned
                by a call to the eBird API.

        Returns:
            JSONParser: a JSONParser object with the records decided from
                the JSON data.
        """
        self.records = json.loads(response.body_as_unicode())

    def get_checklists(self):
        """Get the set of checklists from the observations."""
        # The API returns one record per species per checklist: project out
        # the checklist-level keys, de-duplicate to one record per checklist,
        # then re-attach the matching observations as entries (matched on
        # the submission id).
        filtered = dedup(select_keys(self.records, self.checklist_keys))
        checklists = [self.get_checklist(record) for record in filtered]
        for checklist in checklists:
            checklist['entries'] = [self.get_entry(r) for r in self.records
                                    if r['subID'] == checklist['identifier']]
        return checklists

    def get_checklist(self, record):
        """Get the fields for a checklist from an observation.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the checklist fields.
        """
        checklist = {
            'meta': {
                'version': DOWNLOAD_FORMAT,
                'language': DOWNLOAD_LANGUAGE,
            },
            'identifier': record['subID'].strip(),
            # 'obsDt' is "<date>" or "<date> <time>"; keep only the date here.
            'date': record['obsDt'].strip().split(' ')[0],
            'location': self.get_location(record),
            'observers': self.get_observers(record),
            'source': self.get_source(record),
        }
        # A time component is present only when one was recorded; its
        # presence decides whether a protocol can be reported.
        if ' ' in record['obsDt']:
            checklist['protocol'] = self.get_protocol(record)
        return checklist

    def get_protocol(self, record):
        """Get the information about the checklist protocol.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the protocol fields.

        A default protocol name of 'Incidental' is used since only the time
        of the observation is currently available.
        """
        return {
            'name': 'Incidental',
            'time': record['obsDt'].strip().split(' ')[1],
        }

    def get_observers(self, record):
        """Get the information about the checklist observers.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the list of observers names.
        """
        first_name = record['firstName'].strip()
        last_name = record['lastName'].strip()
        return {
            'count': 1,
            'names': [first_name + ' ' + last_name],
        }

    def get_source(self, record):
        """Get the information about the source of the checklist.

        Args:
            record (dict): the observation record.

        Returns:
            dict: a dictionary containing the source fields.
        """
        first_name = record['firstName'].strip()
        last_name = record['lastName'].strip()
        return {
            'name': 'eBird',
            'submitted_by': first_name + ' ' + last_name,
        }

    def get_locations(self):
        """Get the set of locations from the observations.

        Returns:
            list(dict): a list of dicts containing the fields for a location.
        """
        filtered = dedup(select_keys(self.records, self.location_keys))
        return [self.get_location(record) for record in filtered]

    def get_location(self, record):
        """Get the fields for a location from an observation.

        Returns:
            dict: a dictionary containing the fields for a location.

        If a given field is not present in the record then the value defaults
        to an empty string. This allows the method to process records that
        contain either the simple results fields or the full results fields.
        """
        # NOTE(review): 'locID', 'locName', 'lat' and 'lng' are accessed
        # directly (no default), so those must be present in both simple
        # and full records - confirm against the eBird API reference.
        return {
            'identifier': record['locID'],
            'name': record['locName'],
            'county': record.get('subnational2Name', ''),
            'region': record.get('subnational1Name', ''),
            'country': record.get('countryName', ''),
            'lat': record['lat'],
            'lon': record['lng'],
        }

    def get_entry(self, record):
        """Get the fields for an entry from an observation.

        Returns:
            dict: a dictionary containing the fields for a checklist entry.
        """
        # NOTE(review): 'obsID' has no default, so entries are assumed to be
        # built from full-detail records only - verify callers.
        return {
            'identifier': record['obsID'],
            'species': self.get_species(record),
            'count': record.get('howMany', 0),
        }

    def get_species(self, record):
        """Get the species fields for an entry from an observation.

        Args:
            record (dict); the observation record,

        Returns:
            dict: a dictionary containing the fields for a species.
        """
        return {
            'name': record['comName'],
            'scientific_name': record['sciName'],
        }
class HTMLParser(object):
    """Extract information from the checklist web page.

    Only the information not available through the API is extracted, with the
    exception of the counts for each species- which has the associated details
    dictionary which contains a breakdown of the count based on age and sex.
    """

    # eBird mixes up activities and protocols a bit so this table is used
    # to map protocol names onto an activity and alternative protocol.
    # Value layout: {page protocol name: (activity, protocol)}.
    activities = {
        'Nocturnal Flight Call Count': (
            'Nocturnal Flight Call Count', 'Stationary'),
        'Heron Area Count': ('Heron Count', 'Area'),
        'Heron Stationary Count': ('Heron Count', 'Stationary'),
    }

    # Activity reported when the page protocol is not in the table above.
    default_activity = 'Birding'

    def __init__(self, response):
        """Initialize the parser with an HTML encoded response.

        Args:
            response (str): the contents of the checklist web page.

        Returns:
            HTMLParser: an HTMLParser object containing the contents of the
                checklist web page and a dict containing the main checklist
                attributes.
        """
        self.docroot = HtmlXPathSelector(response)
        self.attributes = self.get_attributes(self.docroot)

    def get_attributes(self, node):
        """Get the checklist attributes.

        Args:
            node (HtmlXPathSelector): an XML node,

        Returns:
            dict: a dictionary containing the fields and values of a checklist.
        """
        # The page lists attributes as <dl><dt>label</dt><dd>value</dd>;
        # dt/dd pairs are matched by position (idx).
        attr = {}
        for idx, item in enumerate(node.select('//dl/dt/text()')):
            key = item.extract().strip()
            if key == 'Observers:':
                # Observer names appear both as bare text and inside <strong>
                # elements; collect both and re-join with commas.
                names = []
                values = node.select('//dl/dd')[idx].select('text()').extract()
                for value in values:
                    name = value.replace(',', '').strip()
                    if name:
                        names.append(name)
                values = node.select('//dl/dd')[idx]\
                    .select('strong/text()').extract()
                for value in values:
                    name = value.replace(',', '').strip()
                    if name:
                        names.append(name)
                attr[key] = ','.join(names)
            else:
                value = node.select('//dl/dd')[idx].select('text()').extract()
                attr[key] = value[0].strip()
        return attr

    def get_checklist(self):
        """Get the checklist fields extracted ffrom the HTML response.

        Returns:
            dict: a checklist containing the fields extract from the HTML.

        Only the fields not available through the API are extracted from the
        HTML. The parser can be sub-classed to extract any more information.
        """
        return {
            'observers': self.get_observers(),
            'activity': self.get_activity(),
            'protocol': self.get_protocol(),
            'entries': self.get_entries(),
            'comment': self.attributes.get('Comments:', '')
        }

    def get_protocol(self):
        """Get the protocol used for the checklist.

        Returns:
            dict: a dictionary containing the fields describing the protocol
                used to count the birds recorded in the checklist.
        """
        protocol_name = self.attributes.get('Protocol:', None)
        # Page names that are really activities are mapped to their
        # underlying protocol via the activities table.
        if protocol_name in self.activities:
            protocol_name = self.activities[protocol_name][1]
        duration_str = self.attributes.get('Duration:', '')
        if 'hour' in duration_str:
            duration_hours = int(re.search(
                r'(\d+) h', duration_str).group(1))
        else:
            duration_hours = 0
        if 'min' in duration_str:
            duration_minutes = int(re.search(
                r'(\d+) m', duration_str).group(1))
        else:
            duration_minutes = 0
        distance_str = self.attributes.get('Distance:', '0 kilometer(s)')
        # Distance is normalised to metres; miles are converted at 1609 m/mile.
        if 'kilometer' in distance_str:
            distance = int(float(re.search(
                r'([\.\d]+) k', distance_str).group(1)) * 1000)
        else:
            distance = int(float(re.search(
                r'([\.\d]+) m', distance_str).group(1)) * 1609)
        return {
            'name': protocol_name,
            'duration_hours': duration_hours,
            'duration_minutes': duration_minutes,
            'distance': distance,
            'area': 0,
        }

    def get_activity(self):
        """Get the activity used for the checklist.

        Returns:
            str: a name for the activity.

        Uses the activities table to separate out specific activities from
        the names eBird uses for protocols.
        """
        protocol_name = self.attributes.get('Protocol:', None)
        if protocol_name in self.activities:
            activity = self.activities[protocol_name][0]
        else:
            activity = self.default_activity
        return activity

    def get_observers(self):
        """Get the additional observers.

        Returns:
            list(str): the observers, excluding the person who submitted the
                checklist.
        """
        try:
            count = int(self.attributes.get('Party Size:', '0'))
        except ValueError:
            count = 0
        names = remove_whitespace(
            self.attributes.get('Observers:', '').split(','))
        return {
            'count': count,
            'names': names,
        }

    def get_entries(self):
        """Get the checklist entries with any additional details for the count.

        Returns:
            list(dict): a list of dicts contains the fields for a checklist
                entry. In turn each contains a list of dicts containing the
                fields describing the breakdown of the entry count by age and
                sex.
        """
        entries = []
        for selector in self.docroot.select('//tr[@class="spp-entry"]'):
            name = selector.select(
                './/h5[@class="se-name"]/text()').extract()[0].strip()
            count = selector.select(
                './/h5[@class="se-count"]/text()').extract()[0].strip()
            species = {
                'name': name,
            }
            # Non-numeric counts (e.g. an "X" for "present") become 0.
            try:
                count = int(count)
            except ValueError:
                count = 0
            entries.append({
                'species': species,
                'count': count,
                'details': self.get_entry_details(selector),
                'comment': self.get_entry_comment(selector),
            })
        return entries

    def get_entry_comment(self, node):
        """Get any comment for a checklist entry.

        Args:
            node (HtmlXPathSelector): the node in the tree from where to
                extract the comment.

        Returns:
            str: any comment associated with a checklist entry.
        """
        comment = ''
        selection = node.select('.//p[@class="obs-comments"]/text()')\
            .extract()
        if selection:
            comment = selection[0].strip()
        return comment

    def get_entry_details(self, node):
        """Get the details for each count.

        Args:
            node (HtmlXPathSelector): the node in the tree from where to
                extract the entry details.

        Returns:
            list(dict): a list of dicts containing the fields that describe
                the breakdown of the checklist entry count by age and sex.
        """
        # The breakdown is a table: header row holds the age names, each
        # body row holds a sex followed by one count cell per age column.
        details = []
        xpath = './/div[@class="sd-data-age-sex"]//tr'
        names = node.select(xpath).select('./th/text()').extract()
        cols = len(names)
        row = 0
        for selector in node.select(xpath):
            ages = selector.select('./td')
            if not ages:
                continue  # header row has no <td> cells
            sex = ages[0].select('./text()').extract()[0]
            for col, age in zip(range(1, cols + 1), names):
                values = ages[col].select('./text()').extract()
                if values:
                    details.append({
                        # Synthetic per-entry identifier from the cell position.
                        'identifier': 'DET%02d' % (row * cols + col),
                        'age': age,
                        'sex': sex,
                        'count': int(values[0])
                    })
            row += 1
        return details
class EBirdSpider(BaseSpider):
"""Extract checklists recently added to eBird.
The spider starts by using the API to return the observations for the
last <n> days for the selected region. The recent observations for a region
only contain the simple results fields so additional requests are generated
for the recent observations for each location which contain the full result
fields. Not all the useful information for a checklist is available through
the API so the checklist web page from eBird.org is also parsed to extract
information such as the type of protocol used, breakdowns by age and sex of
the counts for each species, etc. The completed checklist is then written
in JSON format to a file.
Details on the eBird API and the different sets of fields returned can be
found at https://confluence.cornell.edu/display/CLOISAPI/eBird+API+1.1
Three settings control the behaviour of the spider:
DOWNLOAD_DIR: the directory where the downloaded checklists
will be written in JSON format. The directory will be created if it does
not exist.
DURATION: the number of days to fetch observations for. The eBird
API allows access to observations up to 30 days old.
EBIRD_INCLUDE_HTML: include data from the checklist web page.
The spider keeps a list of checklists downloaded and save along with any
errors raised. These are used to create a status report by the extension,
SpiderStatusReport which is emailed out when the spider finishes.
"""
name = 'ebird'
allowed_domains = ["ebird.org", "secure.birds.cornell.edu"]
api_parser = JSONParser
html_parser = HTMLParser
region_url = "http://ebird.org/ws1.1/data/obs/region/recent?" \
"rtype=subnational1&r=%s&back=%d&fmt=json"
location_url = "http://ebird.org/ws1.1/data/obs/loc/recent?" \
"r=%s&detail=full&back=%d&includeProvisional=true&fmt=json"
checklist_url = "http://ebird.org/ebird/view/checklist?subID=%s"
def __init__(self, region, **kwargs):
"""Initialize the spider.
Args:
region (str): the code identifying the eBird region to fetch
observations for.
Returns:
EBirdSpider: a Scrapy crawler object.
"""
super(EBirdSpider, self).__init__(**kwargs)
if not region:
raise ValueError("You must specify an eBird region")
self.region = region
self.log("Downloading checklists for region: %s" % self.region,
log.INFO)
self.checklists = []
self.errors = []
self.warnings = []
def start_requests(self):
    """Configure the spider and issue the first request to the eBird API.

    Returns:
        list(Request): a single request for the recent observations for
            the spider's eBird region.
    """
    duration = int(self.settings['DURATION'])
    self.duration = duration
    self.log("Fetching observations for the past %d days" % duration,
             log.INFO)

    directory = self.settings['DOWNLOAD_DIR']
    self.directory = directory
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    self.log("Writing checklists to %s" % directory, log.INFO)

    self.include_html = self.settings['EBIRD_INCLUDE_HTML']
    message = ("Downloading checklists from API and web pages"
               if self.include_html
               else "Downloading checklists from API only")
    self.log(message, log.INFO)

    return [Request(self.region_url % (self.region, duration),
                    callback=self.parse_region)]
def parse_region(self, response):
    """Request the recent observations for each location in the region.

    Args:
        response (Response): the result of calling the eBird API to get the
            recent observations for a region.

    Yields:
        Request: one request to the eBird API per location extracted from
            the region's recent observations.
    """
    locations = self.api_parser(response).get_locations()
    for entry in locations:
        location_url = self.location_url % (entry['identifier'],
                                            self.duration)
        yield Request(location_url, callback=self.parse_locations)
def parse_locations(self, response):
    """Create the checklists from the observations for a location.

    Args:
        response (Response): the result of calling the eBird API to get the
            recent observations for a location.

    Yields:
        Request: (only when the attribute include_html is True) one request
            per checklist for the eBird web page used to display it.

    Even with the full result fields there is still useful information
    missing from the API, so when EBIRD_INCLUDE_HTML is set an extra
    request is generated for each checklist's web page; otherwise the
    checklist is saved immediately.
    """
    for checklist in self.api_parser(response).get_checklists():
        checklist['source']['api'] = response.url
        if not self.include_html:
            self.save_checklist(checklist)
            continue
        # Pass the partially-built checklist along in the request metadata
        # so parse_checklist() can merge in the web page data.
        yield Request(self.checklist_url % checklist['identifier'],
                      callback=self.parse_checklist,
                      dont_filter=True, meta={'checklist': checklist})
def parse_checklist(self, response):
    """Parse the missing checklist data from the web page.

    Args:
        response (str): the contents of the checklist web page.

    The checklist first extracted from the eBird API travels through
    parse_region() and parse_locations() in the request/response metadata.
    Here it is merged with the data scraped from the web page and written
    out to the download directory.

    ISSUE: If the setting CONCURRENT_REQUEST != 1 then the checklist data
    in the response sometimes does not match the checklist in the request
    metadata. The problem appears to be intermittent and the cause is not
    known. When it occurs an error is logged and the checklist discarded.
    """
    original = response.meta['checklist']
    identifier = original['identifier']
    if not response.url.endswith(identifier):
        self.log("Checklists in response and request don't match."
                 "Identifiers: %s != %s" % (
                     response.url[-9:],
                     identifier
                 ), log.ERROR)
        return
    update = self.html_parser(response).get_checklist()
    checklist = self.merge_checklists(original, update)
    checklist['source']['url'] = response.url
    self.save_checklist(checklist)
def merge_checklists(self, original, update):
    """Merge two checklists together.

    Args:
        original (dict): the checklist extracted from the JSON data.
        update (dict): the checklist extracted from the web page.

    Returns:
        dict: an updated checklist containing values from the first
            (original) updated with values from the second (update).
    """
    entries, warnings = self.merge_entries(
        original['entries'], update['entries'])
    observers = self.merge_observers(original['observers'],
                                     update['observers'])
    checklist = {
        'meta': {
            'version': original['meta']['version'],
            'language': original['meta']['language'],
        },
        'identifier': original['identifier'],
        'date': original['date'],
        'source': original['source'],
        'observers': observers,
        'activity': update['activity'],
        'location': original['location'],
        'comment': update['comment'],
        'entries': entries,
    }
    # Values scraped from the web page take precedence over any protocol
    # information that came from the API.
    protocol = original['protocol'].copy() if 'protocol' in original else {}
    protocol.update(update['protocol'])
    checklist['protocol'] = protocol
    if warnings:
        self.warnings.append((checklist, warnings))
    return checklist
def merge_observers(self, originals, updates):
    """Merge the two lists of observers together.

    Args:
        originals (dict): the observers extracted from the API JSON data.
        updates (dict): the observers extracted from the web page.

    Returns:
        dict: a dictionary containing all the names reported as observers
            on the two checklists along with a total count of the number of
            observers present.
    """
    combined = set(originals['names']) | set(updates['names'])
    # Observers named in both lists were counted twice, so subtract the
    # overlap from the combined total.
    duplicates = sum(1 for name in originals['names']
                     if name in updates['names'])
    return {
        'names': list(combined),
        'count': originals['count'] + updates['count'] - duplicates,
    }
def merge_entries(self, originals, updates):
    """Merge two lists of entries together.

    Args:
        originals (list): the entries extracted from the API JSON data.
        updates (list): the entries extracted from the web page.

    Returns:
        tuple(list, list): a tuple containing the complete (deep) copy of
        the entries merged together and a list of any warnings generated
        when merging the lists together.

    IMPORTANT: The records from the API contain only the species name.
    The subspecies name is discarded. That means if there are two records
    for a species with the same count. It won't be possible to determine
    which record to update when the lists are merged. In this case the
    records will not be merged and only the records from the API will be
    included in the merged list.
    """
    # Start with a deep copy of the API entries; these are the records that
    # get updated in place from the web page data.
    merged = []
    warnings = []
    for entry in originals:
        merged.append({
            'identifier': entry['identifier'],
            'species': entry['species'].copy(),
            'count': entry['count'],
        })
    # Index the copied entries by species name, then by count, so web page
    # records can be matched back to API records. The name is truncated at
    # '(' to drop any subspecies qualifier.
    index = {}
    for entry in merged:
        key = entry['species']['name'].split('(')[0].strip()
        count = entry['count']
        if key in index:
            if count in index[key]:
                index[key][count].append(entry)
            else:
                index[key][count] = [entry]
        else:
            index[key] = {count: [entry]}
    # Warn about ambiguous records: several API entries with the same
    # species name and count cannot be matched unambiguously (see the
    # IMPORTANT note in the docstring).
    for name, counts in index.items():
        for count, entries in counts.items():
            if len(entries) > 1:
                message = "Could not update record from API. There are" \
                          " %s records that match: species=%s; count=%d." \
                          % (len(entries), name, count)
                warnings.append(message)
                self.log(message)
    # Merge in the web page entries. A unique (name, count) match is
    # updated in place; zero or multiple matches append a new record.
    # NOTE(review): a web page species whose name is missing entirely from
    # the index is skipped (target stays None) -- confirm this is intended.
    for entry in updates:
        key = entry['species']['name'].split('(')[0].strip()
        count = entry['count']
        target = None
        added = False
        if key in index:
            if count in index[key]:
                hits = len(index[key][count])
            else:
                hits = 0
            if hits == 0:
                target = {}
                merged.append(target)
                added = True
            elif hits == 1:
                target = index[key][count][0]
            else:
                target = {}
                merged.append(target)
                added = True
        if target is not None:
            # Copy across the richer web page fields (subspecies name,
            # comments and age/sex detail breakdowns).
            target['species'] = entry['species'].copy()
            target['count'] = entry['count']
            if 'comment' in entry:
                target['comment'] = entry['comment']
            if 'details' in entry:
                target['details'] = []
                for detail in entry['details']:
                    target['details'].append(detail.copy())
        if added:
            message = "Web page contains record missing from API:" \
                      " species=%s; count=%d." \
                      % (entry['species']['name'], entry['count'])
            # Only treated as a report-worthy warning in DEBUG runs, but
            # always logged.
            if self.settings['LOG_LEVEL'] == 'DEBUG':
                warnings.append(message)
            self.log(message)
    return merged, warnings
def save_checklist(self, checklist):
    """Save the checklist in JSON format.

    Args:
        checklist (dict); the checklist.

    The filename combines the source name ('ebird') and the checklist
    identifier so the data is always written to the same file. The
    directory where the files are written is defined by the setting
    DOWNLOAD_DIR. If the directory attribute is set to None then the
    checklist is not saved (used for testing).

    The checklist is always added to the list of checklists downloaded so
    far, which is used to generate a status report once the spider has
    finished.
    """
    if self.directory:
        path = os.path.join(self.directory, "%s-%s.json" % (
            checklist['source']['name'], checklist['identifier']))
        save_json_data(path, checklist)
        # BUG FIX: the log message references 'path', so it must only be
        # emitted when a file was actually written -- previously this ran
        # unconditionally and raised NameError when self.directory was
        # unset.
        self.log("Wrote %s: %s %s (%s)" % (
            path, checklist['date'], checklist['location']['name'],
            checklist['source']['submitted_by']), log.DEBUG)
    self.checklists.append(checklist)
| {
"content_hash": "85338efa2890b46c54c14a2d2d39118a",
"timestamp": "",
"source": "github",
"line_count": 803,
"max_line_length": 86,
"avg_line_length": 35.52303860523038,
"alnum_prop": 0.564592462751972,
"repo_name": "StuartMacKay/checklists_scrapers",
"id": "6abf7fd4b55925762aa3c439f21d0cf076a6fec7",
"size": "28525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checklists_scrapers/spiders/ebird_spider.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "173637"
},
{
"name": "Shell",
"bytes": "6728"
}
],
"symlink_target": ""
} |
import os
import mock
from oslotest import mockpatch
from cloud import cloud
from cloud import grouping
from cloudferrylib.utils import utils
from tests import test
# Where the generated grouping result is written, and where the test's group
# rules file is created; both are cleaned up in tearDown().
RESULT_FILE = 'tests/grouping_result'
FILE_NAME = 'tests/groups'
# Minimal stand-in for the CloudFerry configuration: just the 'src' cloud
# credentials and the migrate section that Grouping reads.
FAKE_CONFIG = utils.ext_dict(src=utils.ext_dict({'user': 'fake_user',
                                                 'password': 'fake_password',
                                                 'tenant': 'fake_tenant',
                                                 'host': '1.1.1.1'}),
                             migrate=utils.ext_dict(
                                 {'group_file_path': RESULT_FILE}))
class GroupingTestCase(test.TestCase):
    """Tests for grouping VM instances by tenant or by network."""

    def setUp(self):
        """Create mocked cloud resources and patch cloud.Cloud."""
        super(GroupingTestCase, self).setUp()
        self.network = mock.Mock()
        self.compute = mock.Mock()
        self.identity = mock.Mock()
        # Two tenants: t1 and t2.
        self.fake_tenant1 = mock.Mock()
        self.fake_tenant1.id = 't1'
        self.fake_tenant2 = mock.Mock()
        self.fake_tenant2.id = 't2'
        self.identity.get_tenants_list.return_value = [self.fake_tenant1,
                                                       self.fake_tenant2]
        # Two private networks, each with a single subnet owned by one of
        # the tenants.
        self.fake_network_1 = {'name': 'net1',
                               'id': 'net1_id',
                               'shared': False}
        self.fake_network_2 = {'name': 'net3',
                               'id': 'net3_id',
                               'shared': False}
        self.fake_subnet1 = {'network_id': 'net1_id',
                             'tenant_id': 't1',
                             'cidr': '1.1.1.0/24'}
        self.fake_subnet2 = {'network_id': 'net3_id',
                             'tenant_id': 't2',
                             'cidr': '1.1.3.0/24'}
        self.network.get_subnets_list.return_value = [self.fake_subnet1,
                                                      self.fake_subnet2]
        self.network.get_networks_list.return_value = [self.fake_network_1,
                                                       self.fake_network_2]
        # Three servers: s1 and s3 belong to t1 on net1, s2 to t2 on net3.
        self.fake_instance1 = mock.Mock()
        self.fake_instance1.id = 's1'
        self.fake_instance1.networks = {'net1': ['1.1.1.1']}
        self.fake_instance1.tenant_id = 't1'
        self.fake_instance2 = mock.Mock()
        self.fake_instance2.id = 's2'
        self.fake_instance2.networks = {'net3': ['1.1.3.1']}
        self.fake_instance2.tenant_id = 't2'
        self.fake_instance3 = mock.Mock()
        self.fake_instance3.id = 's3'
        self.fake_instance3.tenant_id = 't1'
        self.fake_instance3.networks = {'net1': ['1.1.1.2']}
        self.cloud = mock.Mock()
        self.cloud().resources = {'network': self.network,
                                  'compute': self.compute,
                                  'identity': self.identity}
        self.cloud_patch = mockpatch.PatchObject(cloud, 'Cloud',
                                                 new=self.cloud)
        self.useFixture(self.cloud_patch)

    def tearDown(self):
        """Remove the files created by the test."""
        super(GroupingTestCase, self).tearDown()
        os.remove(FILE_NAME)
        if utils.check_file(RESULT_FILE):
            os.remove(RESULT_FILE)

    def make_group_file(self, group_rules):
        """Write the grouping rules to FILE_NAME for Grouping to read.

        BUG FIX: use a context manager so the handle is flushed and closed
        before Grouping reads the file back; the original left it open.
        """
        with open(FILE_NAME, 'w') as group_file:
            group_file.write(group_rules)

    def test_group_by_tenant(self):
        """Instances are grouped by the tenant that owns them."""
        group_rules = """
group_by:
- tenant
"""
        self.make_group_file(group_rules)
        group = grouping.Grouping(FAKE_CONFIG, FILE_NAME, 'src')
        group.compute.get_instances_list.return_value = [self.fake_instance1,
                                                         self.fake_instance2,
                                                         self.fake_instance3]
        group.group()
        expected_result = {'t2': ['s2'], 't1': ['s1', 's3']}
        result = utils.read_yaml_file(RESULT_FILE)
        self.assertEqual(expected_result, result)

    def test_group_by_network(self):
        """Instances are grouped by the network they are attached to."""
        group_rules = """
group_by:
- network
"""
        self.make_group_file(group_rules)
        group = grouping.Grouping(FAKE_CONFIG, FILE_NAME, 'src')
        group.compute.get_instances_list.return_value = [self.fake_instance1,
                                                         self.fake_instance2,
                                                         self.fake_instance3]
        group.group()
        expected_result = {'net1_id': ['s1', 's3'], 'net3_id': ['s2']}
        result = utils.read_yaml_file(RESULT_FILE)
        self.assertEqual(expected_result, result)

    def test_invalid_group(self):
        """An unknown grouping option raises RuntimeError."""
        group_rules = """
group_by:
- some_group
"""
        self.make_group_file(group_rules)
        group = grouping.Grouping(FAKE_CONFIG, FILE_NAME, 'src')
        # assertRaisesRegexp kept (not assertRaisesRegex) for Python 2
        # compatibility, matching the `mock` import style of this file.
        with self.assertRaisesRegexp(RuntimeError, 'no such grouping option'):
            group.group()
| {
"content_hash": "bcc066109744ef97f836d8f43d148942",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 36.03649635036496,
"alnum_prop": 0.48997366821956656,
"repo_name": "mgrygoriev/CloudFerry",
"id": "5d9e122fe7f7146498c639d40b7bc7f3ee46f96d",
"size": "5568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cloud/test_grouping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "775433"
},
{
"name": "Ruby",
"bytes": "5181"
},
{
"name": "Shell",
"bytes": "34787"
}
],
"symlink_target": ""
} |
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python sequence, tuple (including `namedtuple`), or dict that can contain
further sequences, tuples, and dicts.
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
import wrapt as _wrapt
from tensorflow.python import _pywrap_utils
from tensorflow.python.util.compat import collections_abc as _collections_abc
from tensorflow.python.util.tf_export import tf_export
# Error-message templates used by the structure-assertion helpers in this
# module; module-level constants so tests can match against them.
_SHALLOW_TREE_HAS_INVALID_KEYS = (
    "The shallow_tree's keys are not a subset of the input_tree's keys. The "
    "shallow_tree has the following keys that are not in the input_tree: {}.")

_STRUCTURES_HAVE_MISMATCHING_TYPES = (
    "The two structures don't have the same sequence type. Input structure has "
    "type {input_type}, while shallow structure has type {shallow_type}.")

_STRUCTURES_HAVE_MISMATCHING_LENGTHS = (
    "The two structures don't have the same sequence length. Input "
    "structure has length {input_length}, while shallow structure has length "
    "{shallow_length}."
)

_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = (
    "The input_tree has fewer elements than the shallow_tree. Input structure "
    "has length {input_size}, while shallow structure has length "
    "{shallow_size}.")

_IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = (
    "If shallow structure is a sequence, input must also be a sequence. "
    "Input has type: {}.")
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = [a.name for a in attrs]
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_.keys())
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
  """Returns True iff `instance` is a `namedtuple`.

  Args:
    instance: An instance of a Python object.
    strict: If True, `instance` is considered to be a `namedtuple` only if
        it is a "plain" namedtuple. For instance, a class inheriting
        from a `namedtuple` will be considered to be a `namedtuple`
        iff `strict=False`.

  Returns:
    True if `instance` is a `namedtuple`.
  """
  # Thin wrapper: the actual check is implemented in the C++ extension.
  return _pywrap_utils.IsNamedtuple(instance, strict)
# Type-test predicates implemented in C++; see the swig file (util.i) for
# documentation. Aliased with module-private names for use throughout this
# module.
_is_mapping = _pywrap_utils.IsMapping
_is_mapping_view = _pywrap_utils.IsMappingView
_is_attrs = _pywrap_utils.IsAttrs
_is_composite_tensor = _pywrap_utils.IsCompositeTensor
_is_type_spec = _pywrap_utils.IsTypeSpec
def _sequence_like(instance, args):
  """Converts the sequence `args` to the same type as `instance`.

  Args:
    instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
        `collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
        or `type_spec.TypeSpec`.
    args: elements to be converted to the `instance` type.

  Returns:
    `args` with the type of `instance`.
  """
  # NOTE: the order of the isinstance/predicate checks below matters; e.g.
  # namedtuples are also tuples, so they must be handled before the generic
  # fallback at the bottom.
  if _is_mapping(instance):
    # Pack dictionaries in a deterministic order by sorting the keys.
    # Notice this means that we ignore the original order of `OrderedDict`
    # instances. This is intentional, to avoid potential bugs caused by mixing
    # ordered and plain dicts (e.g., flattening a dict but using a
    # corresponding `OrderedDict` to pack it back).
    result = dict(zip(_sorted(instance), args))
    instance_type = type(instance)
    if instance_type == _collections.defaultdict:
      # defaultdict's constructor takes the default factory rather than
      # (key, value) pairs, so it is rebuilt by per-key assignment.
      d = _collections.defaultdict(instance.default_factory)
      for key in instance:
        d[key] = result[key]
      return d
    else:
      # Rebuild in the original key order of `instance` so the result
      # compares equal for order-sensitive mappings.
      return instance_type((key, result[key]) for key in instance)
  elif _is_mapping_view(instance):
    # We can't directly construct mapping views, so we create a list instead
    return list(args)
  elif _is_namedtuple(instance) or _is_attrs(instance):
    # For wrapt proxies, construct the wrapped type, not the proxy type.
    if isinstance(instance, _wrapt.ObjectProxy):
      instance_type = type(instance.__wrapped__)
    else:
      instance_type = type(instance)
    return instance_type(*args)
  elif _is_composite_tensor(instance):
    assert len(args) == 1
    spec = instance._type_spec  # pylint: disable=protected-access
    return spec._from_components(args[0])  # pylint: disable=protected-access
  elif _is_type_spec(instance):
    # Pack a CompositeTensor's components according to a TypeSpec.
    assert len(args) == 1
    return instance._from_components(args[0])  # pylint: disable=protected-access
  elif isinstance(instance, _six.moves.range):
    # Ranges can't be reconstructed from arbitrary values; use a list.
    return _sequence_like(list(instance), args)
  elif isinstance(instance, _wrapt.ObjectProxy):
    # For object proxies, first create the underlying type and then re-wrap it
    # in the proxy type.
    return type(instance)(_sequence_like(instance.__wrapped__, args))
  else:
    # Not a namedtuple
    return type(instance)(args)
def _yield_value(iterable):
  """Yields only the values of `iterable`, in `_yield_sorted_items` order."""
  for unused_key, value in _yield_sorted_items(iterable):
    yield value
def _yield_sorted_items(iterable):
  """Yield (key, value) pairs for `iterable` in a deterministic order.

  For Sequences, the key will be an int, the array index of a value.
  For Mappings, the key will be the dictionary key.
  For objects (e.g. namedtuples), the key will be the attribute name.

  Mapping keys are yielded in sorted order; namedtuple and attrs fields are
  yielded in field declaration order; sequence items in index order. The
  order is deterministic in every case.

  Args:
    iterable: an iterable.

  Yields:
    The iterable's (key, value) pairs.
  """
  if isinstance(iterable, _collections_abc.Mapping):
    # Iterate through dictionaries in a deterministic order by sorting the
    # keys. Notice this means that we ignore the original order of `OrderedDict`
    # instances. This is intentional, to avoid potential bugs caused by mixing
    # ordered and plain dicts (e.g., flattening a dict but using a
    # corresponding `OrderedDict` to pack it back).
    for key in _sorted(iterable):
      yield key, iterable[key]
  elif _is_attrs(iterable):
    # attrs classes yield (field name, value) in declaration order.
    for item in _get_attrs_items(iterable):
      yield item
  elif _is_namedtuple(iterable):
    for field in iterable._fields:
      yield field, getattr(iterable, field)
  elif _is_composite_tensor(iterable):
    type_spec = iterable._type_spec  # pylint: disable=protected-access
    yield type(iterable).__name__, type_spec._to_components(iterable)  # pylint: disable=protected-access
  elif _is_type_spec(iterable):
    # Note: to allow CompositeTensors and their TypeSpecs to have matching
    # structures, we need to use the same key string here.
    yield iterable.value_type.__name__, iterable._component_specs  # pylint: disable=protected-access
  else:
    # Plain sequence: (index, value) pairs.
    for item in enumerate(iterable):
      yield item
# Sequence-test predicates implemented in C++; see the swig file (util.i) for
# documentation. `is_sequence_or_composite` is the variant used when
# `expand_composites=True`.
is_sequence = _pywrap_utils.IsSequence
is_sequence_or_composite = _pywrap_utils.IsSequenceOrComposite
@tf_export("nest.is_nested")
def is_nested(seq):
  """Returns true if its input is a collections.abc.Sequence (except strings).

  Args:
    seq: an input sequence.

  Returns:
    True if the sequence is a not a string and is a collections.abc.Sequence
    or a dict.
  """
  # Thin wrapper over the C++ predicate, kept as a separate function so it
  # can be exported as `tf.nest.is_nested`.
  return is_sequence(seq)
@tf_export("nest.flatten")
def flatten(structure, expand_composites=False):
  """Returns a flat list from a given nested structure.

  If nest is not a sequence, tuple (or a namedtuple), dict, or an attrs class,
  then returns a single-element list:
  [nest].

  In the case of dict instances, the sequence consists of the values, sorted by
  key to ensure deterministic behavior. This is true also for OrderedDict
  instances: their sequence order is ignored, the sorting order of keys is used
  instead. The same convention is followed in pack_sequence_as. This correctly
  repacks dicts and OrderedDicts after they have been flattened, and also allows
  flattening an OrderedDict and then repacking it back using a corresponding
  plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be
  flattened.

  Users must not modify any collections used in nest while this function is
  running.

  Examples:

  1. Python dict (ordered by key):

    >>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" }
    >>> tf.nest.flatten(dict)
    ['value1', 'value2', 'value3']

  2. For a nested python tuple:

    >>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), (6.0))
    >>> tf.nest.flatten(tuple)
    [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]

  3. Numpy array (will not flatten):

    >>> array = np.array([[1, 2], [3, 4]])
    >>> tf.nest.flatten(array)
    [array([[1, 2],
            [3, 4]])]

  4. `tf.Tensor` (will not flatten):

    >>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
    >>> tf.nest.flatten(tensor)
    [<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
      array([[1., 2., 3.],
             [4., 5., 6.],
             [7., 8., 9.]], dtype=float32)>]

  Args:
    structure: an arbitrarily nested structure or a scalar object. Note, numpy
      arrays are considered scalars.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A Python list, the flattened version of the input.

  Raises:
    TypeError: The nest is or contains a dict with non-sortable keys.
  """
  # The traversal itself is implemented in C++ for speed.
  return _pywrap_utils.Flatten(structure, expand_composites)
# Namedtuple-comparison helper implemented in C++; see the swig file (util.i)
# for documentation.
_same_namedtuples = _pywrap_utils.SameNamedtuples
class _DotString(object):
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
@tf_export("nest.assert_same_structure")
def assert_same_structure(nest1, nest2, check_types=True,
                          expand_composites=False):
  """Asserts that two structures are nested in the same way.

  Note that namedtuples with identical name and fields are always considered
  to have the same shallow structure (even with `check_types=True`).
  For instance, this code will print `True`:

  ```python
  def nt(a, b):
    return collections.namedtuple('foo', 'a b')(a, b)
  print(assert_same_structure(nt(0, 1), nt(2, 3)))
  ```

  Args:
    nest1: an arbitrarily nested structure.
    nest2: an arbitrarily nested structure.
    check_types: if `True` (default) types of sequences are checked as well,
        including the keys of dictionaries. If set to `False`, for example a
        list and a tuple of objects will look the same if they have the same
        size. Note that namedtuples with identical name and fields are always
        considered to have the same shallow structure. Two types will also be
        considered the same if they are both list subtypes (which allows "list"
        and "_ListWrapper" from trackable dependency tracking to compare
        equal).
    expand_composites: If true, then composite tensors such as `tf.SparseTensor`
        and `tf.RaggedTensor` are expanded into their component tensors.

  Raises:
    ValueError: If the two structures do not have the same number of elements or
      if the two structures are not nested in the same way.
    TypeError: If the two structures differ in the type of sequence in any of
      their substructures. Only possible if `check_types` is `True`.
  """
  try:
    # The structural comparison itself is implemented in C++.
    _pywrap_utils.AssertSameStructure(nest1, nest2, check_types,
                                      expand_composites)
  except (ValueError, TypeError) as e:
    # Re-raise the same exception type, appending a rendering of both
    # structures in which every leaf is replaced by a dot, so only the
    # nesting shape is shown.
    str1 = str(map_structure(lambda _: _DOT, nest1))
    str2 = str(map_structure(lambda _: _DOT, nest2))
    raise type(e)("%s\n"
                  "Entire first structure:\n%s\n"
                  "Entire second structure:\n%s"
                  % (str(e), str1, str2))
def flatten_dict_items(dictionary):
  """Returns a dictionary with flattened keys and values.

  This function flattens the keys and values of a dictionary, which can be
  arbitrarily nested structures, and returns the flattened version of such
  structures:

  ```python
  example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
  result = {4: "a", 5: "b", 6: "c", 8: "d"}
  flatten_dict_items(example_dictionary) == result
  ```

  The input dictionary must satisfy two properties:

  1. Its keys and values should have the same exact nested structure.
  2. The set of all flattened keys of the dictionary must not contain repeated
     keys.

  Args:
    dictionary: the dictionary to zip

  Returns:
    The zipped dictionary.

  Raises:
    TypeError: If the input is not a dictionary.
    ValueError: If any key and value do not have the same structure layout, or
      if keys are not unique.
  """
  if not isinstance(dictionary, (dict, _collections_abc.Mapping)):
    raise TypeError("input must be a dictionary")

  flat_dictionary = {}
  for key, value in _six.iteritems(dictionary):
    # Scalar keys are copied over directly; structured keys are flattened
    # and zipped leaf-wise with the flattened value.
    if not is_sequence(key):
      if key in flat_dictionary:
        raise ValueError(
            "Could not flatten dictionary: key %s is not unique." % key)
      flat_dictionary[key] = value
      continue
    flat_keys = flatten(key)
    flat_values = flatten(value)
    if len(flat_keys) != len(flat_values):
      raise ValueError(
          "Could not flatten dictionary. Key had %d elements, but value had "
          "%d elements. Key: %s, value: %s."
          % (len(flat_keys), len(flat_values), flat_keys, flat_values))
    for flat_key, flat_value in zip(flat_keys, flat_values):
      if flat_key in flat_dictionary:
        raise ValueError(
            "Could not flatten dictionary: key %s is not unique."
            % (flat_key))
      flat_dictionary[flat_key] = flat_value
  return flat_dictionary
def _packed_nest_with_indices(structure, flat, index, is_seq, sequence_fn=None):
  """Helper function for pack_sequence_as.

  Args:
    structure: Substructure (list / tuple / dict) to mimic.
    flat: Flattened values to output substructure for.
    index: Index at which to start reading from flat.
    is_seq: Function used to test if a value should be treated as a sequence.
    sequence_fn: Function used to generate a new sequence instance.

  Returns:
    The tuple (new_index, child), where:
      * new_index - the updated index into `flat` having processed `structure`.
      * packed - the subset of `flat` corresponding to `structure`,
        having started at `index`, and packed into the same nested
        format.

  Raises:
    ValueError: if `structure` contains more elements than `flat`
      (assuming indexing starts from `index`).
  """
  packed = []
  sequence_fn = sequence_fn or _sequence_like
  for s in _yield_value(structure):
    if is_seq(s):
      # Recurse into the substructure, consuming values from `flat`
      # starting at the current index, and rebuild the child container.
      new_index, child = _packed_nest_with_indices(s, flat, index, is_seq,
                                                   sequence_fn)
      packed.append(sequence_fn(s, child))
      index = new_index
    else:
      # Leaf: take the next flat value.
      packed.append(flat[index])
      index += 1
  return index, packed
def _pack_sequence_as(structure, flat_sequence, expand_composites,
                      sequence_fn=None):
  """Implements sequence packing, with the option to alter the structure.

  Args:
    structure: the nested structure to mimic.
    flat_sequence: the flat values to pack.
    expand_composites: if true, composite tensors (and TypeSpecs) are also
        treated as expandable sequences.
    sequence_fn: optional replacement for `_sequence_like`, used to rebuild
        each (sub)structure from its packed children.

  Returns:
    `flat_sequence` packed into the shape of `structure`, or
    `flat_sequence[0]` when `structure` is a scalar.

  Raises:
    TypeError: if `flat_sequence` is not a sequence.
    ValueError: if the two arguments cannot be aligned.
  """
  is_seq = is_sequence_or_composite if expand_composites else is_sequence
  sequence_fn = sequence_fn or _sequence_like

  def truncate(value, length):
    # Abbreviate long value reprs for readable error messages.
    value_str = str(value)
    return value_str[:length] + (value_str[length:] and "...")

  if not is_seq(flat_sequence):
    raise TypeError(
        "Attempted to pack value:\n {}\ninto a sequence, but found "
        "incompatible type `{}` instead."
        .format(truncate(flat_sequence, 100), type(flat_sequence)))

  if not is_seq(structure):
    # Scalar structure: the flat sequence must hold exactly one value.
    if len(flat_sequence) != 1:
      raise ValueError(
          "The target structure is of type `{}`\n {}\nHowever the input "
          "structure is a sequence ({}) of length {}.\n {}\nnest cannot "
          "guarantee that it is safe to map one to the other.".format(
              type(structure), truncate(structure, 100), type(flat_sequence),
              len(flat_sequence), truncate(flat_sequence, 100)))
    return flat_sequence[0]

  try:
    final_index, packed = _packed_nest_with_indices(structure, flat_sequence,
                                                    0, is_seq, sequence_fn)
    if final_index < len(flat_sequence):
      # Leftover flat values: fall through to the size-mismatch error below.
      raise IndexError
  except IndexError:
    flat_structure = flatten(structure)
    if len(flat_structure) != len(flat_sequence):
      raise ValueError(
          "Could not pack sequence. Structure had %d elements, but "
          "flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
          (len(flat_structure), len(flat_sequence), structure, flat_sequence))
  return sequence_fn(structure, packed)
@tf_export("nest.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence, expand_composites=False):
  """Returns a given flattened sequence packed into a given structure.

  If `structure` is a scalar, `flat_sequence` must be a single-element list;
  in this case the return value is `flat_sequence[0]`.

  If `structure` is or contains a dict instance, the keys will be sorted to
  pack the flat sequence in deterministic order. This is true also for
  `OrderedDict` instances: their sequence order is ignored, the sorting order of
  keys is used instead. The same convention is followed in `flatten`.
  This correctly repacks dicts and `OrderedDict`s after they have been
  flattened, and also allows flattening an `OrderedDict` and then repacking it
  back using a corresponding plain dict, or vice-versa.
  Dictionaries with non-sortable keys cannot be flattened.

  Args:
    structure: Nested structure, whose structure is given by nested lists,
      tuples, and dicts. Note: numpy arrays and strings are considered
      scalars.
    flat_sequence: flat sequence to pack.
    expand_composites: If true, then composite tensors such as `tf.SparseTensor`
      and `tf.RaggedTensor` are expanded into their component tensors.

  Returns:
    packed: `flat_sequence` converted to have the same recursive structure as
      `structure`.

  Raises:
    ValueError: If `flat_sequence` and `structure` have different
      element counts.
    TypeError: `structure` is or contains a dict with non-sortable keys.
  """
  # Thin public wrapper; the work is done by _pack_sequence_as, which also
  # supports substituting the container-rebuilding function.
  return _pack_sequence_as(structure, flat_sequence, expand_composites)
@tf_export("nest.map_structure")
def map_structure(func, *structure, **kwargs):
  """Applies `func` to each entry in `structure` and returns a new structure.

  Applies `func(x[0], x[1], ...)` where x[i] is an entry in
  `structure[i]`. All structures in `structure` must have the same arity,
  and the return value will contain results with the same structure layout.

  Args:
    func: A callable that accepts as many arguments as there are structures.
    *structure: scalar, or tuple or list of constructed scalars and/or other
      tuples/lists, or scalars. Note: numpy arrays are considered as scalars.
    **kwargs: Valid keyword args are:
      * `check_types`: If set to `True` (default) the types of iterables
        within the structures have to be same (e.g. `map_structure(func,
        [1], (1,))` raises a `TypeError` exception). To allow this set this
        argument to `False`. Note that namedtuples with identical name and
        fields are always considered to have the same shallow structure.
      * `expand_composites`: If set to `True`, then composite tensors such
        as `tf.SparseTensor` and `tf.RaggedTensor` are expanded into their
        component tensors. If `False` (the default), then composite tensors
        are not expanded.

  Returns:
    A new structure with the same arity as `structure`, whose values correspond
    to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
    location in `structure[i]`. If there are different sequence types and
    `check_types` is `False` the sequence types of the first structure will be
    used.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    ValueError: If no structure is provided or if the structures do not match
      each other by type.
    ValueError: If wrong keyword arguments are provided.
  """
  # Validate arguments before doing any flattening work.
  if not callable(func):
    raise TypeError("func must be callable, got: %s" % func)
  if not structure:
    raise ValueError("Must provide at least one structure")

  check_types = kwargs.pop("check_types", True)
  expand_composites = kwargs.pop("expand_composites", False)
  if kwargs:
    raise ValueError(
        "Only valid keyword arguments are `check_types` and "
        "`expand_composites`, not: `%s`" % ("`, `".join(kwargs.keys())))

  # Every structure must match the shape of the first one.
  first = structure[0]
  for other in structure[1:]:
    assert_same_structure(first, other, check_types=check_types,
                          expand_composites=expand_composites)

  # Flatten every structure, apply func leaf-wise across them, and repack
  # the results into the shape of the first structure.
  flat_structures = [flatten(s, expand_composites) for s in structure]
  mapped = [func(*leaves) for leaves in zip(*flat_structures)]
  return pack_sequence_as(first, mapped,
                          expand_composites=expand_composites)
def map_structure_with_paths(func, *structure, **kwargs):
  """Maps `func` over the leaves of `structure`, passing a string path.

  Invokes `func(path, x[0], x[1], ..., **kwargs)` at every leaf, where
  `path` is the "/"-joined string form of the leaf's tuple path and `x[i]`
  is the corresponding entry from `structure[i]`. All structures must share
  the same arity; the result mirrors their layout. The special kwarg
  `check_types` (consumed downstream, never forwarded to `func`) controls
  whether iterable types within the structures must match exactly; set it
  to `False` to allow iteration over structures of different (but equally
  deep) types.

  Args:
    func: A callable with signature `func(path, *values, **kwargs)` that is
      evaluated on the leaves of the structure.
    *structure: One or more structures with compatible layouts.
    **kwargs: Optional kwargs forwarded to `func` (except `check_types`).

  Returns:
    A structure shaped like the inputs whose leaves are the results of
    evaluating `func` on the corresponding input leaves.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    TypeError: If `check_types` is not `False` and the structures differ in
      sequence type anywhere in their substructures.
    ValueError: If no structures are provided.
  """
  def _stringify_and_call(tuple_path, *leaf_values, **inner_kwargs):
    # Convert the tuple path into the "a/b/c" string form expected by `func`.
    joined = "/".join(str(part) for part in tuple_path)
    return func(joined, *leaf_values, **inner_kwargs)

  return map_structure_with_tuple_paths_up_to(
      structure[0], _stringify_and_call, *structure, **kwargs)
def map_structure_with_tuple_paths(func, *structure, **kwargs):
  """Maps `func` over the leaves of `structure`, passing a tuple path.

  Invokes `func(tuple_path, x[0], x[1], ..., **kwargs)` at every leaf, where
  `tuple_path` is the tuple of indices and/or dictionary keys (as returned
  by `nest.yield_flat_paths`) that uniquely locates the leaf, and `x[i]` is
  the corresponding entry from `structure[i]`. All structures must share the
  same arity; the result keeps their layout. The special kwarg `check_types`
  (consumed downstream, never forwarded to `func`) controls whether iterable
  types within the structures must match; set it to `False` to relax this.

  Args:
    func: A callable with signature `func(tuple_path, *values, **kwargs)`
      that is evaluated on the leaves of the structure.
    *structure: One or more structurally compatible structures.
    **kwargs: Optional kwargs forwarded to `func` (except `check_types`).

  Returns:
    A structure shaped like the inputs whose leaves are the results of
    evaluating `func` on the corresponding input leaves.

  Raises:
    TypeError: If `func` is not callable or if the structures do not match
      each other by depth tree.
    TypeError: If `check_types` is not `False` and the structures differ in
      sequence type anywhere in their substructures.
    ValueError: If no structures are provided.
  """
  # Delegate to the *_up_to variant, using the first structure as the
  # (full-depth) shallow tree so every leaf is visited.
  return map_structure_with_tuple_paths_up_to(
      structure[0], func, *structure, **kwargs)
def _yield_flat_up_to(shallow_tree, input_tree, is_seq, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_seq: Function used to test if a value should be treated as a sequence.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
Pairs of (path, value), where path the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_seq(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_yield_sorted_items(input_tree))
for shallow_key, shallow_subtree in _yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _yield_flat_up_to(shallow_subtree,
input_subtree, is_seq,
path=subpath):
yield (leaf_path, leaf_value)
def assert_shallow_structure(shallow_tree,
                             input_tree,
                             check_types=True,
                             expand_composites=False):
  """Asserts that `shallow_tree` is a shallow structure of `input_tree`.

  That is, this function tests if the `input_tree` structure can be created
  from the `shallow_tree` structure by replacing its leaf nodes with deeper
  tree structures.

  Examples:

  The following code will raise an exception:
  ```python
  shallow_tree = {"a": "A", "b": "B"}
  input_tree = {"a": 1, "c": 2}
  assert_shallow_structure(shallow_tree, input_tree)
  ```

  The following code will raise an exception:
  ```python
  shallow_tree = ["a", "b"]
  input_tree = ["c", ["d", "e"], "f"]
  assert_shallow_structure(shallow_tree, input_tree)
  ```

  Args:
    shallow_tree: an arbitrarily nested structure.
    input_tree: an arbitrarily nested structure.
    check_types: if `True` (default) the sequence types of `shallow_tree` and
      `input_tree` have to be the same. Note that even with check_types==True,
      this function will consider two different namedtuple classes with the same
      name and _fields attribute to be the same class.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`. Only raised if `check_types` is `True`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  is_seq = is_sequence_or_composite if expand_composites else is_sequence
  if is_seq(shallow_tree):
    if not is_seq(input_tree):
      raise TypeError(
          "If shallow structure is a sequence, input must also be a sequence. "
          "Input has type: %s." % type(input_tree))

    # Unwrap wrapt proxies so the type comparison below sees the real type.
    if isinstance(shallow_tree, _wrapt.ObjectProxy):
      shallow_type = type(shallow_tree.__wrapped__)
    else:
      shallow_type = type(shallow_tree)

    if check_types and not isinstance(input_tree, shallow_type):
      # Duck-typing means that nest should be fine with two different
      # namedtuples with identical name and fields.
      shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
      input_is_namedtuple = _is_namedtuple(input_tree, False)
      if shallow_is_namedtuple and input_is_namedtuple:
        if not _same_namedtuples(shallow_tree, input_tree):
          raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
              input_type=type(input_tree),
              shallow_type=type(shallow_tree)))

      elif ((_is_composite_tensor(shallow_tree) or
             _is_composite_tensor(input_tree)) and
            (_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
        pass  # Compatibility will be checked below.

      elif not (isinstance(shallow_tree, _collections_abc.Mapping) and
                isinstance(input_tree, _collections_abc.Mapping)):
        raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            input_type=type(input_tree),
            shallow_type=type(shallow_tree)))

    if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
      # Mixing a composite tensor with its TypeSpec is allowed; anything else
      # paired with a composite tensor is a type mismatch.
      if not (
          (_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) and
          (_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree))):
        raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
            input_type=type(input_tree),
            shallow_type=type(shallow_tree)))
      type_spec_1 = (shallow_tree if _is_type_spec(shallow_tree) else
                     shallow_tree._type_spec)  # pylint: disable=protected-access
      type_spec_2 = (input_tree if _is_type_spec(input_tree) else
                     input_tree._type_spec)  # pylint: disable=protected-access
      try:
        _ = type_spec_1.most_specific_compatible_type(type_spec_2)
      except (TypeError, ValueError) as e:
        raise ValueError(
            "Incompatible CompositeTensor TypeSpecs: %s vs. %s -- %s" %
            (type_spec_1, type_spec_2, e))

    elif _is_type_spec(shallow_tree):
      if not _is_type_spec(input_tree):
        raise TypeError("If shallow structure is a TypeSpec, input must also "
                        "be a TypeSpec.  Input has type: %s."
                        % type(input_tree))
    else:
      if len(input_tree) != len(shallow_tree):
        raise ValueError(
            _STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
                input_length=len(input_tree), shallow_length=len(shallow_tree)))
      # NOTE: a former `elif len(input_tree) < len(shallow_tree)` branch
      # raising _INPUT_TREE_SMALLER_THAN_SHALLOW_TREE was unreachable (any
      # length difference already raises above) and has been removed.

    if isinstance(shallow_tree, _collections_abc.Mapping):
      absent_keys = set(shallow_tree) - set(input_tree)
      if absent_keys:
        raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
                         .format(sorted(absent_keys)))

    # Recurse pairwise into the children.
    for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
                                            _yield_value(input_tree)):
      assert_shallow_structure(shallow_branch, input_branch,
                               check_types=check_types,
                               expand_composites=expand_composites)
def flatten_up_to(shallow_tree, input_tree, check_types=True,
                  expand_composites=False):
  """Flattens `input_tree` down to the depth of `shallow_tree`.

  Structure in `input_tree` deeper than `shallow_tree` is kept intact as
  elements of the partially-flattened output. If neither argument is a
  sequence, the result is the single-element list `[input_tree]`.

  Use this when you want to partially flatten a nested sequence while
  retaining some of its structure: `input_tree` is viewed as having the same
  layout as `shallow_tree`, but with leaves that are themselves trees.

  Examples:

  ```python
  input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
  shallow_tree = [[True, True], [False, True]]
  flatten_up_to(shallow_tree, input_tree)   # [[2, 2], [3, 3], [4, 9], [5, 5]]
  flatten_up_to(shallow_tree, shallow_tree) # [True, True, False, True]
  ```

  Non-Sequence Edge Cases:

  ```python
  flatten_up_to(0, 0)                  # Output: [0]
  flatten_up_to(0, [0, 1, 2])          # Output: [[0, 1, 2]]
  flatten_up_to([0, 1, 2], 0)          # Output: TypeError
  flatten_up_to([0, 1, 2], [0, 1, 2])  # Output: [0, 1, 2]
  ```

  Args:
    shallow_tree: a possibly pruned structure of input_tree.
    input_tree: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.
    check_types: bool. If True, check that each node in shallow_tree has the
      same type as the corresponding node in input_tree.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A Python list, the partially flattened version of `input_tree` according
    to the structure of `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  if expand_composites:
    is_seq = is_sequence_or_composite
  else:
    is_seq = is_sequence
  assert_shallow_structure(shallow_tree, input_tree, check_types=check_types,
                           expand_composites=expand_composites)
  # _yield_flat_up_to produces (path, value) pairs; only the values matter.
  return [value for _, value in
          _yield_flat_up_to(shallow_tree, input_tree, is_seq)]
def flatten_with_tuple_paths_up_to(shallow_tree,
                                   input_tree,
                                   check_types=True,
                                   expand_composites=False):
  """Flattens `input_tree` up to `shallow_tree`, keeping tuple paths.

  Like `flatten_up_to`, but each output element is a `(path, value)` pair
  where `path` is the tuple path of `value` within `input_tree`. Structure
  deeper than `shallow_tree` is retained inside the values. If neither
  argument is a sequence, this returns the single-element list
  `[((), input_tree)]`.

  Examples:

  ```python
  input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
  shallow_tree = [[True, True], [False, True]]
  flatten_with_tuple_paths_up_to(shallow_tree, input_tree)
  # [((0, 0), [2, 2]), ((0, 1), [3, 3]), ((1, 0), [4, 9]), ((1, 1), [5, 5])]
  ```

  Non-Sequence Edge Cases:

  ```python
  flatten_with_tuple_paths_up_to(0, 0)          # Output: [(), 0]
  flatten_with_tuple_paths_up_to(0, [0, 1, 2])  # Output: [(), [0, 1, 2]]
  flatten_with_tuple_paths_up_to([0, 1, 2], 0)  # Output: TypeError
  flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2])
  # Output: [((0,) 0), ((1,), 1), ((2,), 2)]
  ```

  Args:
    shallow_tree: a possibly pruned structure of input_tree.
    input_tree: an arbitrarily nested structure or a scalar object.
      Note, numpy arrays are considered scalars.
    check_types: bool. If True, check that each node in shallow_tree has the
      same type as the corresponding node in input_tree.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A Python list of `(tuple_path, value)` pairs: `input_tree` partially
    flattened according to the structure of `shallow_tree`.

  Raises:
    TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.
  """
  if expand_composites:
    predicate = is_sequence_or_composite
  else:
    predicate = is_sequence
  assert_shallow_structure(shallow_tree, input_tree, check_types=check_types,
                           expand_composites=expand_composites)
  return list(_yield_flat_up_to(shallow_tree, input_tree, predicate))
def map_structure_up_to(shallow_tree, func, *inputs, **kwargs):
  """Applies `func` over `inputs` flattened only as deep as `shallow_tree`.

  Each input is partially flattened according to `shallow_tree` before `func`
  is applied, so `func` may receive whole sub-structures as arguments (useful
  when the function itself takes sequence inputs). The result therefore has
  the same base layout as `shallow_tree`.

  Examples:

  ```python
  shallow_tree = [None, None]
  inp_val = [1, 2, 3]
  map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)  # [2, 4]
  ```

  ```python
  data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
  name_list = ['evens', ['odds', 'primes']]
  map_structure_up_to(
      name_list,
      lambda name, sec: "first_{}_{}".format(len(sec), name),
      name_list, data_list)
  # ['first_4_evens', ['first_5_odds', 'first_3_primes']]
  ```

  Args:
    shallow_tree: a shallow tree, common to all the inputs.
    func: callable applied to each group of partially-flattened elements;
      must support an arity of `len(inputs)`.
    *inputs: arbitrarily nested combination of objects that are compatible
      with shallow_tree.
    **kwargs: kwargs to feed to func(). The special kwarg `check_types` is
      consumed here (not passed to func) and determines whether iterable
      types within the structures have to be the same; set it to `False` to
      allow mixed types.

  Raises:
    TypeError: If `shallow_tree` is a sequence but an input is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.

  Returns:
    result of repeatedly applying `func`, with the same structure layout as
    `shallow_tree`.
  """
  # Reuse the tuple-path variant and simply drop the path it prepends.
  def _ignore_path(_, *leaf_values):
    return func(*leaf_values)

  return map_structure_with_tuple_paths_up_to(
      shallow_tree, _ignore_path, *inputs, **kwargs)
def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):
  """Applies `func` over partially-flattened inputs, passing tuple paths.

  Like `map_structure_up_to`, except that `func` receives the leaf's tuple
  path (relative to `shallow_tree`) as its first argument, followed by the
  corresponding values from each of `*inputs`.

  Example:

  ```python
  lowercase = {'a': 'a', 'b': ('b0', 'b1')}
  uppercase = {'a': 'A', 'b': ('B0', 'B1')}

  def print_path_and_values(path, *values):
    print("path: {}, values: {}".format(path, values))

  shallow_tree = {'a': None}
  map_structure_with_tuple_paths_up_to(shallow_tree,
                                       print_path_and_values,
                                       lowercase,
                                       uppercase)
  path: ('a',), values: ('a', 'A')
  path: ('b', 0), values: ('b0', 'B0')
  path: ('b', 1), values: ('b1', 'B1')
  ```

  Args:
    shallow_tree: a shallow tree, common to all the inputs.
    func: callable that takes args (path, inputs_0_value, ... ,
      inputs_N_value), where path is a tuple path to a leaf node in
      shallow_tree, and inputs_i_value is the corresponding value from
      inputs[i].
    *inputs: nested structures that are all structurally compatible with
      shallow_tree.
    **kwargs: kwargs to feed to func(). The special kwarg `check_types` is
      consumed here (not passed to func) and determines whether iterable
      types within the structures have to be the same; set it to `False` to
      allow mixed types.

  Raises:
    TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not.
    TypeError: If the sequence types of `shallow_tree` are different from
      `input_tree`.
    ValueError: If the sequence lengths of `shallow_tree` are different from
      `input_tree`.

  Returns:
    Result of repeatedly applying `func`. Has the same structure layout as
    `shallow_tree`.
  """
  if not inputs:
    raise ValueError("Cannot map over no sequences")

  check_types = kwargs.pop("check_types", True)
  expand_composites = kwargs.pop("expand_composites", False)
  if expand_composites:
    is_seq = is_sequence_or_composite
  else:
    is_seq = is_sequence

  # Every input must be an expansion of the shallow structure.
  for tree in inputs:
    assert_shallow_structure(shallow_tree, tree, check_types=check_types,
                             expand_composites=expand_composites)

  # Partially flatten each input, then walk the flattened lists in lockstep,
  # prefixing each call with the leaf's tuple path in the shallow structure.
  flattened_inputs = [
      flatten_up_to(shallow_tree, tree, check_types,
                    expand_composites=expand_composites)
      for tree in inputs
  ]
  leaf_paths = [p for p, _ in _yield_flat_up_to(shallow_tree, inputs[0],
                                                is_seq)]
  mapped = [func(*call_args, **kwargs)
            for call_args in zip(leaf_paths, *flattened_inputs)]
  # Repack the per-leaf results into the shallow structure's layout.
  return pack_sequence_as(structure=shallow_tree, flat_sequence=mapped,
                          expand_composites=expand_composites)
def get_traverse_shallow_structure(traverse_fn, structure,
                                   expand_composites=False):
  """Generates a shallow structure from a `traverse_fn` and `structure`.

  `traverse_fn` must accept any possible subtree of `structure` and return
  a depth=1 structure containing `True` or `False` values, describing which
  of the top-level subtrees may be traversed. It may also return a scalar
  `True` or `False`, meaning "traversal is OK / not OK for all subtrees."

  Examples are available in the unit tests (nest_test.py).

  Args:
    traverse_fn: Function taking a substructure and returning either a scalar
      `bool` (whether to traverse that substructure or not) or a depth=1
      shallow structure of the same type, describing which parts of the
      substructure to traverse.
    structure: The structure to traverse.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A shallow structure containing python bools, which can be passed to
    `map_structure_up_to` and `flatten_up_to`.

  Raises:
    TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
      or a structure with depth higher than 1 for a sequence input,
      or if any leaf values in the returned structure or scalar are not type
      `bool`.
  """
  is_seq = is_sequence_or_composite if expand_composites else is_sequence
  to_traverse = traverse_fn(structure)
  if not is_seq(structure):
    # Leaf node: traverse_fn must answer with a plain bool.
    if not isinstance(to_traverse, bool):
      raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
                      % (to_traverse, structure))
    return to_traverse
  level_traverse = []
  if isinstance(to_traverse, bool):
    if not to_traverse:
      # Do not traverse this substructure at all. Exit early.
      return False
    else:
      # Traverse the entire substructure.
      for branch in _yield_value(structure):
        level_traverse.append(
            get_traverse_shallow_structure(traverse_fn, branch,
                                           expand_composites=expand_composites))
  elif not is_seq(to_traverse):
    raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
                    % (to_traverse, structure))
  else:
    # Traverse some subset of this substructure.
    assert_shallow_structure(to_traverse, structure,
                             expand_composites=expand_composites)
    for t, branch in zip(_yield_value(to_traverse),
                         _yield_value(structure)):
      if not isinstance(t, bool):
        raise TypeError(
            "traverse_fn didn't return a depth=1 structure of bools.  saw: %s "
            " for structure: %s" % (to_traverse, structure))
      if t:
        # Fix: propagate expand_composites into the recursive call. It was
        # previously dropped on this path (unlike the full-traversal branch
        # above), silently reverting to default composite handling.
        level_traverse.append(
            get_traverse_shallow_structure(traverse_fn, branch,
                                           expand_composites=expand_composites))
      else:
        level_traverse.append(False)
  return _sequence_like(structure, level_traverse)
def yield_flat_paths(nest, expand_composites=False):
  """Yields the flat path of every leaf in `nest`.

  Paths are tuples of str-convertible objects, which may include integers or
  other types used as indices in a dict. Paths are produced in the same
  order `nest.flatten` would visit the leaves, which is handy for naming
  Tensors such that the TF scope structure matches the tuple structure.

  E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`

  ```shell
  nest.flatten(value)
  [3, 23, 42]
  list(nest.yield_flat_paths(value))
  [('a',), ('b', 'c'), ('b', 'd')]
  ```

  ```shell
  list(nest.yield_flat_paths({'a': [3]}))
  [('a', 0)]
  list(nest.yield_flat_paths({'a': 3}))
  [('a',)]
  ```

  Args:
    nest: the value to produce a flattened paths list for.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Yields:
    Tuples containing index or key values which form the path to a specific
    leaf value in the nested structure.
  """
  if expand_composites:
    predicate = is_sequence_or_composite
  else:
    predicate = is_sequence
  # Flatten `nest` against itself so every leaf yields its own path.
  for flat_path, _ in _yield_flat_up_to(nest, nest, predicate):
    yield flat_path
def flatten_with_joined_string_paths(structure, separator="/",
                                     expand_composites=False):
  """Returns `(joined_string_path, leaf)` tuples for each leaf of `structure`.

  Tuples appear in the same order as `nest.flatten`, letting callers keep
  track of where in the structure each data element was located. See
  `nest.yield_flat_paths` for more information.

  Args:
    structure: the nested structure to flatten.
    separator: string to separate levels of hierarchy in the results, defaults
      to '/'.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A list of (string, data element) tuples.
  """
  leaves = flatten(structure, expand_composites=expand_composites)
  joined_paths = [
      separator.join(str(element) for element in path)
      for path in yield_flat_paths(structure,
                                   expand_composites=expand_composites)
  ]
  return list(zip(joined_paths, leaves))
def flatten_with_tuple_paths(structure, expand_composites=False):
  """Returns `(tuple_path, leaf_element)` pairs for each leaf of `structure`.

  Pairs follow `nest.flatten` order, so callers can recover where in the
  structure each flattened element lived. See `nest.yield_flat_paths` for
  more information about tuple paths.

  Args:
    structure: the nested structure to flatten.
    expand_composites: If true, then composite tensors such as tf.SparseTensor
      and tf.RaggedTensor are expanded into their component tensors.

  Returns:
    A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a
    tuple of indices and/or dictionary keys that uniquely specify the path to
    `leaf_element` within `structure`.
  """
  paths = yield_flat_paths(structure, expand_composites=expand_composites)
  leaves = flatten(structure, expand_composites=expand_composites)
  return list(zip(paths, leaves))
def _list_to_tuple(structure):
  """Deeply replaces every list in `structure` with a tuple.

  The fork of nest that tf.data uses treats lists as single elements, while
  tf.nest treats them as structures to recurse into. Keras has chosen to
  adopt the latter convention, and must therefore deeply replace all lists
  with tuples before passing structures to Dataset.from_generator.

  Args:
    structure: A nested structure to be remapped.

  Returns:
    `structure` with every list (at any depth) replaced by a tuple.
  """
  def _rebuild(instance, args):
    # Lists become tuples; everything else is repacked as its own type.
    if isinstance(instance, list):
      return tuple(args)
    return _sequence_like(instance, args)

  return _pack_sequence_as(structure, flatten(structure), False,
                           sequence_fn=_rebuild)
# TODO(b/143287251): Only have `list_to_tuple`
# Public alias for the private helper above.
list_to_tuple = _list_to_tuple

# Register the Python container ABCs (and wrapt's proxy type) with the
# pywrap_utils helpers so their type checks recognize these classes.
_pywrap_utils.RegisterType("Mapping", _collections_abc.Mapping)
_pywrap_utils.RegisterType("Sequence", _collections_abc.Sequence)
_pywrap_utils.RegisterType("MappingView", _collections_abc.MappingView)
_pywrap_utils.RegisterType("ObjectProxy", _wrapt.ObjectProxy)
| {
"content_hash": "79e3aac01ceae2cb0e2eb5bbbc6f9140",
"timestamp": "",
"source": "github",
"line_count": 1375,
"max_line_length": 105,
"avg_line_length": 38.99854545454546,
"alnum_prop": 0.6557820338287675,
"repo_name": "jhseu/tensorflow",
"id": "211e0cc5997f2e5bbd853f4444ab94a6198a49a0",
"size": "54313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/util/nest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "27480"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "875455"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "80051513"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112748"
},
{
"name": "Go",
"bytes": "1853641"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1729057"
},
{
"name": "Makefile",
"bytes": "62498"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "304661"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19515"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "36791185"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "56741"
},
{
"name": "Shell",
"bytes": "685877"
},
{
"name": "Smarty",
"bytes": "35147"
},
{
"name": "Starlark",
"bytes": "3504187"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import json
import six
from st2common.constants import action as action_constants
from st2common import log as logging
from st2common.persistence import action as action_access
from st2common.services import action as action_service
from st2common.policies.concurrency import BaseConcurrencyApplicator
from st2common.services import coordination
__all__ = [
'ConcurrencyByAttributeApplicator'
]
LOG = logging.getLogger(__name__)
class ConcurrencyByAttributeApplicator(BaseConcurrencyApplicator):
    def __init__(self, policy_ref, policy_type, threshold=0, action='delay', attributes=None):
        """Initialize the applicator.

        :param policy_ref: Reference of the policy being applied.
        :param policy_type: Type of the policy.
        :param threshold: Maximum number of concurrent executions allowed.
        :param action: What to do when the threshold is reached ('delay' by default).
        :param attributes: Names of liveaction parameters whose values group
                           executions for concurrency counting.
        """
        super(ConcurrencyByAttributeApplicator, self).__init__(policy_ref=policy_ref,
                                                               policy_type=policy_type,
                                                               threshold=threshold,
                                                               action=action)
        # Parameters whose values partition executions for concurrency counting.
        self.attributes = attributes or []
def _get_lock_uid(self, target):
meta = {
'policy_type': self._policy_type,
'action': target.action,
'attributes': self.attributes
}
return json.dumps(meta)
def _get_filters(self, target):
filters = {('parameters__%s' % k): v
for k, v in six.iteritems(target.parameters)
if k in self.attributes}
filters['action'] = target.action
filters['status'] = None
return filters
def _apply_before(self, target):
# Get the count of scheduled and running instances of the action.
filters = self._get_filters(target)
# Get the count of scheduled instances of the action.
filters['status'] = action_constants.LIVEACTION_STATUS_SCHEDULED
scheduled = action_access.LiveAction.count(**filters)
# Get the count of running instances of the action.
filters['status'] = action_constants.LIVEACTION_STATUS_RUNNING
running = action_access.LiveAction.count(**filters)
count = scheduled + running
# Mark the execution as scheduled if threshold is not reached or delayed otherwise.
if count < self.threshold:
LOG.debug('There are %s instances of %s in scheduled or running status. '
'Threshold of %s is not reached. Action execution will be scheduled.',
count, target.action, self._policy_ref)
status = action_constants.LIVEACTION_STATUS_SCHEDULED
else:
action = 'delayed' if self.policy_action == 'delay' else 'canceled'
LOG.debug('There are %s instances of %s in scheduled or running status. '
'Threshold of %s is reached. Action execution will be %s.',
count, target.action, self._policy_ref, action)
status = self._get_status_for_policy_action(action=self.policy_action)
# Update the status in the database. Publish status for cancellation so the
# appropriate runner can cancel the execution. Other statuses are not published
# because they will be picked up by the worker(s) to be processed again,
# leading to duplicate action executions.
publish = (status == action_constants.LIVEACTION_STATUS_CANCELING)
target = action_service.update_status(target, status, publish=publish)
return target
    def apply_before(self, target):
        """Apply the concurrency policy before ``target`` is scheduled.

        No-op unless the live action is in the REQUESTED state. The real
        decision logic lives in :meth:`_apply_before` and is executed under a
        distributed lock so only one scheduler evaluates this action at a time.

        :param target: the LiveAction about to be scheduled.
        :return: ``target``, possibly with an updated status.
        """
        # Exit if target not in schedulable state.
        if target.status != action_constants.LIVEACTION_STATUS_REQUESTED:
            LOG.debug('The live action is not schedulable therefore the policy '
                      '"%s" cannot be applied. %s', self._policy_ref, target)
            return target
        # Warn users that the coordination service is not configured.
        if not coordination.configured():
            LOG.warn('Coordination service is not configured. Policy enforcement is best effort.')
        # Acquire a distributed lock before querying the database to make sure that only one
        # scheduler is scheduling execution for this action. Even if the coordination service
        # is not configured, the fake driver using zake or the file driver can still acquire
        # a lock for the local process or server respectively.
        lock_uid = self._get_lock_uid(target)
        LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid)
        with self.coordinator.get_lock(lock_uid):
            target = self._apply_before(target)
        return target
def _apply_after(self, target):
# Schedule the oldest delayed executions.
filters = self._get_filters(target)
filters['status'] = action_constants.LIVEACTION_STATUS_DELAYED
requests = action_access.LiveAction.query(order_by=['start_timestamp'], limit=1, **filters)
if requests:
action_service.update_status(
requests[0], action_constants.LIVEACTION_STATUS_REQUESTED, publish=True)
def apply_after(self, target):
# Warn users that the coordination service is not configured.
if not coordination.configured():
LOG.warn('Coordination service is not configured. Policy enforcement is best effort.')
# Acquire a distributed lock before querying the database to make sure that only one
# scheduler is scheduling execution for this action. Even if the coordination service
# is not configured, the fake driver using zake or the file driver can still acquire
# a lock for the local process or server respectively.
lock_uid = self._get_lock_uid(target)
LOG.debug('%s is attempting to acquire lock "%s".', self.__class__.__name__, lock_uid)
with self.coordinator.get_lock(lock_uid):
self._apply_after(target)
return target
| {
"content_hash": "81458dcf9730b2c40bd5f96973633d59",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 99,
"avg_line_length": 45.859375,
"alnum_prop": 0.6417376490630323,
"repo_name": "peak6/st2",
"id": "cf04735b77679c3df784c152137246ab1498f9a1",
"size": "6650",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2actions/st2actions/policies/concurrency_by_attr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "42545"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4012891"
},
{
"name": "Shell",
"bytes": "41016"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
import array
import sys
# Python 2/3 compatibility aliases for names used throughout this module.
# Compare the structured version tuple instead of the `sys.version` string:
# lexicographic string comparison is fragile (e.g. a major version >= 10
# would not compare the way one expects).
if sys.version_info >= (3,):
    basestring = str
    xrange = range
    unicode = str
from abc import ABCMeta
import copy
import numpy as np
from py4j.java_gateway import JavaObject
from pyspark.ml.linalg import DenseVector, Vector
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params', 'TypeConverters']
class Param(object):
    """
    A param with self-contained documentation.
    .. versionadded:: 1.3.0
    """
    def __init__(self, parent, name, doc, typeConverter=None):
        # The owner must be Identifiable; only its uid is kept, not the object.
        if not isinstance(parent, Identifiable):
            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
        self.parent = parent.uid
        self.name = str(name)
        self.doc = str(doc)
        if typeConverter is None:
            typeConverter = TypeConverters.identity
        self.typeConverter = typeConverter
    def _copy_new_parent(self, parent):
        """Copy the current param to a new parent, must be a dummy param."""
        if self.parent != "undefined":
            raise ValueError("Cannot copy from non-dummy parent %s." % parent)
        dup = copy.copy(self)
        dup.parent = parent.uid
        return dup
    def __str__(self):
        # Unique key: "<parent uid>__<param name>".
        return "%s__%s" % (self.parent, self.name)
    def __repr__(self):
        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
    def __hash__(self):
        # Hash/eq are based on (parent, name) via the __str__ key; doc is ignored.
        return hash(str(self))
    def __eq__(self, other):
        return (isinstance(other, Param)
                and self.parent == other.parent
                and self.name == other.name)
class TypeConverters(object):
    """
    .. note:: DeveloperApi
    Factory methods for common type conversion functions for `Param.typeConverter`.
    .. versionadded:: 2.0.0
    """
    @staticmethod
    def _is_numeric(value):
        # Exact type membership (not isinstance) so that bool -- a subclass of
        # int -- is rejected. The __name__ check matches Python 2's `long`
        # without naming a type that does not exist on Python 3.
        vtype = type(value)
        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
    @staticmethod
    def _is_integer(value):
        # Numeric with an integral value, e.g. 3 or 3.0 but not 3.5.
        return TypeConverters._is_numeric(value) and float(value).is_integer()
    @staticmethod
    def _can_convert_to_list(value):
        # Sequence-like inputs accepted by toList; `xrange` is aliased to
        # `range` on Python 3 at the top of this module.
        vtype = type(value)
        return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
    @staticmethod
    def _can_convert_to_string(value):
        # NOTE(review): np.unicode_/np.string_ are legacy aliases removed in
        # NumPy 2.0 -- confirm the pinned NumPy version before upgrading.
        vtype = type(value)
        return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
    @staticmethod
    def identity(value):
        """
        Dummy converter that just returns value.
        """
        return value
    @staticmethod
    def toList(value):
        """
        Convert a value to a list, if possible.
        """
        if type(value) == list:
            return value
        elif type(value) in [np.ndarray, tuple, xrange, array.array]:
            return list(value)
        elif isinstance(value, Vector):
            # Vectors are converted through their dense array representation.
            return list(value.toArray())
        else:
            raise TypeError("Could not convert %s to list" % value)
    @staticmethod
    def toListFloat(value):
        """
        Convert a value to list of floats, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return [float(v) for v in value]
        raise TypeError("Could not convert %s to list of floats" % value)
    @staticmethod
    def toListInt(value):
        """
        Convert a value to list of ints, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_integer(v), value)):
                return [int(v) for v in value]
        raise TypeError("Could not convert %s to list of ints" % value)
    @staticmethod
    def toListString(value):
        """
        Convert a value to list of strings, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
                return [TypeConverters.toString(v) for v in value]
        raise TypeError("Could not convert %s to list of strings" % value)
    @staticmethod
    def toVector(value):
        """
        Convert a value to a MLlib Vector, if possible.
        """
        if isinstance(value, Vector):
            return value
        elif TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return DenseVector(value)
        raise TypeError("Could not convert %s to vector" % value)
    @staticmethod
    def toFloat(value):
        """
        Convert a value to a float, if possible.
        """
        if TypeConverters._is_numeric(value):
            return float(value)
        else:
            raise TypeError("Could not convert %s to float" % value)
    @staticmethod
    def toInt(value):
        """
        Convert a value to an int, if possible.
        """
        if TypeConverters._is_integer(value):
            return int(value)
        else:
            raise TypeError("Could not convert %s to int" % value)
    @staticmethod
    def toString(value):
        """
        Convert a value to a string, if possible.
        """
        if isinstance(value, basestring):
            return value
        elif type(value) in [np.string_, np.str_]:
            return str(value)
        elif type(value) == np.unicode_:
            # `unicode` is aliased to `str` on Python 3 at the module top.
            return unicode(value)
        else:
            raise TypeError("Could not convert %s to string type" % type(value))
    @staticmethod
    def toBoolean(value):
        """
        Convert a value to a boolean, if possible.
        """
        # Strict: no truthiness coercion, only actual bools are accepted.
        if type(value) == bool:
            return value
        else:
            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
class Params(Identifiable):
    """
    Components that take parameters. This also provides an internal
    param map to store parameter values attached to the instance.
    .. versionadded:: 1.3.0
    """
    __metaclass__ = ABCMeta
    def __init__(self):
        super(Params, self).__init__()
        #: internal param map for user-supplied values param map
        self._paramMap = {}
        #: internal param map for default values
        self._defaultParamMap = {}
        #: value returned by :py:func:`params`
        self._params = None
        # Copy the params from the class to the object
        self._copy_params()
    def _copy_params(self):
        """
        Copy all params defined on the class to current object.
        """
        cls = type(self)
        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
        for name, param in src_params:
            # Re-parent each class-level (dummy) param onto this instance.
            setattr(self, name, param._copy_new_parent(self))
    @property
    def params(self):
        """
        Returns all params ordered by name. The default implementation
        uses :py:func:`dir` to get all attributes of type
        :py:class:`Param`.
        """
        if self._params is None:
            # Exclude "params" itself and class-level properties so the dir()
            # scan does not recurse back into this property.
            self._params = list(filter(lambda attr: isinstance(attr, Param),
                                       [getattr(self, x) for x in dir(self) if x != "params" and
                                        not isinstance(getattr(type(self), x, None), property)]))
        return self._params
    def explainParam(self, param):
        """
        Explains a single param and returns its name, doc, and optional
        default value and user-supplied value in a string.
        """
        param = self._resolveParam(param)
        values = []
        if self.isDefined(param):
            if param in self._defaultParamMap:
                values.append("default: %s" % self._defaultParamMap[param])
            if param in self._paramMap:
                values.append("current: %s" % self._paramMap[param])
        else:
            values.append("undefined")
        valueStr = "(" + ", ".join(values) + ")"
        return "%s: %s %s" % (param.name, param.doc, valueStr)
    def explainParams(self):
        """
        Returns the documentation of all params with their optionally
        default values and user-supplied values.
        """
        return "\n".join([self.explainParam(param) for param in self.params])
    def getParam(self, paramName):
        """
        Gets a param by its name.
        """
        param = getattr(self, paramName)
        if isinstance(param, Param):
            return param
        else:
            raise ValueError("Cannot find param with name %s." % paramName)
    def isSet(self, param):
        """
        Checks whether a param is explicitly set by user.
        """
        param = self._resolveParam(param)
        return param in self._paramMap
    def hasDefault(self, param):
        """
        Checks whether a param has a default value.
        """
        param = self._resolveParam(param)
        return param in self._defaultParamMap
    def isDefined(self, param):
        """
        Checks whether a param is explicitly set by user or has
        a default value.
        """
        return self.isSet(param) or self.hasDefault(param)
    def hasParam(self, paramName):
        """
        Tests whether this instance contains a param with a given
        (string) name.
        """
        if isinstance(paramName, str):
            p = getattr(self, paramName, None)
            return isinstance(p, Param)
        else:
            raise TypeError("hasParam(): paramName must be a string")
    def getOrDefault(self, param):
        """
        Gets the value of a param in the user-supplied param map or its
        default value. Raises an error if neither is set.
        """
        # User-supplied value wins over the default; KeyError if neither set.
        param = self._resolveParam(param)
        if param in self._paramMap:
            return self._paramMap[param]
        else:
            return self._defaultParamMap[param]
    def extractParamMap(self, extra=None):
        """
        Extracts the embedded default param values and user-supplied
        values, and then merges them with extra values from input into
        a flat param map, where the latter value is used if there exist
        conflicts, i.e., with ordering: default param values <
        user-supplied values < extra.
        :param extra: extra param values
        :return: merged param map
        """
        if extra is None:
            extra = dict()
        paramMap = self._defaultParamMap.copy()
        paramMap.update(self._paramMap)
        paramMap.update(extra)
        return paramMap
    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. The default implementation creates a
        shallow copy using :py:func:`copy.copy`, and then copies the
        embedded and extra parameters over and returns the copy.
        Subclasses should override this method if the default approach
        is not sufficient.
        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = copy.copy(self)
        # Reset the maps on the shallow copy so they are not shared with self,
        # then repopulate them via _copyValues.
        that._paramMap = {}
        that._defaultParamMap = {}
        return self._copyValues(that, extra)
    def _shouldOwn(self, param):
        """
        Validates that the input param belongs to this Params instance.
        """
        if not (self.uid == param.parent and self.hasParam(param.name)):
            raise ValueError("Param %r does not belong to %r." % (param, self))
    def _resolveParam(self, param):
        """
        Resolves a param and validates the ownership.
        :param param: param name or the param instance, which must
                      belong to this Params instance
        :return: resolved param instance
        """
        if isinstance(param, Param):
            self._shouldOwn(param)
            return param
        elif isinstance(param, str):
            return self.getParam(param)
        else:
            raise ValueError("Cannot resolve %r as a param." % param)
    @staticmethod
    def _dummy():
        """
        Returns a dummy Params instance used as a placeholder to
        generate docs.
        """
        dummy = Params()
        # "undefined" is the sentinel parent checked by Param._copy_new_parent.
        dummy.uid = "undefined"
        return dummy
    def _set(self, **kwargs):
        """
        Sets user-supplied params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            if value is not None:
                # None bypasses conversion so it can be stored as "unset-like".
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
            self._paramMap[p] = value
        return self
    def _clear(self, param):
        """
        Clears a param from the param map if it has been explicitly set.
        """
        if self.isSet(param):
            del self._paramMap[param]
    def _setDefault(self, **kwargs):
        """
        Sets default params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            # JavaObject defaults come from the JVM side and are stored as-is.
            if value is not None and not isinstance(value, JavaObject):
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid default param value given for param "%s". %s'
                                    % (p.name, e))
            self._defaultParamMap[p] = value
        return self
    def _copyValues(self, to, extra=None):
        """
        Copies param values from this instance to another instance for
        params shared by them.
        :param to: the target instance
        :param extra: extra params to be copied
        :return: the target instance with param values copied
        """
        paramMap = self._paramMap.copy()
        if extra is not None:
            paramMap.update(extra)
        for param in self.params:
            # copy default params
            if param in self._defaultParamMap and to.hasParam(param.name):
                to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
            # copy explicitly set params
            if param in paramMap and to.hasParam(param.name):
                # Go through _set so the target's own converter/validation runs.
                to._set(**{param.name: paramMap[param]})
        return to
    def _resetUid(self, newUid):
        """
        Changes the uid of this instance. This updates both
        the stored uid and the parent uid of params and param maps.
        This is used by persistence (loading).
        :param newUid: new uid to use, which is converted to unicode
        :return: same instance, but with the uid and Param.parent values
                 updated, including within param maps
        """
        newUid = unicode(newUid)
        self.uid = newUid
        newDefaultParamMap = dict()
        newParamMap = dict()
        # Rebuild both maps because Param hashing depends on parent+name,
        # which changes when the parent uid changes.
        for param in self.params:
            newParam = copy.copy(param)
            newParam.parent = newUid
            if param in self._defaultParamMap:
                newDefaultParamMap[newParam] = self._defaultParamMap[param]
            if param in self._paramMap:
                newParamMap[newParam] = self._paramMap[param]
            param.parent = newUid
        self._defaultParamMap = newDefaultParamMap
        self._paramMap = newParamMap
        return self
| {
"content_hash": "16fd0870a1ce899a1908aca9754487b4",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 99,
"avg_line_length": 34.15157894736842,
"alnum_prop": 0.5603501417827641,
"repo_name": "wangyixiaohuihui/spark2-annotation",
"id": "8516289796d48e848829fb6172fa643029823cb7",
"size": "17022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/param/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33815"
},
{
"name": "Batchfile",
"bytes": "24294"
},
{
"name": "C",
"bytes": "1542"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10012"
},
{
"name": "HiveQL",
"bytes": "1828674"
},
{
"name": "Java",
"bytes": "3737029"
},
{
"name": "JavaScript",
"bytes": "143063"
},
{
"name": "Makefile",
"bytes": "7980"
},
{
"name": "PLpgSQL",
"bytes": "9666"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2248750"
},
{
"name": "R",
"bytes": "1027534"
},
{
"name": "Roff",
"bytes": "14420"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "22897473"
},
{
"name": "Shell",
"bytes": "156941"
},
{
"name": "Thrift",
"bytes": "33665"
},
{
"name": "q",
"bytes": "147332"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: introduces a versioned TermsOfService model,
    # drops the old Profile.tos_accepted field and replaces it with a nullable
    # FK ("accepted_tos") pointing at the accepted TOS revision.
    dependencies = [
        ('userprofile', '0022_auto_20190519_1105'),
    ]
    operations = [
        migrations.CreateModel(
            name='TermsOfService',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Publiseringsdato')),
            ],
        ),
        # Removing the old flag is destructive: prior acceptance state is lost.
        migrations.RemoveField(
            model_name='profile',
            name='tos_accepted',
        ),
        migrations.AddField(
            model_name='profile',
            name='accepted_tos',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='userprofile.TermsOfService', verbose_name='Seneste aksepterte TOS'),
        ),
    ]
| {
"content_hash": "969cdf74d6d46807b3a31284eeb928f5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 177,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6019230769230769,
"repo_name": "hackerspace-ntnu/website",
"id": "311bf6ecdef7ab985658b0ee930d2c22dd49ae44",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/migrations/0023_auto_20200205_2055.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
} |
import os
import tarfile
import uuid
from datetime import datetime, timedelta
from io import BytesIO
import pytest
import yaml
from indico.core.db.sqlalchemy.links import LinkType
from indico.modules.attachments.util import get_attached_items
from indico.modules.events.contributions import Contribution
from indico.modules.events.export import export_event, import_event
from indico.modules.events.sessions import Session
from indico.util.date_time import as_utc
class _MockUUID:
def __init__(self):
self.counter = 0
def uuid4(self):
u = uuid.UUID(int=self.counter, version=4)
self.counter += 1
return u
@pytest.fixture
def reproducible_uuids(monkeypatch):
    """Make UUIDs generated by the event export module deterministic
    (counter-based) so exported data can be compared against fixtures."""
    generator = _MockUUID()
    monkeypatch.setattr('indico.modules.events.export.uuid4', generator.uuid4)
@pytest.fixture
def static_indico_version(monkeypatch):
    # Pin the indico version string so tests comparing exported metadata
    # against static fixtures do not depend on the installed version.
    monkeypatch.setattr('indico.__version__', '1.3.3.7')
@pytest.mark.usefixtures('reproducible_uuids', 'static_indico_version')
def test_event_export(db, dummy_event, monkeypatch):
    """Export a minimal event (incl. deleted session/contribution) and compare
    the resulting archive byte-for-byte against the export_test_1.yaml fixture."""
    # Freeze the export timestamp so the output matches the fixture.
    monkeypatch.setattr('indico.modules.events.export.now_utc', lambda: as_utc(datetime(2017, 8, 24, 9, 0, 0)))
    f = BytesIO()
    dummy_event.created_dt = as_utc(datetime(2017, 8, 24, 0, 0, 0))
    dummy_event.start_dt = as_utc(datetime(2017, 8, 24, 10, 0, 0))
    dummy_event.end_dt = as_utc(datetime(2017, 8, 24, 12, 0, 0))
    # Deleted objects are included on purpose: the export should contain them.
    s = Session(event=dummy_event, title='sd', is_deleted=True)
    Contribution(event=dummy_event, title='c1', duration=timedelta(minutes=30))
    Contribution(event=dummy_event, title='c2', session=s, duration=timedelta(minutes=30), is_deleted=True)
    db.session.flush()
    export_event(dummy_event, f)
    f.seek(0)
    with open(os.path.join(os.path.dirname(__file__), 'export_test_1.yaml')) as ref_file:
        data_yaml_content = ref_file.read()
    # check composition of tarfile and data.yaml content
    with tarfile.open(fileobj=f) as tarf:
        assert tarf.getnames() == ['data.yaml']
        assert tarf.extractfile('data.yaml').read().decode() == data_yaml_content
@pytest.mark.usefixtures('reproducible_uuids')
def test_event_attachment_export(db, dummy_event, dummy_attachment):
    """Export an event carrying an attachment and verify both the serialized
    metadata (data.yaml) and the attached file's bytes in the archive."""
    s = Session(event=dummy_event, title='sd', is_deleted=True)
    Contribution(event=dummy_event, title='c1', duration=timedelta(minutes=30))
    Contribution(event=dummy_event, title='c2', session=s, duration=timedelta(minutes=30), is_deleted=True)
    # Link the attachment's folder directly to the event.
    dummy_attachment.folder.event = dummy_event
    dummy_attachment.folder.linked_event = dummy_event
    dummy_attachment.folder.link_type = LinkType.event
    dummy_attachment.file.save(BytesIO(b'hello world'))
    db.session.flush()
    f = BytesIO()
    export_event(dummy_event, f)
    f.seek(0)
    with tarfile.open(fileobj=f) as tarf:
        data_file = tarf.extractfile('data.yaml')
        data = yaml.unsafe_load(data_file)
        objs = data['objects']
        event_uid = objs[0][1]['id'][1]
        # check that the exported metadata contains all the right objects
        assert [obj[0] for obj in objs] == ['events.events', 'events.sessions', 'events.contributions',
                                            'events.contributions', 'attachments.folders', 'attachments.attachments',
                                            'attachments.files']
        # check that the attached file's metadata is included
        assert objs[5][1]['title'] == 'dummy_attachment'
        assert objs[5][1]['folder_id'] is not None
        assert objs[4][1]['title'] == 'dummy_folder'
        assert objs[4][1]['linked_event_id'][1] == event_uid
        file_ = objs[6][1]['__file__'][1]
        assert file_['filename'] == 'dummy_file.txt'
        assert file_['content_type'] == 'text/plain'
        assert file_['size'] == 11
        assert file_['md5'] == '5eb63bbbe01eeed093cb22bb8f5acdc3'
        # check that the file itself was included (and verify content)
        assert tarf.getnames() == ['00000000-0000-4000-8000-000000000013', 'data.yaml']
        assert tarf.extractfile('00000000-0000-4000-8000-000000000013').read() == b'hello world'
@pytest.mark.usefixtures('static_indico_version')
def test_event_import(db, dummy_user):
    """Build an export archive by hand from the export_test_2.yaml fixture,
    import it, and verify the restored event, creator and attachment."""
    with open(os.path.join(os.path.dirname(__file__), 'export_test_2.yaml')) as ref_file:
        data_yaml_content = ref_file.read()
    data_yaml = BytesIO(data_yaml_content.encode())
    tar_buffer = BytesIO()
    # User should be matched by e-mail
    dummy_user.email = '1337@example.com'
    db.session.flush()
    # create a tar file artificially, using the provided YAML
    with tarfile.open(mode='w', fileobj=tar_buffer) as tarf:
        tar_info = tarfile.TarInfo('data.yaml')
        tar_info.size = len(data_yaml_content)
        tarf.addfile(tar_info, data_yaml)
        # The attachment payload referenced by the fixture ('hello world').
        tar_info = tarfile.TarInfo('00000000-0000-4000-8000-00000000001c')
        tar_info.size = 11
        tarf.addfile(tar_info, BytesIO(b'hello world'))
    tar_buffer.seek(0)
    e = import_event(tar_buffer, create_users=False)
    # Check that event metadata is fine
    assert e.title == 'dummy#0'
    assert e.creator == dummy_user
    assert e.created_dt == as_utc(datetime(2017, 8, 24, 15, 28, 42, 652626))
    assert e.start_dt == as_utc(datetime(2017, 8, 24, 10, 0, 0))
    assert e.end_dt == as_utc(datetime(2017, 8, 24, 12, 0, 0))
    # Check that attachment metadata is fine
    assert get_attached_items(e)['files'] == []
    folder = get_attached_items(e)['folders'][0]
    assert folder.title == 'dummy_folder'
    attachment = folder.attachments[0]
    assert attachment.title == 'dummy_attachment'
    # Check that the actual file is accessible
    assert attachment.file.open().read() == b'hello world'
| {
"content_hash": "ef08ca34256e9405611b02be7bb3d500",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 117,
"avg_line_length": 40.09154929577465,
"alnum_prop": 0.6641489548568418,
"repo_name": "DirkHoffmann/indico",
"id": "7836950538e2999adf8d73330990f08a5123eb4f",
"size": "5907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/export_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.notificationhubs import NotificationHubsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-notificationhubs
# USAGE
python name_space_authorization_rule_create.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create (or update) a namespace authorization rule with Listen+Send
    rights on the 'nh-sdk-ns' Notification Hubs namespace and print the result.

    Auto-generated SDK sample; the credentials come from the environment via
    DefaultAzureCredential (see the module docstring above).
    """
    client = NotificationHubsManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="29cfa613-cbbc-4512-b1d6-1b3a92c7fa40",
    )
    response = client.namespaces.create_or_update_authorization_rule(
        resource_group_name="5ktrial",
        namespace_name="nh-sdk-ns",
        authorization_rule_name="sdk-AuthRules-1788",
        parameters={"properties": {"rights": ["Listen", "Send"]}},
    )
    print(response)
# x-ms-original-file: specification/notificationhubs/resource-manager/Microsoft.NotificationHubs/stable/2017-04-01/examples/Namespaces/NHNameSpaceAuthorizationRuleCreate.json
if __name__ == "__main__":
    main()
| {
"content_hash": "f2afd71f02f2592f1124e4315842e62e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 174,
"avg_line_length": 37.94285714285714,
"alnum_prop": 0.7394578313253012,
"repo_name": "Azure/azure-sdk-for-python",
"id": "517dfb8d7ad7720d19784fd77015e2918b7651aa",
"size": "1796",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/notificationhubs/azure-mgmt-notificationhubs/generated_samples/name_space_authorization_rule_create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import numpy as np
import cv2
import cvui
WINDOW_NAME = 'Mouse'

def main():
    """Interactive cvui demo: drag with any mouse button to select an area."""
    frame = np.zeros((300, 600, 3), np.uint8)

    # Init cvui and have it create the OpenCV window (cv::namedWindow).
    cvui.init(WINDOW_NAME)

    # Rectangle currently being selected with the mouse.
    selection = cvui.Rect(0, 0, 0, 0)

    while True:
        # Fill the frame with a nice color.
        frame[:] = (49, 52, 49)

        # Instructions plus the live pointer coordinates.
        cvui.text(frame, 10, 30, 'Click (any) mouse button and drag the pointer around to select an area.')
        cvui.printf(frame, 10, 50, 'Mouse pointer is at (%d,%d)', cvui.mouse().x, cvui.mouse().y)

        # cvui.mouse(query) answers questions about mouse events:
        #   DOWN / UP / CLICK are true for a single frame only,
        #   IS_DOWN stays true for as long as a button is pressed.
        if cvui.mouse(cvui.DOWN):
            # Anchor the selection where the button was pressed.
            selection.x = cvui.mouse().x
            selection.y = cvui.mouse().y

        if cvui.mouse(cvui.IS_DOWN):
            # Track the pointer while dragging.
            selection.width = cvui.mouse().x - selection.x
            selection.height = cvui.mouse().y - selection.y

            # Show the selection's origin and its current size.
            cvui.printf(frame, selection.x + 5, selection.y + 5, 0.3, 0xff0000, '(%d,%d)', selection.x, selection.y)
            cvui.printf(frame, cvui.mouse().x + 5, cvui.mouse().y + 5, 0.3, 0xff0000, 'w:%d, h:%d', selection.width, selection.height)

        if cvui.mouse(cvui.UP):
            # Collapse the selection once the button is released.
            selection.x = 0
            selection.y = 0
            selection.width = 0
            selection.height = 0

        if cvui.mouse(cvui.CLICK):
            cvui.text(frame, 10, 70, 'Mouse was clicked!')

        # Draw the selection outline.
        cvui.rect(frame, selection.x, selection.y, selection.width, selection.height, 0xff0000)

        # Must be called AFTER all UI components: handles input events and
        # shows the frame in a window like cv2.imshow() does.
        cvui.imshow(WINDOW_NAME, frame)

        # Quit on ESC.
        if cv2.waitKey(20) == 27:
            break

if __name__ == '__main__':
    main()
"content_hash": "a79457578de052edd468fea3c7a11ecb",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 161,
"avg_line_length": 37.08108108108108,
"alnum_prop": 0.6887755102040817,
"repo_name": "Dovyski/cvui",
"id": "471a5ab67583ea3cc043e2a1f1dca304264776f3",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/src/mouse/mouse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "191997"
},
{
"name": "CMake",
"bytes": "17560"
},
{
"name": "Python",
"bytes": "168294"
},
{
"name": "Shell",
"bytes": "2665"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import unittest
import numa
class NumaTestCast(unittest.TestCase):
    # NOTE(review): the class name looks like a typo for "NumaTestCase";
    # unittest discovers it through the TestCase base class so it still runs,
    # but confirm there are no external references before renaming.
    # These tests exercise the `numa` bindings and need a host where
    # numa.available() is true; most calls mutate process-wide NUMA state.
    def test_available(self):
        self.assertEqual(True, numa.available())
    def test_node_size(self):
        # Informational only: prints the memory size reported for each node.
        for node in range(numa.get_max_node()+1):
            print('Node: %d, size: %r' % (node, numa.get_node_size(node)))
    def test_preferred(self):
        print('Preferred node:', numa.get_preferred())
    def test_node_to_cpus(self):
        print('Node CPUs:', numa.node_to_cpus(numa.get_preferred()))
    def test_nodemask(self):
        # Round-trip a node set through the low-level nodemask conversion.
        if not hasattr(numa, 'set_to_numa_nodemask'):
            raise unittest.SkipTest("skipped for Cython")
        self.assertEqual(set([0]), numa.numa_nodemask_to_set(numa.set_to_numa_nodemask(set([0]))))
    def test_interleave(self):
        numa.set_interleave_mask(set([0]))
        self.assertEqual(set([0]), numa.get_interleave_mask())
    def test_zz_bind(self):
        # conflicts with test_node_to_cpus
        # ("zz" prefix makes unittest run it last, after the read-only tests)
        numa.bind(set([0]))
    def test_set_preferred(self):
        numa.set_preferred(0)
    def test_localalloc(self):
        numa.set_localalloc()
    def test_membind(self):
        numa.set_membind([0])
        self.assertEqual(set([0]), numa.get_membind())
    def test_run_on_nodemask(self):
        numa.set_run_on_node_mask(set([0]))
        self.assertEqual(set([0]), numa.get_run_on_node_mask())
    def test_get_distance(self):
        # A node's distance to itself is 10 by libnuma convention.
        self.assertEqual(10, numa.get_distance(0, 0))
    def test_affinity(self):
        numa.set_affinity(0, set([0]))
        self.assertEqual(set([0]), numa.get_affinity(0))
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "28e94bc53ec210c123b783440c785b00",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 98,
"avg_line_length": 28.637931034482758,
"alnum_prop": 0.6146899458157736,
"repo_name": "smira/py-numa",
"id": "d4a35e6a8c5920297c32f07a222e1191bedf8fae",
"size": "1711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_ctypes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23449"
}
],
"symlink_target": ""
} |
import os.path
import string
import urllib, re
from datetime import datetime
from xml.dom.minidom import parse, parseString
# Django
from django.core import serializers
from django.conf import settings
from django.db import models
# Methodmint
def pubmed(keywords, latest_query=None):
# Get matching publications from Pubmed service
# We explode the keywords append [TW] for all text-search
# then build a string for the datetime since last update
keywordl = keywords.split(',')
keywordq = '(' + '[TW] '.join(keywordl) + '[TW])' # produce[TW] this[TW] string[TW]
if latest_query == None:
timeq = ''
else:
timeq = ' AND ("%s"[EPDAT] : "3000"[EPDAT])' % latest_query.strftime("%Y/%m/%d")
print "Querying pubmed with: %s %s" % (keywordq, timeq)
f = urllib.urlopen("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&term=%s %s" % (keywordq, timeq))
# Build DOM for requested data
dom = parse(f)
f.close()
uris = []
if dom:
if dom.getElementsByTagName('Id'):
for item in dom.getElementsByTagName('Id'):
uris.append( 'pmid:%d' % int( item.childNodes[0].data ) )
# Limit max number of subsequent requests;
# we will continue from the oldest found next time
uris = uris[-25:]
return uris
| {
"content_hash": "0f477d46c7e9ce26df52f875ede325ce",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 124,
"avg_line_length": 31.53488372093023,
"alnum_prop": 0.6393805309734514,
"repo_name": "mfitzp/django-golifescience",
"id": "d692e297965937f4e27e5c375c61cea9709d51a6",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/publications/autoref.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "201813"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup, find_packages
from datapurge import __version__ as version
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
try:
    from pypandoc import convert

    def read_md(f):
        """Convert the Markdown file *f* to reStructuredText for PyPI."""
        return convert(f, 'rst')
except ImportError:
    print(
        "warning: pypandoc module not found, could not convert Markdown to RST"
    )

    def read_md(f):
        """Fallback: return the raw Markdown text unconverted."""
        # Use a context manager so the handle is closed deterministically;
        # the original open(f, 'r').read() leaked it to the GC.
        with open(f, 'r') as fh:
            return fh.read()  # noqa
# Register the package with setuptools; this metadata targets PyPI.
setup(
    name='django-datapurge',
    version=version,
    packages=find_packages(),
    include_package_data=True,
    license='BSD License',
    description=(
        'A simple Django app to easily handle '
        'cleanup of old data (sessions, nonces, etc.)'
    ),
    # The README is Markdown; read_md converts it to RST when pypandoc
    # is available, otherwise it is uploaded verbatim.
    long_description=read_md('README.md'),
    url='https://github.com/swistakm/django-datapurge',
    author='Michał Jaworski',
    author_email='swistakm@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.4',
        'Framework :: Django :: 1.5',
        'Framework :: Django :: 1.6',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
    # Test entry point used by `setup.py test`.
    test_suite='datapurge.runtests.runtests.main',
)
| {
"content_hash": "7fff7057e2f4ee86f17835563bd1f0ab",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 79,
"avg_line_length": 30.596774193548388,
"alnum_prop": 0.5951502372166578,
"repo_name": "swistakm/django-datapurge",
"id": "6d0cdd47fb00a4df4bc8985d63eac6e00162dee5",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16059"
}
],
"symlink_target": ""
} |
''' Operation tests '''
from xml.etree import ElementTree
from datetime import date, timedelta
from django.contrib.auth.models import User
from django.test import Client, TestCase
from lawe.models import Account, Transaction
class TestOperations(TestCase):
    ''' Testing operations through the API '''
    # @todo #70:15min This class tests the view but is named incorrectly,
    # and it needs to be reconciled somehow with the class that follows.
    # It was misnamed from the start, although possibly not everything
    # here relates to the view.

    def setUp(self):
        # An authenticated client acting as a regular user.
        self.user = User.objects.create_user('john', 'password')
        self.client = Client()
        self.client.force_login(self.user)

    def testPageOk(self):
        ''' The root url must exist '''
        # Given
        # When
        response = self.client.get('/')
        # Then
        self.assertEqual(response.status_code, 200)

    def testPageContainLastOperation(self):
        ''' The most recently entered operation appears in the operations window '''
        # Given
        a1 = Account.objects.create()
        a1.allow_users.add(self.user)
        a2 = Account.objects.create()
        a2.allow_users.add(self.user)
        Transaction.objects.create(debit=a1, credit=a2, amount=432, description='')
        # When
        response = self.client.get('/')
        # Then
        self.assertEqual(response.status_code, 200)
        self.assertIn('432', response.content.decode('utf8'))

    def testForbiddenCreditPost(self):
        ''' Forbid creating an operation when the credit account is not accessible '''
        # Given
        a1 = Account.objects.create()
        a1.allow_users.add(self.user)
        a2 = Account.objects.create()  # no access granted to self.user
        # When
        response = self.client.post('/', {
            'date': 0,
            'debit_id': a1.id,
            'credit_id': a2.id,
            'amount': 1,
            'unit': 'RUB',
            'description': 'Проверка запрета'
        })
        # Then
        self.assertEqual(response.status_code, 403)

    def testForbiddenDebitPost(self):
        ''' Forbid creating an operation when the debit account is not accessible '''
        # Given
        a1 = Account.objects.create()  # no access granted to self.user
        a2 = Account.objects.create()
        a2.allow_users.add(self.user)
        # When
        response = self.client.post('/', {
            'date': 0,
            'debit_id': a1.id,
            'credit_id': a2.id,
            'amount': 1,
            'unit': 'RUB',
            'description': 'Проверка запрета'
        })
        # Then
        self.assertEqual(response.status_code, 403)

    def testGrantedPost(self):
        ''' Allow operations when both accounts are accessible '''
        # Given
        a1 = Account.objects.create()
        a1.allow_users.add(self.user)
        a2 = Account.objects.create()
        a2.allow_users.add(self.user)
        # When
        response = self.client.post('/', {
            'date': 0,
            'debit_id': a1.id,
            'credit_id': a2.id,
            'amount': 1,
            'unit': 'RUB',
            'description': 'Проверка разрешения'
        })
        # Then
        self.assertEqual(response.status_code, 200)

    def testHideUnaccessibleAccounts(self):
        ''' Only permitted accounts appear in the account list '''
        # Given
        a1 = Account.objects.create(shortname='Enabled')
        a1.allow_users.add(self.user)
        Account.objects.create(shortname='Disabled')
        # When
        response = self.client.get('/')
        # Then
        self.assertEqual(response.status_code, 200)
        text = response.content.decode('utf8')
        self.assertIn('Enabled', text)
        self.assertNotIn('Disabled', text)

    def testHideOperationWithDisabledAccounts(self):
        ''' The operation list shows only operations
        where at least one account is accessible '''
        # Given
        a1 = Account.objects.create()
        a1.allow_users.add(self.user)
        a2 = Account.objects.create()
        a3 = Account.objects.create()
        Transaction.objects.create(debit=a1, credit=a2, amount=1, description='Show')
        Transaction.objects.create(debit=a2, credit=a3, amount=1, description='Hide')
        # When
        response = self.client.get('/')
        # Then
        self.assertEqual(response.status_code, 200)
        text = response.content.decode('utf8')
        self.assertIn('Show', text)
        self.assertNotIn('Hide', text)
class TestOperationsView(TestOperations):
    ''' Tests for what the server response contains '''
    # NOTE(review): inheriting from TestOperations re-runs every parent
    # test with this setUp as well - presumably intentional; confirm.

    def setUp(self):
        ''' Set-up: two accounts, both accessible to the test user '''
        super().setUp()
        self.a1 = Account.objects.create()
        self.a1.allow_users.add(self.user)
        self.a2 = Account.objects.create()
        self.a2.allow_users.add(self.user)

    def parse(self, response):
        ''' Parse the server's XML response, asserting HTTP 200 first '''
        self.assertEqual(response.status_code, 200)
        text = response.content.decode('utf8')
        return ElementTree.fromstring(text)

    def testDefaultOperationUnitsIsRub(self):
        ''' The default unit of measure is roubles (RUB) '''
        # Given
        Transaction.objects.create(debit=self.a1, credit=self.a2, amount=1, description='RUB')
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        op = root.find(".//operation[description='RUB']")
        self.assertEqual(op.find('unit').text, 'RUB')

    def testOperationUnitsIsKg(self):
        ''' Unit of measure: kilograms '''
        # Given
        Transaction.objects.create(debit=self.a1, credit=self.a2, amount=1, description='KG', unit='KG')
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        op = root.find(".//operation[description='KG']")
        self.assertEqual(op.find('unit').text, 'KG')

    def testOrderedByOpdata(self):
        ''' Transactions are displayed in opdate order '''
        # Given
        Transaction.objects.create(opdate=date.today(), debit=self.a1, credit=self.a2, amount=1)
        Transaction.objects.create(
            opdate=date.today() - timedelta(days=2),
            debit=self.a1,
            credit=self.a2,
            amount=2
        )
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        self.assertEqual(int(root.find(".//operation[1]/amount").text), 1)
        self.assertEqual(int(root.find(".//operation[last()]/amount").text), 2)

    def testForHiddenAccountsInList(self):
        ''' Not every account used in operations is available in the form '''
        # Given
        a3 = Account.objects.create()
        Transaction.objects.create(opdate=date.today(), debit=self.a1, credit=a3, amount=1)
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        self.assertEqual(int(root.find(".//account[@hidden]/id").text), a3.id)

    def test100RecordsOnFirstPage(self):
        ''' Show 100 records per page; navigation will cover the remaining pages '''
        # Given
        for n in range(150):
            Transaction.objects.create(debit=self.a1, credit=self.a2, amount=n)
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        self.assertEqual(len(root.findall(".//operation")), 100)

    def testEachOperationShownOnlyOnce(self):
        ''' Regression: after a release, operation data was duplicated '''
        # Given
        nu = User.objects.create_user('new', 'new')
        self.a2.allow_users.add(nu)
        Transaction.objects.create(debit=self.a1, credit=self.a2, amount=42)
        # When
        response = self.client.get('/')
        # Then
        root = self.parse(response)
        self.assertEqual(len(root.findall(".//operation")), 1)
| {
"content_hash": "1fb402b6542f871915ca7912dd0e5770",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 98,
"avg_line_length": 31.078341013824886,
"alnum_prop": 0.6942467378410438,
"repo_name": "DronMDF/laweb",
"id": "4db2403b750a1245dcf6afe10803073a7f9dff90",
"size": "7632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lawe/tests/testOperationsView.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "662"
},
{
"name": "HTML",
"bytes": "1876"
},
{
"name": "Python",
"bytes": "37441"
},
{
"name": "Shell",
"bytes": "199"
},
{
"name": "XSLT",
"bytes": "6310"
}
],
"symlink_target": ""
} |
"""Tests the for mactime parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mactime as mactime_formatter
from plaso.lib import eventdata
from plaso.lib import timelib_test
from plaso.parsers import mactime
from plaso.parsers import test_lib
class MactimeUnitTest(test_lib.ParserTestCase):
  """Tests for the mactime (Sleuth Kit bodyfile) parser."""

  def setUp(self):
    """Sets up the needed objects used throughout the test."""
    self._parser = mactime.MactimeParser()

  def testParse(self):
    """Tests the Parse function."""
    test_file = self._GetTestFilePath(['mactime.body'])
    event_queue_consumer = self._ParseFile(self._parser, test_file)
    event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)

    # The file contains 12 lines x 4 timestamps per line, which should be
    # 48 events in total. However several of these events have an empty
    # timestamp value and are omitted.
    # Total number of valid entries per line:
    #  1: 3
    #  2: 3
    #  3: 3
    #  4: 3
    #  5: 0
    #  6: 0
    #  7: 3
    #  8: 0
    #  9: 0
    # 10: 3
    # 11: 4
    # 12: 4
    # Total: 6 * 3 + 2 * 4 = 26
    self.assertEquals(len(event_objects), 26)

    # Test this entry:
    # 0|/a_directory/another_file|16|r/rrw-------|151107|5000|22|1337961583|
    # 1337961584|1337961585|0
    event_object = event_objects[6]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2012-05-25 15:59:43+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)
    self.assertEquals(event_object.inode, 16)

    # NOTE(review): event_objects[6] is re-fetched here and the same
    # timestamp assertions are repeated - looks like duplicated test code.
    event_object = event_objects[6]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2012-05-25 15:59:43+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.ACCESS_TIME)

    expected_string = u'/a_directory/another_file'
    self._TestGetMessageStrings(event_object, expected_string, expected_string)

    # Modification timestamp event.
    event_object = event_objects[8]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2012-05-25 15:59:44+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.MODIFICATION_TIME)

    # Change (ctime) timestamp event; also verify filename and mode string.
    event_object = event_objects[7]

    expected_timestamp = timelib_test.CopyStringToTimestamp(
        u'2012-05-25 15:59:45+00:00')
    self.assertEquals(event_object.timestamp, expected_timestamp)
    self.assertEquals(
        event_object.timestamp_desc, eventdata.EventTimestamp.CHANGE_TIME)
    self.assertEquals(event_object.filename, u'/a_directory/another_file')
    self.assertEquals(event_object.mode_as_string, u'r/rrw-------')

    event_object = event_objects[25]
    self.assertEquals(event_object.inode, 4)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "6a17178b820ab0001d519da0c7498489",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 32.71739130434783,
"alnum_prop": 0.6887043189368771,
"repo_name": "cvandeplas/plaso",
"id": "c3a44f428e52b73b9672a6c0b37d0de18424f719",
"size": "3708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/mactime_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2812257"
},
{
"name": "Shell",
"bytes": "22724"
}
],
"symlink_target": ""
} |
"""Coders for individual Variable objects."""
from typing import Any
import warnings
from functools import partial
import numpy as np
import pandas as pd
from ..core import dtypes, duck_array_ops, indexing
from ..core.pycompat import dask_array_type
from ..core.variable import Variable
class SerializationWarning(RuntimeWarning):
    """Warnings about encoding/decoding issues in serialization.

    Emitted, for example, when a variable declares multiple fill values
    or carries ``_Unsigned`` on a non-integer type (see the coders below).
    """
class VariableCoder(object):
    """Base class for encoding and decoding transformations on variables.

    We use coders for transforming variables between xarray's data model and
    a format suitable for serialization. For example, coders apply CF
    conventions for how data should be represented in netCDF files.

    Subclasses should implement encode() and decode(), which should satisfy
    the identity ``coder.decode(coder.encode(variable)) == variable``. If any
    options are necessary, they should be implemented as arguments to the
    __init__ method.

    The optional name argument to encode() and decode() exists solely for the
    sake of better error messages, and should correspond to the name of
    variables in the underlying store.
    """

    def encode(self, variable, name=None):
        # type: (Variable, Any) -> Variable
        """Convert a decoded variable to an encoded variable."""
        raise NotImplementedError

    def decode(self, variable, name=None):
        # type: (Variable, Any) -> Variable
        """Convert an encoded variable to a decoded variable."""
        raise NotImplementedError
class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin):
    """Lazily computed array holding values of elemwise-function.

    Do not construct this object directly: call lazy_elemwise_func instead.
    Values are computed upon indexing or coercion to a NumPy array.
    """

    def __init__(self, array, func, dtype):
        # Dask arrays are handled by lazy_elemwise_func via map_blocks and
        # must never reach this wrapper.
        assert not isinstance(array, dask_array_type)
        self.array = indexing.as_indexable(array)
        self.func = func
        self._dtype = dtype

    @property
    def dtype(self):
        # Result dtype of applying func, as declared by the caller.
        return np.dtype(self._dtype)

    def __getitem__(self, key):
        # Stay lazy under indexing: wrap the indexed sub-array with the
        # same deferred function.
        return type(self)(self.array[key], self.func, self.dtype)

    def __array__(self, dtype=None):
        # Materialize by evaluating func over the whole underlying array.
        # NOTE(review): the ``dtype`` argument is ignored here - confirm
        # that callers never rely on it.
        return self.func(self.array)

    def __repr__(self):
        return ("%s(%r, func=%r, dtype=%r)" %
                (type(self).__name__, self.array, self.func, self.dtype))
def lazy_elemwise_func(array, func, dtype):
    """Defer an element-wise function over ``array``.

    Parameters
    ----------
    array : any valid value of Variable._data
    func : callable
        Function to apply to indexed slices of an array. For use with dask,
        this should be a pickle-able object.
    dtype : coercible to np.dtype
        Dtype for the result of this function.

    Returns
    -------
    Either a dask.array.Array or _ElementwiseFunctionArray.
    """
    if isinstance(array, dask_array_type):
        # dask is already lazy: map the function over its blocks.
        return array.map_blocks(func, dtype=dtype)
    # Otherwise wrap in a lazy array that applies func on materialization.
    return _ElementwiseFunctionArray(array, func, dtype)
def unpack_for_encoding(var):
    """Split a variable into (dims, data, attrs, encoding) for encoders.

    ``attrs`` and ``encoding`` are shallow copies, so coders may mutate
    them without touching the original variable.
    """
    attrs = var.attrs.copy()
    encoding = var.encoding.copy()
    return var.dims, var.data, attrs, encoding
def unpack_for_decoding(var):
    """Like unpack_for_encoding, but exposes the raw ``_data`` attribute.

    Decoders wrap the (possibly lazy) underlying data instead of the
    public ``data`` accessor.
    """
    attrs = var.attrs.copy()
    encoding = var.encoding.copy()
    return var.dims, var._data, attrs, encoding
def safe_setitem(dest, key, value, name=None):
    """Set ``dest[key] = value``, refusing to overwrite an existing key.

    Raises ValueError when ``key`` is already present, since that would
    clobber an encoding field xarray relies on; ``name`` improves the
    error message.
    """
    if key not in dest:
        dest[key] = value
        return
    var_str = ' on variable {!r}'.format(name) if name else ''
    raise ValueError(
        'failed to prevent overwriting existing key {} in attrs{}. '
        'This is probably an encoding field used by xarray to describe '
        'how a variable is serialized. To proceed, remove this key from '
        "the variable's attributes manually.".format(key, var_str))
def pop_to(source, dest, key, name=None):
    """Move ``key`` from ``source`` to ``dest`` and return the value.

    None values (including a missing key) are dropped rather than copied.
    Overwriting an existing key in ``dest`` raises via safe_setitem.
    """
    value = source.pop(key, None)
    if value is None:
        return None
    safe_setitem(dest, key, value, name=name)
    return value
def _apply_mask(
data: np.ndarray,
encoded_fill_values: list,
decoded_fill_value: Any,
dtype: Any,
) -> np.ndarray:
"""Mask all matching values in a NumPy arrays."""
data = np.asarray(data, dtype=dtype)
condition = False
for fv in encoded_fill_values:
condition |= data == fv
return np.where(condition, decoded_fill_value, data)
class CFMaskCoder(VariableCoder):
    """Mask or unmask fill values according to CF conventions."""

    def encode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_encoding(variable)

        # Move _FillValue from encoding into attrs and substitute it for
        # missing data (skipped when the fill value itself is null).
        if encoding.get('_FillValue') is not None:
            fill_value = pop_to(encoding, attrs, '_FillValue', name=name)
            if not pd.isnull(fill_value):
                data = duck_array_ops.fillna(data, fill_value)

        return Variable(dims, data, attrs, encoding)

    def decode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_decoding(variable)

        # CF allows both 'missing_value' and '_FillValue'; collect every
        # non-null fill value declared by either attribute.
        raw_fill_values = [pop_to(attrs, encoding, attr, name=name)
                           for attr in ('missing_value', '_FillValue')]
        if raw_fill_values:
            encoded_fill_values = {fv for option in raw_fill_values
                                   for fv in np.ravel(option)
                                   if not pd.isnull(fv)}

            if len(encoded_fill_values) > 1:
                warnings.warn("variable {!r} has multiple fill values {}, "
                              "decoding all values to NaN."
                              .format(name, encoded_fill_values),
                              SerializationWarning, stacklevel=3)

            # Promote the dtype so the decoded fill (e.g. NaN) is
            # representable, then substitute lazily.
            dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)

            if encoded_fill_values:
                transform = partial(_apply_mask,
                                    encoded_fill_values=encoded_fill_values,
                                    decoded_fill_value=decoded_fill_value,
                                    dtype=dtype)
                data = lazy_elemwise_func(data, transform, dtype)

        return Variable(dims, data, attrs, encoding)
def _scale_offset_decoding(data, scale_factor, add_offset, dtype):
data = np.array(data, dtype=dtype, copy=True)
if scale_factor is not None:
data *= scale_factor
if add_offset is not None:
data += add_offset
return data
def _choose_float_dtype(dtype, has_offset):
"""Return a float dtype that can losslessly represent `dtype` values."""
# Keep float32 as-is. Upcast half-precision to single-precision,
# because float16 is "intended for storage but not computation"
if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):
return np.float32
# float32 can exactly represent all integers up to 24 bits
if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):
# A scale factor is entirely safe (vanishing into the mantissa),
# but a large integer offset could lead to loss of precision.
# Sensitivity analysis can be tricky, so we just use a float64
# if there's any offset at all - better unoptimised than wrong!
if not has_offset:
return np.float32
# For all other types and circumstances, we just use float64.
# (safe because eg. complex numbers are not supported in NetCDF)
return np.float64
class CFScaleOffsetCoder(VariableCoder):
    """Scale and offset variables according to CF conventions.

    Follows the formula:
        decode_values = encoded_values * scale_factor + add_offset
    """

    def encode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_encoding(variable)

        if 'scale_factor' in encoding or 'add_offset' in encoding:
            # Work in a float dtype wide enough to apply the inverse
            # transform without losing precision.
            dtype = _choose_float_dtype(data.dtype, 'add_offset' in encoding)
            data = data.astype(dtype=dtype, copy=True)
            # Invert the decode formula: subtract the offset first, then
            # divide by the scale factor; the attributes move from
            # encoding to attrs as a side effect of pop_to.
            if 'add_offset' in encoding:
                data -= pop_to(encoding, attrs, 'add_offset', name=name)
            if 'scale_factor' in encoding:
                data /= pop_to(encoding, attrs, 'scale_factor', name=name)

        return Variable(dims, data, attrs, encoding)

    def decode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_decoding(variable)

        if 'scale_factor' in attrs or 'add_offset' in attrs:
            scale_factor = pop_to(attrs, encoding, 'scale_factor', name=name)
            add_offset = pop_to(attrs, encoding, 'add_offset', name=name)
            dtype = _choose_float_dtype(data.dtype, 'add_offset' in attrs)
            # Apply the transform lazily so large arrays are not
            # materialized here.
            transform = partial(_scale_offset_decoding,
                                scale_factor=scale_factor,
                                add_offset=add_offset,
                                dtype=dtype)
            data = lazy_elemwise_func(data, transform, dtype)

        return Variable(dims, data, attrs, encoding)
class UnsignedIntegerCoder(VariableCoder):
    """Apply the netCDF ``_Unsigned`` attribute convention."""

    def encode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_encoding(variable)

        # from netCDF best practices
        # https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html
        # "_Unsigned = "true" to indicate that
        # integer data should be treated as unsigned"
        if encoding.get('_Unsigned', 'false') == 'true':
            pop_to(encoding, attrs, '_Unsigned')
            # Store in the signed integer type of the same width.
            signed_dtype = np.dtype('i%s' % data.dtype.itemsize)
            if '_FillValue' in attrs:
                # Re-express the fill value in the signed dtype as well.
                new_fill = signed_dtype.type(attrs['_FillValue'])
                attrs['_FillValue'] = new_fill
            data = duck_array_ops.around(data).astype(signed_dtype)

        return Variable(dims, data, attrs, encoding)

    def decode(self, variable, name=None):
        dims, data, attrs, encoding = unpack_for_decoding(variable)

        if '_Unsigned' in attrs:
            unsigned = pop_to(attrs, encoding, '_Unsigned')

            if data.dtype.kind == 'i':
                if unsigned == 'true':
                    # Lazily reinterpret as the unsigned type of the same
                    # width, and convert the fill value to match.
                    unsigned_dtype = np.dtype('u%s' % data.dtype.itemsize)
                    transform = partial(np.asarray, dtype=unsigned_dtype)
                    data = lazy_elemwise_func(data, transform, unsigned_dtype)
                    if '_FillValue' in attrs:
                        new_fill = unsigned_dtype.type(attrs['_FillValue'])
                        attrs['_FillValue'] = new_fill
            else:
                warnings.warn("variable %r has _Unsigned attribute but is not "
                              "of integer type. Ignoring attribute." % name,
                              SerializationWarning, stacklevel=3)

        return Variable(dims, data, attrs, encoding)
| {
"content_hash": "7d9d38768e4c0f25ac2c8d0be2c116de",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 38.06666666666667,
"alnum_prop": 0.6202414969121578,
"repo_name": "chunweiyuan/xarray",
"id": "1f74181f3b3af93075e7b3871b540f639fff671f",
"size": "10849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xarray/coding/variables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3150"
},
{
"name": "Python",
"bytes": "2336715"
}
],
"symlink_target": ""
} |
from troveclient import base
from troveclient import common
class Quotas(base.ManagerWithFind):
    """Manage :class:`Quota` information."""

    resource_class = base.Resource

    @staticmethod
    def _quotas_from_response(resp, body, url):
        """Validate a quota API response and return its 'quotas' payload.

        Raises on HTTP errors (via check_for_exceptions), on an empty
        body, or when the 'quotas' key is absent.
        """
        common.check_for_exceptions(resp, body, url)
        if not body:
            raise Exception("Call to " + url + " did not return a body.")
        if 'quotas' not in body:
            raise Exception("Missing key value 'quotas' in response body.")
        return body['quotas']

    def show(self, tenant_id):
        """Get a list of all quotas for a tenant id."""
        url = "/mgmt/quotas/%s" % tenant_id
        resp, body = self.api.client.get(url)
        return self._quotas_from_response(resp, body, url)

    def update(self, id, quotas):
        """Set limits for quotas."""
        url = "/mgmt/quotas/%s" % id
        resp, body = self.api.client.put(url, body={"quotas": quotas})
        return self._quotas_from_response(resp, body, url)

    # Appease the abc gods
    def list(self):
        pass
| {
"content_hash": "6f0bc08a9d5f52739e98f6da9ef652c2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 33.138888888888886,
"alnum_prop": 0.5892707460184409,
"repo_name": "Tesora-Release/tesora-python-troveclient",
"id": "e461c52e75ce8f3454cee13f5c763fe274252730",
"size": "1923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "troveclient/v1/quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "651825"
},
{
"name": "Shell",
"bytes": "1432"
}
],
"symlink_target": ""
} |
from django import template
from crm.date.models import Month
register = template.Library()
@register.simple_tag
def previous_month_name():
    """Template tag rendering the value of Month.previous()."""
    return Month.previous()
@register.simple_tag
def current_month_name():
    """Template tag rendering the value of Month.current()."""
    return Month.current()
@register.simple_tag
def next_month_name():
    """Template tag rendering the value of Month.next()."""
    return Month.next()
| {
"content_hash": "42c334bace6df166e080ab2cca033b97",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 33,
"avg_line_length": 15.9,
"alnum_prop": 0.7327044025157232,
"repo_name": "MattAgile/django-spa-crm",
"id": "a29a5ad07da37fd889728dd0e5165b98af3c3e7a",
"size": "318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crm/date/templatetags/month.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "2294"
},
{
"name": "CSS",
"bytes": "1977"
},
{
"name": "HTML",
"bytes": "46944"
},
{
"name": "JavaScript",
"bytes": "14998"
},
{
"name": "Nginx",
"bytes": "1255"
},
{
"name": "Puppet",
"bytes": "1990"
},
{
"name": "Python",
"bytes": "93308"
},
{
"name": "Shell",
"bytes": "2809"
}
],
"symlink_target": ""
} |
"""
This file contains miscellanous utility functions.
"""
import pandas as pd
from typing import Dict, List, Tuple
import functools
def group_count(df, grouping_attr):
    """Return a DataFrame with one row per ``grouping_attr`` value and a
    'count' column holding the number of rows in each group."""
    counts = df.groupby([grouping_attr]).size().to_frame().reset_index()
    return counts.rename(columns={0: 'count'})
def group_sum_attr(df, grouping_attr, reduced_attr):
    """Return a DataFrame summing ``reduced_attr`` per ``grouping_attr`` value."""
    summed = df.groupby([grouping_attr]).sum()[reduced_attr]
    return summed.to_frame().reset_index()
def shape_to_str(shape):
    """Render a shape as a bracketed comma-separated string, e.g. ``[2,3,4]``."""
    return "[{}]".format(",".join(map(str, shape)))
def _merge_keys_values(
keys_lists: List[List],
values_lists: List[List],
empty_placeholder: object
) -> Dict:
# Concatenate the keys lists into a set of all keys
all_keys = set(functools.reduce(lambda a, b: a+b, keys_lists))
# Create the stacked output dictionary, and fill missing values.
dicts = [dict(zip(keys, values)) for keys, values in zip(keys_lists, values_lists)]
result = {}
for key in all_keys:
for d in dicts:
if key not in d:
d[key] = empty_placeholder
result[key] = [d[key] for d in dicts]
return result
def stack_dicts(dict_list: List[dict], empty_placeholder: object=0):
    """Combine a list of dicts into one dict mapping each key to the list
    of per-dict values (``empty_placeholder`` where a dict lacks the key)."""
    key_lists = [list(d.keys()) for d in dict_list]
    value_lists = [list(d.values()) for d in dict_list]
    return _merge_keys_values(key_lists, value_lists, empty_placeholder)
def stack_dataframes(
    df_list: List[pd.DataFrame],
    names_col: str,
    values_col: str,
    empty_placeholder: object=0,
):
    """Stack the (names_col -> values_col) mappings of several DataFrames
    into one dict of per-frame value lists."""
    name_lists = [df[names_col].tolist() for df in df_list]
    value_lists = [df[values_col].tolist() for df in df_list]
    return _merge_keys_values(name_lists, value_lists, empty_placeholder)
return _merge_keys_values(names, values, empty_placeholder) | {
"content_hash": "32cb983cb66c840117811c7b8d0ced30",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 87,
"avg_line_length": 28.44776119402985,
"alnum_prop": 0.64900314795383,
"repo_name": "NVIDIA/TensorRT",
"id": "ca2edf88ebafa4f4231ba7302e54604dda63d0bf",
"size": "2595",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/experimental/trt-engine-explorer/trex/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
import pytest
import os
import warnings
from marvin import config, MarvinConfig
from marvin.core.exceptions import MarvinError, MarvinUserWarning
from brain import bconfig
from brain.core.exceptions import BrainError, BrainUserWarning
@pytest.fixture()
def netrc(monkeypatch, tmpdir):
    """Point brain's netrc lookup at a temp path with no .netrc yet."""
    # Drop to public access first so changing the netrc path is safe.
    config.access = 'public'
    tmpnet = tmpdir.mkdir('netrc').join('.netrc')
    monkeypatch.setattr(bconfig, '_netrc_path', str(tmpnet))
    yield tmpnet
@pytest.fixture()
def goodnet(netrc):
    """Create the temp .netrc file with 0600 permissions."""
    netrc.write('')
    # 0600: presumably required because netrc parsing rejects
    # group/world-readable credential files - confirm.
    os.chmod(bconfig._netrc_path, 0o600)
    yield netrc
@pytest.fixture()
def bestnet(goodnet):
    """Populate the .netrc with both SDSS hosts and switch to collab/MPL-6."""
    # NOTE(review): py.path's write() truncates by default, so the second
    # write likely replaces the data.sdss.org entry rather than appending.
    # Confirm whether write(..., mode='a') was intended here.
    goodnet.write(write('data.sdss.org'))
    goodnet.write(write('api.sdss.org'))
    config._check_access()
    config.access = 'collab'
    config.setRelease('MPL-6')
    yield goodnet
def write(host):
    """Build a .netrc machine entry for *host* with dummy 'test' credentials."""
    return ''.join([
        'machine {0}\n'.format(host),
        ' login test\n',
        ' password test\n',
        '\n',
    ])
@pytest.fixture()
def initconfig(monkeypatch):
    """Reset config to a pristine state: no _tree attribute, no release."""
    monkeypatch.delattr(config, '_tree')
    monkeypatch.setattr(config, '_release', None)
@pytest.fixture()
def set_default(monkeypatch, request):
    """Force both the current release and the configured default to request.param."""
    monkeypatch.setattr(config, '_release', request.param)
    monkeypatch.setitem(config._custom_config, 'default_release', request.param)
    config._check_config()
class TestVars(object):
    ''' test getting/setting variables '''

    @pytest.mark.parametrize('var, toval',
                             [('mode', 'remote'), ('access', 'public')])
    def test_set(self, monkeypatch, var, toval):
        # Setting a config attribute to a new valid value must stick.
        defval = config.__getattribute__(var)
        assert defval != toval
        monkeypatch.setattr(config, var, toval)
        newval = config.__getattribute__(var)
        assert newval == toval

    @pytest.mark.parametrize('var, toval',
                             [('mode', 'super'), ('access', 'always')])
    def test_set_wrong(self, var, toval):
        # Invalid values must raise ValueError naming the attribute.
        with pytest.raises(ValueError) as cm:
            config.__setattr__(var, toval)
        assert 'config.{0} must be'.format(var) in str(cm.value)
class TestAccess(object):
    ''' Tests switching between public and collab access. '''

    def test_bad_access(self):
        # Without a .netrc file, switching to collab must fail and the
        # access mode must remain public.
        assert config.access == 'public'
        with pytest.raises(BrainError) as cm:
            config.access = 'collab'
        assert 'No .netrc file found in your HOME directory!' in str(cm.value)
        assert config.access == 'public'

    def test_public_access(self, bestnet):
        # Public access removes MPLs from the allowed releases, keeps DRs.
        assert config.access == 'collab'
        assert 'MPL-5' in config._allowed_releases
        assert 'DR15' in config._allowed_releases
        config.access = 'public'
        assert 'MPL-5' not in config._allowed_releases
        assert 'DR15' in config._allowed_releases

    def test_tree(self, bestnet):
        # Switching access repoints the SAS tree environment variables.
        assert config.access == 'collab'
        assert 'mangawork' in os.environ['MANGA_SPECTRO_REDUX']
        assert 'MPL' in config.release
        config.access = 'public'
        assert 'sas/dr' in os.environ['MANGA_SPECTRO_REDUX']
        assert 'DR' in config.release
        config.access = 'collab'
        # NOTE(review): after switching back to collab the assertions still
        # expect the DR paths - presumably the DR release is kept across
        # the switch; confirm.
        assert 'sas/dr' in os.environ['MANGA_SPECTRO_REDUX']
        assert 'DR' in config.release
class TestReleases(object):
    ''' Tests for setting DR and MPL release versions. '''

    @pytest.mark.parametrize('release', [('dr15'), ('dr14'), ('mpl-5')])
    def test_tree(self, bestnet, release):
        # setRelease repoints the SAS tree: DRs get their own path
        # component, MPLs stay on mangawork.
        assert config.access == 'collab'
        assert 'mangawork' in os.environ['MANGA_SPECTRO_REDUX']
        assert 'MPL' in config.release
        config.setRelease(release)
        if 'mpl' in release:
            assert 'mangawork' in os.environ['MANGA_SPECTRO_REDUX']
        else:
            assert release in os.environ['MANGA_SPECTRO_REDUX']
        assert config.release == release.upper()

    @pytest.mark.parametrize('release', [('dr15'), ('mpl-6')])
    def test_drpall(self, bestnet, release):
        # The drpall path must follow the selected release.
        assert 'mangawork' in config.drpall
        config.setRelease(release)
        if config.drpall:
            word = 'mangawork' if 'mpl' in release else release
            assert word in config.drpall

    def test_invalid_release(self):
        # Unknown release names are rejected with MarvinError.
        with pytest.raises(MarvinError) as cm:
            config.setRelease('MPL-1')
        assert 'trying to set an invalid release version.' in str(cm.value)

    def test_invalid_dr(self):
        # setDR only accepts DRXX-style strings.
        with pytest.raises(AssertionError) as cm:
            config.setDR('MPL-5')
        assert 'Must specify a DRXX version!' in str(cm.value)

    def test_invalid_mpl(self):
        # setMPL only accepts MPL-X-style strings.
        with pytest.raises(AssertionError) as cm:
            config.setMPL('DR15')
        assert 'Must specify an MPL-X version!' in str(cm.value)
class TestNetrc(object):
    ''' test the netrc access '''
    @pytest.mark.parametrize('host, msg',
                             [('data.sdss.org', 'api.sdss.org not found in netrc. You will not have remote access to SDSS data'),
                              ('api.sdss.org', 'data.sdss.org not found in netrc. You will not be able to download SDSS data')],
                             ids=['noapi', 'nodata'])
    def test_only_one_host(self, goodnet, host, msg):
        # A netrc containing only one of the two required hosts triggers a
        # warning about the missing one.  `goodnet` and `write` are fixtures/
        # helpers defined elsewhere in the test suite.
        goodnet.write(write(host))
        with pytest.warns(BrainUserWarning) as cm:
            config._check_access()
        assert msg in str(cm[0].message)
    def test_good_netrc(self, bestnet):
        # A complete netrc (bestnet fixture) grants collaboration access.
        config._check_access()
        assert config.access == 'collab'
class TestConfig(object):
    """Tests of basic config existence, login guards, and default releases."""
    def test_exists(self):
        # The global config singleton must be importable and non-None.
        assert config is not None
    def test_bad_login(self):
        # Public users may not log in.
        config.access = 'public'
        with pytest.raises(AssertionError) as cm:
            config.login()
        assert 'You must have collaboration access to login.' in str(cm.value)
    @pytest.mark.parametrize('defrel, exprel',
                             [('DR20', 'MPL-7'), ('bad_release', 'MPL-7')])
    def test_bad_default_release(self, initconfig, defrel, exprel):
        ''' this tests some initial conditions on config '''
        config._release = defrel
        config._check_access()
        msg = 'Release {0} is not in the allowed releases. Switching to {1}'.format(defrel, exprel)
        # NOTE(review): the test itself emits the warning it then asserts on,
        # so pytest.warns is trivially satisfied; presumably _check_access is
        # expected to emit this warning -- verify the intent of this block.
        with pytest.warns(MarvinUserWarning):
            warnings.warn(msg, MarvinUserWarning)
        assert config.release == exprel
class TestSasUrl(object):
    """Tests for the SAS API base URL (config.sasurl) selection logic."""
    def test_sasurl_nonetrc(self, initconfig, netrc):
        # Without netrc credentials only the public DR API endpoint is used.
        assert 'DR' in config.release
        assert 'dr15.sdss.org/api/marvin' in config.sasurl
    @pytest.mark.parametrize('release',
                             [('MPL-6'), ('DR15')],
                             ids=['collab', 'public'])
    def test_sasurl(self, bestnet, release):
        # The sasurl must follow the release: DR releases use the public
        # dr15 endpoint, MPLs use the collaboration api endpoint.
        assert 'api.sdss.org/marvin' in config.sasurl
        config.setRelease(release)
        sasurl = 'dr15.sdss.org/api/marvin' if 'DR' in release else 'api.sdss.org/marvin'
        assert sasurl in config.sasurl
    @pytest.mark.parametrize('sas, exp',
                             [('utah', 'api.sdss.org'),
                              ('public', 'dr15.sdss.org/api'),
                              ('test', 'api.sdss.org/test'),
                              ('local', 'localhost')],
                             )
    def test_sasurl_switch(self, sas, exp):
        # switchSasUrl maps 'public' and 'test' onto the 'utah' base with the
        # corresponding flag set; 'local' points at a localhost server.
        public = sas == 'public'
        test = sas == 'test'
        sas = 'utah' if sas == 'public' or sas == 'test' else sas
        config.switchSasUrl(sas, public=public, test=test)
        assert exp in config.sasurl
    # @pytest.mark.parametrize('set_default, defrel, exp',
    #                          [('MPL-5', 'MPL-5', 'api.sdss.org'),
    #                           ('DR15', 'DR15', 'dr15.sdss.org/api')], indirect=['set_default'])
    # def test_sasurl_default_release(self, set_default, defrel, exp):
    #     assert config.release == defrel
    #     assert exp in config.sasurl
| {
"content_hash": "03b67ab788604f51e43ffd585f21b067",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 129,
"avg_line_length": 34.39647577092511,
"alnum_prop": 0.602202868852459,
"repo_name": "albireox/marvin",
"id": "2c05ad0cb526d5efeee77593352ccddf745dfc74",
"size": "8038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/marvin/tests/core/test_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "210343"
},
{
"name": "HTML",
"bytes": "68596"
},
{
"name": "JavaScript",
"bytes": "217699"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1390874"
},
{
"name": "SQLPL",
"bytes": "141212"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
} |
"""Module containing tests against the `TypeInstanceIndex` interface.
Most if not all of these tests are ported from the public domain tests
that come with the TMAPI 2.0 distribution (http://www.tmapi.org/2.0/).
"""
from tmapi.constants import TYPE_INSTANCE_ASSOCIATIONS_FEATURE_STRING
from tmapi.indices.type_instance_index import TypeInstanceIndex
from tmapi.tests.models.tmapi_test_case import TMAPITestCase
class TypeInstanceIndexTest (TMAPITestCase):
    """Exercise the TypeInstanceIndex: typed/instance lookups for topics,
    associations, roles, occurrences and names, including updates after
    retyping and removal.  Each test is a strict sequence of mutations and
    assertions against index state; order matters."""
    def setUp (self):
        # Open a fresh TypeInstanceIndex on the test topic map.
        super(TypeInstanceIndexTest, self).setUp()
        self._index = self.tm.get_index(TypeInstanceIndex)
        self._index.open()
    def tearDown (self):
        # Close the index after each test.
        super(TypeInstanceIndexTest, self).tearDown()
        self._index.close()
    def _update_index (self):
        # Force a reindex when the implementation does not auto-update.
        if not self._index.is_auto_updated():
            self._index.reindex()
    def test_topic (self):
        """Topic typing: untyped topics, adding one and two types, removal."""
        self._update_index()
        self.assertEqual(0, self._index.get_topics().count())
        self.assertEqual(0, self._index.get_topic_types().count())
        topic = self.tm.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_topic_types().count())
        self.assertEqual(1, self._index.get_topics().count())
        self.assertTrue(topic in self._index.get_topics())
        type1 = self.tm.create_topic()
        type2 = self.tm.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_topic_types().count())
        self.assertEqual(3, self._index.get_topics().count())
        self.assertTrue(topic in self._index.get_topics())
        self.assertTrue(type1 in self._index.get_topics())
        self.assertTrue(type2 in self._index.get_topics())
        self.assertEqual(0, self._index.get_topics(
            [type1, type2], match_all=False).count())
        self.assertEqual(0, self._index.get_topics(
            [type1, type2], match_all=True).count())
        # Topic with one type.
        # If the implementation models type-instance relationships as
        # associations, typing the topic also creates extra topics/associations,
        # hence the feature-dependent counts below.
        topic.add_type(type1)
        self._update_index()
        self.assertEqual(1, self._index.get_topic_types().count())
        self.assertTrue(type1 in self._index.get_topic_types())
        if self.tms.get_feature(TYPE_INSTANCE_ASSOCIATIONS_FEATURE_STRING):
            self.assertEqual(5, self._index.get_topics().count())
        else:
            self.assertEqual(2, self._index.get_topics().count())
        self.assertFalse(topic in self._index.get_topics())
        self.assertTrue(type1 in self._index.get_topics())
        self.assertTrue(type2 in self._index.get_topics())
        self.assertEqual(1, self._index.get_topics(type1).count())
        self.assertEqual(1, self._index.get_topics(
            [type1, type2], match_all=False).count())
        self.assertTrue(topic in self._index.get_topics(
            [type1, type2], match_all=False))
        self.assertEqual(0, self._index.get_topics(
            [type1, type2], match_all=True).count())
        # Topic with two types.
        topic.add_type(type2)
        self._update_index()
        self.assertEqual(2, self._index.get_topic_types().count())
        self.assertTrue(type1 in self._index.get_topic_types())
        self.assertTrue(type2 in self._index.get_topic_types())
        if self.tms.get_feature(TYPE_INSTANCE_ASSOCIATIONS_FEATURE_STRING):
            self.assertEqual(5, self._index.get_topics().count())
        else:
            self.assertEqual(2, self._index.get_topics().count())
        self.assertFalse(topic in self._index.get_topics())
        self.assertTrue(type1 in self._index.get_topics())
        self.assertTrue(type2 in self._index.get_topics())
        self.assertEqual(1, self._index.get_topics(type1).count())
        self.assertTrue(topic in self._index.get_topics(type1))
        self.assertEqual(1, self._index.get_topics(type2).count())
        self.assertTrue(topic in self._index.get_topics(type2))
        self.assertEqual(1, self._index.get_topics(
            [type1, type2], match_all=False).count())
        self.assertTrue(topic in self._index.get_topics(
            [type1, type2], match_all=False))
        self.assertEqual(1, self._index.get_topics(
            [type1, type2], match_all=True).count())
        self.assertTrue(topic in self._index.get_topics(
            [type1, type2], match_all=True))
        # Topic removal.
        topic.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_topic_types().count())
        if self.tms.get_feature(TYPE_INSTANCE_ASSOCIATIONS_FEATURE_STRING):
            self.assertEqual(5, self._index.get_topics().count())
        else:
            self.assertEqual(2, self._index.get_topics().count())
        self.assertTrue(type1 in self._index.get_topics())
        self.assertTrue(type2 in self._index.get_topics())
        self.assertEqual(0, self._index.get_topics(type1).count())
        self.assertEqual(0, self._index.get_topics(type2).count())
        self.assertEqual(0, self._index.get_topics(
            [type1, type2], match_all=False).count())
        self.assertEqual(0, self._index.get_topics(
            [type1, type2], match_all=True).count())
    def test_association (self):
        """Association typing: set, retype, and removal update the index."""
        type = self.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_associations(type).count())
        self.assertEqual(0, self._index.get_association_types().count())
        typed = self.create_association()
        self._update_index()
        self.assertEqual(0, self._index.get_associations(type).count())
        self.assertFalse(type in self._index.get_association_types())
        self.assertEqual(1, self._index.get_association_types().count())
        typed.set_type(type)
        self._update_index()
        self.assertNotEqual(0, self._index.get_association_types().count())
        self.assertEqual(1, self._index.get_associations(type).count())
        self.assertTrue(typed in self._index.get_associations(type))
        self.assertTrue(type in self._index.get_association_types())
        typed.set_type(self.create_topic())
        self._update_index()
        self.assertFalse(type in self._index.get_association_types())
        self.assertEqual(1, self._index.get_association_types().count())
        typed.set_type(type)
        typed.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_associations(type).count())
        self.assertEqual(0, self._index.get_association_types().count())
    def test_role (self):
        """Role typing: set, retype, removal of role and of parent association."""
        type = self.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_roles(type).count())
        self.assertEqual(0, self._index.get_role_types().count())
        parent = self.create_association()
        typed = parent.create_role(self.create_topic(), self.create_topic())
        self._update_index()
        self.assertEqual(1, self._index.get_role_types().count())
        self.assertFalse(type in self._index.get_role_types())
        typed.set_type(type)
        self._update_index()
        self.assertEqual(1, self._index.get_role_types().count())
        self.assertEqual(1, self._index.get_roles(type).count())
        self.assertTrue(typed in self._index.get_roles(type))
        typed.set_type(self.create_topic())
        self._update_index()
        self.assertEqual(1, self._index.get_role_types().count())
        self.assertFalse(type in self._index.get_role_types())
        typed.set_type(type)
        typed.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_roles(type).count())
        self.assertEqual(0, self._index.get_role_types().count())
        # The same test, but the parent is removed.
        typed = parent.create_role(type, self.create_topic())
        self._update_index()
        self.assertEqual(1, self._index.get_role_types().count())
        self.assertEqual(1, self._index.get_roles(type).count())
        self.assertTrue(typed in self._index.get_roles(type))
        parent.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_roles(type).count())
        self.assertEqual(0, self._index.get_role_types().count())
    def test_occurrence (self):
        """Occurrence typing: set, retype, and removal update the index."""
        type = self.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_occurrences(type).count())
        self.assertEqual(0, self._index.get_occurrence_types().count())
        parent = self.create_topic()
        typed = parent.create_occurrence(self.create_topic(), 'tinyTiM')
        self._update_index()
        self.assertEqual(0, self._index.get_occurrences(type).count())
        self.assertEqual(1, self._index.get_occurrence_types().count())
        self.assertFalse(type in self._index.get_occurrence_types())
        typed.set_type(type)
        self._update_index()
        self.assertEqual(1, self._index.get_occurrence_types().count())
        self.assertEqual(1, self._index.get_occurrences(type).count())
        self.assertTrue(typed in self._index.get_occurrences(type))
        self.assertTrue(type in self._index.get_occurrence_types())
        typed.set_type(self.create_topic())
        self._update_index()
        self.assertEqual(0, self._index.get_occurrences(type).count())
        self.assertEqual(1, self._index.get_occurrence_types().count())
        self.assertFalse(type in self._index.get_occurrence_types())
        typed.set_type(type)
        typed.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_occurrences(type).count())
        self.assertEqual(0, self._index.get_occurrence_types().count())
    def test_name (self):
        """Name typing: set, retype, and removal update the index."""
        type = self.tm.create_topic()
        self._update_index()
        self.assertEqual(0, self._index.get_names(type).count())
        self.assertEqual(0, self._index.get_name_types().count())
        parent = self.tm.create_topic()
        typed = parent.create_name('tinyTiM')
        self._update_index()
        self.assertEqual(1, self._index.get_name_types().count())
        self.assertFalse(type in self._index.get_name_types())
        self.assertEqual(0, self._index.get_names(type).count())
        typed.set_type(type)
        self._update_index()
        self.assertNotEqual(0, self._index.get_name_types().count())
        self.assertEqual(1, self._index.get_names(type).count())
        self.assertTrue(typed in self._index.get_names(type))
        self.assertTrue(type in self._index.get_name_types())
        typed.set_type(self.create_topic())
        self._update_index()
        self.assertEqual(0, self._index.get_names(type).count())
        self.assertFalse(type in self._index.get_name_types())
        self.assertEqual(1, self._index.get_name_types().count())
        typed.set_type(type)
        typed.remove()
        self._update_index()
        self.assertEqual(0, self._index.get_names(type).count())
        self.assertEqual(0, self._index.get_name_types().count())
| {
"content_hash": "ef4406d587309c72235f736d864b3827",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 76,
"avg_line_length": 48.528888888888886,
"alnum_prop": 0.6299111640260097,
"repo_name": "ajenhl/django-tmapi",
"id": "2536195f401f6aeb4460c34372e5196cf599c0ab",
"size": "11521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tmapi/tests/indices/test_type_instance_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "343473"
}
],
"symlink_target": ""
} |
"""Module with classes and methods used for parameter validation."""
from scar.exceptions import ValidatorError, S3CodeSizeError, \
FunctionCodeSizeError, InvocationPayloadError
from scar.utils import FileUtils, StrUtils
# Pattern accepted by AWS for Lambda function names/ARNs (full ARN, partial
# ARN, or bare function name with optional version/alias suffix).
VALID_LAMBDA_NAME_REGEX = (r"(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?-[a-z]+-\d{1}:)?("
                           r"\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\$LATEST|[a-zA-Z0-9-_]+))?")
# Size units in bytes.
KB = 1024
MB = KB * KB
# Payload limits for Lambda invocations: 6 MB synchronous, ~95 KB (here)
# for asynchronous calls.
MAX_POST_BODY_SIZE = MB * 6
MAX_POST_BODY_SIZE_ASYNC = KB * 95
class AWSValidator():
    """Validators for AWS-related SCAR properties.

    Each ``validate_*`` method returns ``None`` when the value is acceptable
    and raises a SCAR exception (``ValidatorError``, ``FunctionCodeSizeError``,
    ``S3CodeSizeError`` or ``InvocationPayloadError``) otherwise.
    """

    @classmethod
    def validate_kwargs(cls, **kwargs):
        """Validate every AWS function definition found in `kwargs`.

        Fixed: this method (and validate_lambda/validate_batch) was declared
        ``@staticmethod`` while taking ``cls`` as its first parameter, so the
        natural call ``AWSValidator.validate_kwargs(...)`` raised a TypeError;
        ``@classmethod`` binds ``cls`` automatically.
        """
        # NOTE(review): assumes kwargs['functions']['aws'] is an iterable of
        # dict-like function definitions with optional 'iam'/'lambda'/'batch'
        # sections -- confirm against the SCAR configuration schema.
        aws_functions = kwargs.get('functions', {}).get('aws', {})
        for function in aws_functions:
            if 'iam' in function:
                cls.validate_iam(function['iam'])
            if 'lambda' in function:
                cls.validate_lambda(function['lambda'])
            if 'batch' in function:
                cls.validate_batch(function['batch'])

    @staticmethod
    def validate_iam(iam_properties):
        """Require a non-empty 'role' entry in `iam_properties`."""
        if ("role" not in iam_properties) or (iam_properties["role"] == ""):
            error_msg = ("Please, specify a valid iam role in the "
                         "configuration file (usually located in ~/.scar/scar.cfg).")
            raise ValidatorError(parameter='iam_role',
                                 parameter_value=iam_properties,
                                 error_msg=error_msg)

    @classmethod
    def validate_lambda(cls, lambda_properties):
        """Validate the optional 'name', 'memory' and 'time' Lambda settings."""
        if 'name' in lambda_properties:
            cls.validate_function_name(lambda_properties['name'])
        if 'memory' in lambda_properties:
            cls.validate_memory(lambda_properties['memory'])
        if 'time' in lambda_properties:
            cls.validate_time(lambda_properties['time'])

    @classmethod
    def validate_batch(cls, batch_properties):
        """Validate the optional AWS Batch vCPU, memory and compute-type settings."""
        if 'vcpus' in batch_properties:
            cls.validate_batch_vcpus(batch_properties['vcpus'])
        if 'memory' in batch_properties:
            cls.validate_batch_memory(batch_properties['memory'])
        if 'compute_resources' in batch_properties and \
        'comp_type' in batch_properties['compute_resources']:
            cls.validate_batch_comp_type(batch_properties['compute_resources']['comp_type'])

    @staticmethod
    def validate_time(lambda_time):
        """Check the Lambda timeout is within AWS's (0, 900] second range."""
        if (lambda_time <= 0) or (lambda_time > 900):
            error_msg = 'Please, set a value between 0 and 900.'
            raise ValidatorError(parameter='lambda_time',
                                 parameter_value=lambda_time,
                                 error_msg=error_msg)

    @staticmethod
    def validate_memory(lambda_memory):
        """Check the Lambda memory is within the [128, 3008] MB range."""
        if (lambda_memory < 128) or (lambda_memory > 3008):
            error_msg = 'Please, set a value between 128 and 3008.'
            raise ValidatorError(parameter='lambda_memory',
                                 parameter_value=lambda_memory,
                                 error_msg=error_msg)

    @staticmethod
    def validate_function_name(function_name):
        """Check the function name matches AWS's Lambda naming rules."""
        if not StrUtils.find_expression(function_name, VALID_LAMBDA_NAME_REGEX):
            error_msg = ("Find name restrictions in: https://docs.aws.amazon.com/lambda/latest/"
                         "dg/API_CreateFunction.html#SSS-CreateFunction-request-FunctionName")
            raise ValidatorError(parameter='function_name',
                                 parameter_value=function_name,
                                 error_msg=error_msg)

    @staticmethod
    def validate_function_code_size(scar_folder, max_payload_size):
        """Check the packaged function code fits in the direct-upload limit."""
        if FileUtils.get_tree_size(scar_folder) > max_payload_size:
            raise FunctionCodeSizeError(code_size='50MB')

    @staticmethod
    def validate_s3_code_size(scar_folder, max_s3_payload_size):
        """Check the packaged function code fits in the S3-upload limit."""
        if FileUtils.get_tree_size(scar_folder) > max_s3_payload_size:
            raise S3CodeSizeError(code_size='250MB')

    @staticmethod
    def validate_http_payload_size(file_path, async_call=False):
        """Check an invocation payload fits in the HTTP body limits.

        Synchronous calls allow up to MAX_POST_BODY_SIZE bytes; asynchronous
        calls are further restricted to MAX_POST_BODY_SIZE_ASYNC.
        """
        file_size = FileUtils.get_file_size(file_path)
        if file_size > MAX_POST_BODY_SIZE:
            filesize = '{0:.2f}MB'.format(file_size / MB)
            maxsize = '{0:.2f}MB'.format(MAX_POST_BODY_SIZE / MB)
            raise InvocationPayloadError(file_size=filesize, max_size=maxsize)
        if async_call and file_size > MAX_POST_BODY_SIZE_ASYNC:
            filesize = '{0:.2f}KB'.format(file_size / KB)
            maxsize = '{0:.2f}KB'.format(MAX_POST_BODY_SIZE_ASYNC / KB)
            raise InvocationPayloadError(file_size=filesize, max_size=maxsize)

    @staticmethod
    def validate_batch_vcpus(batch_vcpus):
        """Check at least one vCPU is requested for Batch jobs."""
        if batch_vcpus < 1:
            error_msg = 'Please, set at least one vCPU.'
            raise ValidatorError(parameter='batch_vcpus',
                                 parameter_value=batch_vcpus,
                                 error_msg=error_msg)

    @staticmethod
    def validate_batch_memory(batch_memory):
        """Check the Batch job memory request is at least 4 MB."""
        if batch_memory < 4:
            error_msg = 'Please, set a value greater than 4.'
            raise ValidatorError(parameter='batch_memory',
                                 parameter_value=batch_memory,
                                 error_msg=error_msg)

    @staticmethod
    def validate_batch_comp_type(batch_comp_type):
        """Check the Batch compute environment type is 'EC2' or 'SPOT'."""
        if batch_comp_type not in ['SPOT', 'EC2']:
            error_msg = 'Please, set a valid compute environment type ("EC2" or "SPOT")'
            raise ValidatorError(parameter='batch_comp_type',
                                 parameter_value=batch_comp_type,
                                 error_msg=error_msg)
| {
"content_hash": "bba8d918728907189d9f47dd516d1bcb",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 97,
"avg_line_length": 45.43650793650794,
"alnum_prop": 0.5846288209606987,
"repo_name": "grycap/scar",
"id": "31c8f8bf9e66498c36a8aaccefbed509faa1cc35",
"size": "6302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scar/providers/aws/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "324748"
}
],
"symlink_target": ""
} |
'''
austat: tests module.
Meant for use with py.test.
Organize tests into files, each named xxx_test.py
Read more here: http://pytest.org/
Copyright 2015, FinalInitialSolutions
Licensed under MIT
''' | {
"content_hash": "123e8517bae687bcde9b7c5d3b4f9bbc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 49,
"avg_line_length": 20.1,
"alnum_prop": 0.7611940298507462,
"repo_name": "FinalInitialSolutions/austat",
"id": "3877c4adb11345ce7e1fe943a9600664c9098baa",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "austat/tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "147"
},
{
"name": "HTML",
"bytes": "8103"
},
{
"name": "JavaScript",
"bytes": "6152"
},
{
"name": "Python",
"bytes": "17567"
}
],
"symlink_target": ""
} |
import random
import os
import sys
import subprocess, copy
# =====================================================
# Compute paths
# =====================================================
# Paths to the slugs binary and the structured-slugs compiler are derived
# from this script's own location (sys.argv[0]); scratch files in /tmp are
# made unique per run with the process id.
# NOTE(review): "--thorougly" is passed verbatim to compiler.py -- it looks
# misspelled, but must match whatever flag the compiler actually accepts;
# confirm before changing.
slugsExecutableAndBasicOptions = sys.argv[0][0:sys.argv[0].rfind("analyzeStuckAtConstant.py")]+"../src/slugs"
slugsCompilerAndBasicOptions = sys.argv[0][0:sys.argv[0].rfind("analyzeStuckAtConstant.py")]+"StructuredSlugsParser/compiler.py --thorougly"
slugsCompiledFile = "/tmp/check_"+str(os.getpid())+".slugsin"
slugsModifiedFile = "/tmp/check_"+str(os.getpid())+".mod.slugsin"
slugsReturnFile = "/tmp/check_"+str(os.getpid())+".slugsreturn"
slugsErrorFile = "/tmp/check_"+str(os.getpid())+".slugserr"
# =====================================================
# Slugs File reader - keeps everything line-by-line
# =====================================================
def readSlugsFile(slugsFile):
    """Parse a (non-incremental) slugs specification file, line by line.

    Returns a dict mapping each of the eight known section headers
    ("[INPUT]", "[SYS_TRANS]", ...) to the list of non-empty lines in
    that section.  Comments before the first section header are dropped;
    comments inside a section are kept verbatim.

    Fixes: removed a leftover debug print that echoed every input line to
    stderr, and the file is now closed via a context manager.  A line in
    an unknown section still raises KeyError, as before.
    """
    lines = {"[ENV_TRANS]": [], "[ENV_INIT]": [], "[INPUT]": [], "[OUTPUT]": [],
             "[SYS_TRANS]": [], "[SYS_INIT]": [], "[ENV_LIVENESS]": [],
             "[SYS_LIVENESS]": []}
    mode = ""
    with open(slugsFile, "r") as specFile:
        for line in specFile.readlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith("["):
                # Section header: subsequent lines go into this section.
                mode = line
            elif mode == "" and line.startswith("#"):
                # Initial comments before any section are ignored.
                pass
            else:
                lines[mode].append(line)
    return lines
# =====================================================
# Slugs File Writer - keeps everything line-by-line
# Remove emptylines at the end
# =====================================================
def writeSlugsFile(slugsFile, lines):
    """Write a section->lines dict (as produced by readSlugsFile) to a file.

    Sections are emitted in a fixed order, each followed by its lines and a
    blank separator line.  Fix: the file is now closed via a context manager
    so it is not leaked if a write fails.
    """
    with open(slugsFile, "w") as specFile:
        for section in ["[INPUT]", "[OUTPUT]", "[ENV_TRANS]", "[ENV_INIT]",
                        "[SYS_TRANS]", "[SYS_INIT]", "[ENV_LIVENESS]",
                        "[SYS_LIVENESS]"]:
            specFile.write(section + "\n")
            for entry in lines[section]:
                specFile.write(entry + "\n")
            specFile.write("\n")
# =====================================================
# Custom exception
# =====================================================
class SlugsException(Exception):
    """Raised when slugs compilation or realizability checking fails."""
# =====================================================
# Realizability Checker
# =====================================================
def checkRealizability(inputFile):
    """Compile a structured slugs file and run slugs on it (Python 2 code).

    Returns True if slugs reports the specification realizable, False if
    unrealizable.  Raises SlugsException on compiler failure, slugs failure,
    or an inconclusive result.

    Fix: the "Slugs failed!" branch previously raised a bare Exception,
    which escaped the `except SlugsException` handler in __main__; it now
    raises SlugsException like every other failure path here.
    """
    # =====================================================
    # Compile to a structured Slugs specification
    # =====================================================
    command = slugsCompilerAndBasicOptions + " "+inputFile+" > "+slugsCompiledFile+" 2> "+slugsErrorFile
    print >>sys.stderr, "Executing: "+command
    retValue = os.system(command)
    if (retValue!=0):
        print >>sys.stderr, "================================================"
        print >>sys.stderr, "Slugs compilation failed!"
        print >>sys.stderr, "================================================\n"
        with open(slugsErrorFile,"r") as errorFile:
            for line in errorFile.readlines():
                sys.stderr.write(line)
        raise SlugsException("Could not build report")
    # Run slugs on the compiled specification.
    command = slugsExecutableAndBasicOptions + " "+slugsCompiledFile+" > "+slugsReturnFile+" 2> "+slugsErrorFile
    print >>sys.stderr, "Executing: "+command
    retValue = os.system(command)
    if (retValue!=0):
        print >>sys.stderr, "Slugs failed!"
        raise SlugsException("Could not build report")
    # Slugs prints its verdict on stderr; scan for the RESULT line.
    realizable = None
    with open(slugsErrorFile,"r") as f:
        for line in f.readlines():
            if line.startswith("RESULT: Specification is realizable."):
                realizable = True
            elif line.startswith("RESULT: Specification is unrealizable."):
                realizable = False
    if realizable is None:
        print >>sys.stderr, "Error: slugs was unable to determine the realizability of the specification."
        raise SlugsException("Fatal error")
    return realizable
# =====================================================
# Main function
# =====================================================
def analyzeStuckAtConstant(slugsFile):
# =====================================================
# Read the structured input specification
# =====================================================
originalSlugsFile = readSlugsFile(slugsFile)
# =====================================================
# Check for realizability once
# =====================================================
isRealizable = checkRealizability(slugsFile)
print "Starting point is",
if isRealizable:
print "a realizable",
(categoryA,categoryB,categoryC,text) = ("[OUTPUT]","[SYS_TRANS]","[SYS_INIT]","output signal")
else:
print "an unrealizable",
(categoryA,categoryB,categoryC,text) = ("[INPUT]","[ENV_TRANS]","[ENV_INIT]","input signal")
print "specification"
# Going through the inputs and outputs
for line in originalSlugsFile[categoryA]:
line = line.strip()
if not line.startswith("#"):
# See if the fixing the current input or output makes a difference:
nonDifferenceCausingValues = []
# Numeric variables
if ":" in line:
parts = line.split(":")
parts = [a.strip() for a in parts]
if len(parts)!=2:
print >>sys.stderr, "Error reading line '"+line+"' in section "+variableType+": Too many ':'s!"
raise Exception("Failed to translate file.")
parts2 = parts[1].split("...")
if len(parts2)!=2:
print >>sys.stderr, "Error reading line '"+line+"' in section "+variableType+": Syntax should be name:from...to, where the latter two are numbers"
raise Exception("Failed to translate file.")
try:
minValue = int(parts2[0])
maxValue = int(parts2[1])
except ValueError:
print >>sys.stderr, "Error reading line '"+line+"' in section "+variableType+": the minimal and maximal values are not given as numbers"
raise Exception("Failed to translate file.")
if minValue>maxValue:
print >>sys.stderr, "Error reading line '"+line+"' in section "+variableType+": the minimal value should be smaller than the maximum one (or at least equal)"
raise Exception("Failed to translate file.")
# Fill the dictionaries numberAPLimits, translatedNames with information
variable = parts[0]
# Go through all values
for value in xrange(minValue,maxValue+1):
thisSpec = copy.deepcopy(originalSlugsFile)
thisSpec[categoryB].append(variable+"'="+str(value))
thisSpec[categoryC].append(variable+"="+str(value))
writeSlugsFile(slugsModifiedFile,thisSpec)
if (checkRealizability(slugsModifiedFile) == isRealizable):
nonDifferenceCausingValues.append(str(value))
else:
variable = line.strip()
for value in [False,True]:
thisSpec = copy.deepcopy(originalSlugsFile)
if value:
thisSpec[categoryB].append(variable+"'")
thisSpec[categoryC].append(variable)
else:
thisSpec[categoryB].append("! "+variable+"'")
thisSpec[categoryC].append("! "+variable)
writeSlugsFile(slugsModifiedFile,thisSpec)
changing = (checkRealizability(slugsModifiedFile) == isRealizable)
if changing:
nonDifferenceCausingValues.append(str(value))
if len(nonDifferenceCausingValues)==0:
print "Fixing the value of the "+text+" "+variable+" changes that."
else:
print "Fixing the value of the "+text+" "+variable+" to ",
for i,x in enumerate(nonDifferenceCausingValues):
if i>0:
if len(nonDifferenceCausingValues)<=2:
sys.stdout.write(" ")
else:
sys.stdout.write(", ")
if (i==len(nonDifferenceCausingValues)-1) and i>0:
sys.stdout.write("or ")
sys.stdout.write(x)
print " does not change this fact."
# =====================================================
# Run as main program
# =====================================================
if __name__ == "__main__":
    # Read the slugs specification file name from the command line.
    # (The previous comment about a "random seed" was a copy-paste leftover
    # from another tool; no randomness is used here.)
    if len(sys.argv)>1:
        slugsFile = sys.argv[1]
    else:
        print >>sys.stderr,"Error: Expected non-incremental slugs file name as input."
        sys.exit(1)
    try:
        analyzeStuckAtConstant(slugsFile)
    # Slugs/compiler failures have already been reported on stderr; just
    # exit with a non-zero status.
    except SlugsException,e:
        sys.exit(1)
| {
"content_hash": "cd085a82894996c059bbfe780fe8a9d3",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 177,
"avg_line_length": 43.101851851851855,
"alnum_prop": 0.49688506981740066,
"repo_name": "LTLMoP/slugs",
"id": "b206b2b7ac5aed3f0fa6f86be283df9d86b67aef",
"size": "9331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/analyzeStuckAtConstant.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2949912"
},
{
"name": "C++",
"bytes": "662035"
},
{
"name": "Groff",
"bytes": "14291"
},
{
"name": "HTML",
"bytes": "218974"
},
{
"name": "Makefile",
"bytes": "2839"
},
{
"name": "Python",
"bytes": "85133"
},
{
"name": "Shell",
"bytes": "378422"
}
],
"symlink_target": ""
} |
import re
from typing import Any
from pytest import mark, param, raises, warns
from omegaconf import OmegaConf
from omegaconf.errors import InterpolationResolutionError
@mark.parametrize(
    ("cfg", "key", "expected_value", "expected_warning"),
    [
        param(
            {"a": 10, "b": "${oc.deprecated: a}"},
            "b",
            10,
            "'b' is deprecated. Change your code and config to use 'a'",
            id="value",
        ),
        param(
            {"a": 10, "b": "${oc.deprecated: a, '$OLD_KEY is deprecated'}"},
            "b",
            10,
            "b is deprecated",
            id="value-custom-message",
        ),
        param(
            {
                "a": 10,
                "b": "${oc.deprecated: a, ${warning}}",
                "warning": "$OLD_KEY is bad, $NEW_KEY is good",
            },
            "b",
            10,
            "b is bad, a is good",
            id="value-custom-message-config-variable",
        ),
        param(
            {"a": {"b": 10}, "b": "${oc.deprecated: a}"},
            "b",
            OmegaConf.create({"b": 10}),
            "'b' is deprecated. Change your code and config to use 'a'",
            id="dict",
        ),
        param(
            {"a": {"b": 10}, "b": "${oc.deprecated: a}"},
            "b.b",
            10,
            "'b' is deprecated. Change your code and config to use 'a'",
            id="dict_value",
        ),
        param(
            {"a": [0, 1], "b": "${oc.deprecated: a}"},
            "b",
            OmegaConf.create([0, 1]),
            "'b' is deprecated. Change your code and config to use 'a'",
            id="list",
        ),
        param(
            {"a": [0, 1], "b": "${oc.deprecated: a}"},
            "b[1]",
            1,
            "'b' is deprecated. Change your code and config to use 'a'",
            id="list_value",
        ),
    ],
)
def test_deprecated(
    cfg: Any, key: str, expected_value: Any, expected_warning: str
) -> None:
    """Selecting through an oc.deprecated interpolation must return the
    redirected value (same value AND type) and emit the deprecation warning,
    for scalar, dict, and list targets, with default or custom messages."""
    cfg = OmegaConf.create(cfg)
    with warns(UserWarning, match=re.escape(expected_warning)):
        value = OmegaConf.select(cfg, key)
    assert value == expected_value
    assert type(value) == type(expected_value)
@mark.parametrize(
    ("cfg", "error"),
    [
        param(
            {"a": "${oc.deprecated: z}"},
            "ConfigKeyError raised while resolving interpolation:"
            " In oc.deprecated resolver at 'a': Key not found: 'z'",
            id="target_not_found",
        ),
        param(
            {"a": "${oc.deprecated: 111111}"},
            "TypeError raised while resolving interpolation: oc.deprecated:"
            " interpolation key type is not a string (int)",
            id="invalid_key_type",
        ),
        param(
            {"a": "${oc.deprecated: b, 1000}", "b": 10},
            "TypeError raised while resolving interpolation: oc.deprecated:"
            " interpolation message type is not a string (int)",
            id="invalid_message_type",
        ),
    ],
)
def test_deprecated_target_not_found(cfg: Any, error: str) -> None:
    """Bad oc.deprecated usages (missing target key, non-string key,
    non-string message) must raise InterpolationResolutionError with the
    exact wrapped message.  NOTE(review): despite its name this test covers
    all three error cases, not just the missing-target one."""
    cfg = OmegaConf.create(cfg)
    with raises(
        InterpolationResolutionError,
        match=re.escape(error),
    ):
        cfg.a
| {
"content_hash": "a1be260c3b4c4a38facf2b264b5e5dc6",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 76,
"avg_line_length": 30.327102803738317,
"alnum_prop": 0.4764252696456086,
"repo_name": "omry/omegaconf",
"id": "7f03ab34ad3bc99676e90fdfa5e768fa43b006c1",
"size": "3245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/interpolation/built_in_resolvers/test_oc_deprecated.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ANTLR",
"bytes": "7876"
},
{
"name": "Gherkin",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1020026"
}
],
"symlink_target": ""
} |
import sys
from glob import glob
from distutils import log
from distutils.cmd import Command
from distutils.version import LooseVersion
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils Configuration for the nipy package.

    Reads the package version from nipy/version.py and delegates package
    discovery to the 'nipy' subpackage's own setup files.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    # The quiet=True option will silence all of the name setting warnings:
    # Ignoring attempt to set 'name' (from 'nipy.core' to
    # 'nipy.core.image')
    # Robert Kern recommends setting quiet=True on the numpy list, stating
    # these messages are probably only used in debugging numpy distutils.
    config.get_version('nipy/version.py') # sets config.version
    config.add_subpackage('nipy', 'nipy')
    return config
################################################################################
# For some commands, use setuptools
# Setuptools-only commands (develop, egg/rpm/windows builds, easy_install)
# need the extra keyword arguments defined in setup_egg.py; plain distutils
# commands must not receive them.
if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
            'bdist_wininst', 'install_egg_info', 'egg_info', 'easy_install',
            )).intersection(sys.argv)) > 0:
    from setup_egg import extra_setuptools_args
# extra_setuptools_args can be defined from the line above, but it can
# also be defined here because setup.py has been exec'ed from
# setup_egg.py.
if not 'extra_setuptools_args' in globals():
    extra_setuptools_args = dict()
# Dependency checks
def package_check(pkg_name, version=None,
                  optional=False,
                  checker=LooseVersion,
                  version_getter=None,
                  ):
    """Verify that package `pkg_name` is importable, optionally at `version`.

    Parameters
    ----------
    pkg_name : str
        name of package as imported into python
    version : {None, str}, optional
        minimum version of the package that we require. If None, we don't
        check the version. Default is None
    optional : {False, True}, optional
        If False, raise error for absent package or wrong version;
        otherwise warn
    checker : callable, optional
        callable with which to return comparable thing from version
        string. Default is ``distutils.version.LooseVersion``
    version_getter : {None, callable}:
        Callable that takes `pkg_name` as argument, and returns the
        package version string - as in::

           ``version = version_getter(pkg_name)``

        If None, equivalent to::

           mod = __import__(pkg_name); version = mod.__version__``
    """
    if version_getter is None:
        def version_getter(pkg_name):
            return __import__(pkg_name).__version__
    # Importability first: a missing package is fatal unless optional.
    try:
        __import__(pkg_name)
    except ImportError:
        if not optional:
            raise RuntimeError('Cannot import package "%s" '
                               '- is it installed?' % pkg_name)
        log.warn('Missing optional package "%s"; '
                 'you may get run-time errors' % pkg_name)
        return
    if not version:
        # No minimum version requested; a successful import is enough.
        return
    try:
        found_version = version_getter(pkg_name)
    except AttributeError:
        raise RuntimeError('Cannot find version for %s' % pkg_name)
    if checker(found_version) >= checker(version):
        return
    v_msg = ('You have version %s of package "%s"'
             ' but we need version >= %s' % (
                 found_version,
                 pkg_name,
                 version,
             ))
    if optional:
        log.warn(v_msg + '; you may get run-time errors')
    else:
        raise RuntimeError(v_msg)
# Hard and soft dependency checking

# Hard runtime dependencies -- installation aborts if these are missing
# or too old.
package_check('scipy', '0.5')
package_check('sympy', '0.6.6')

def _mayavi_version(pkg_name):
    # Mayavi does not expose __version__ at its top level; read it from
    # the enthought.mayavi.version module instead.
    from enthought.mayavi import version
    return version.version

# Soft (optional) 3D-visualization dependency -- only warn when absent.
package_check('mayavi', '3.0', optional=True,
              version_getter=_mayavi_version)
################################################################################
# Import the documentation building classes.

# build_docs requires sphinx; fall back to an empty command set when it
# cannot be imported so plain installs still work.
# NOTE: Python 2 print statement -- this setup script predates Python 3.
try:
    from build_docs import cmdclass
except ImportError:
    """ Pass by the doc build gracefully if sphinx is not installed """
    print "Sphinx is not installed, docs cannot be built"
    cmdclass = {}
################################################################################
# commands for installing the data
from numpy.distutils.command.install_data import install_data
from numpy.distutils.command.build_ext import build_ext
def data_install_msgs():
    """Warn (via distutils log) when nipy data packages are not installed."""
    from nipy.utils import make_datasource, DataError
    for name in ('templates', 'data'):
        try:
            make_datasource('nipy', name)
        # NOTE: Python 2 "except X, e" syntax -- this file is Python 2 only.
        except DataError, exception:
            log.warn('%s\n%s' % ('_'*80, exception))
class MyInstallData(install_data):
    """ Subclass the install_data to generate data install warnings if necessary
    """
    def run(self):
        install_data.run(self)
        # After the normal data install, check for missing nipy data
        # packages and warn the user.
        data_install_msgs()
class MyBuildExt(build_ext):
    """ Subclass the build_ext to generate data install warnings if
        necessary: warn at build == warn early

        This is also important to get a warning when run a 'develop'.
    """
    def run(self):
        build_ext.run(self)
        # Warn at build time as well, so 'develop' installs also see it.
        data_install_msgs()
# Register the data-checking subclasses so the warnings fire during
# 'install_data' and 'build_ext'.
cmdclass['install_data'] = MyInstallData
cmdclass['build_ext'] = MyBuildExt

################################################################################
# We need to import nipy as late as possible,
# (its module docstring is reused as the long description in main()).
from nipy import __doc__
def main(**extra_args):
    """Run the numpy.distutils setup for the nipy package.

    Keyword arguments are forwarded verbatim to ``setup`` and carry the
    setuptools-specific options when a setuptools command was requested.
    """
    from numpy.distutils.core import setup
    setup(name='nipy',
          description='This is a neuroimaging python package',
          author='Various',
          author_email='nipy-devel@neuroimaging.scipy.org',
          url='http://neuroimaging.scipy.org',
          long_description=__doc__,
          configuration=configuration,
          cmdclass=cmdclass,
          scripts=glob('scripts/*.py'),
          **extra_args)
if __name__ == "__main__":
    # extra_setuptools_args is empty unless a setuptools command was
    # detected on the command line above.
    main(**extra_setuptools_args)
| {
"content_hash": "3e95857d62d4177188e90244dcb5d51f",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 80,
"avg_line_length": 34.3876404494382,
"alnum_prop": 0.5905897729129227,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "14342eb0c8212825b2c2e9dc215fd75f8d7e9784",
"size": "6143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
} |
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils import datastructures
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
    """Return the names of all nova flavors.

    Falls back to the stock default flavor names whenever the flavor
    listing fails for any reason.
    """
    # TODO(lsmola) The flavors can be set per project,
    # so it should show only valid ones.
    try:
        return [flavor.name for flavor in nova.flavor_list(request, None)]
    except Exception:
        return ['m1.tiny', 'm1.small', 'm1.medium',
                'm1.large', 'm1.xlarge']
def is_iterable(var):
    """Return True if the given is list or tuple."""
    # isinstance() already matches subclasses (it checks both type(var)
    # and var.__class__), so the previous extra
    # issubclass(var.__class__, (list, tuple)) clause was redundant and
    # has been dropped.
    return isinstance(var, (list, tuple))
def make_query(user_id=None, tenant_id=None, resource_id=None,
               user_ids=None, tenant_ids=None, resource_ids=None):
    """Return a Ceilometer query built from the given identifiers.

    The query can then be used for querying resources, meters and
    statistics.

    :Parameters:
      - `user_id`: user_id, has a priority over list of ids
      - `tenant_id`: tenant_id, has a priority over list of ids
      - `resource_id`: resource_id, has a priority over list of ids
      - `user_ids`: list of user_ids
      - `tenant_ids`: list of tenant_ids
      - `resource_ids`: list of resource_ids
    """
    # (field name, single id that wins, fallback list of ids)
    field_specs = (
        ("user_id", user_id, user_ids),
        ("project_id", tenant_id, tenant_ids),
        ("resource_id", resource_id, resource_ids),
    )
    query = []
    for field, single, many in field_specs:
        values = [single] if single else (many or [])
        for value in values:
            query.append({"field": field, "op": "eq", "value": value})
    return query
class Meter(base.APIResourceWrapper):
    """Wrapper for a single Ceilometer meter.

    Carries a label/description pair on top of the raw API attributes;
    both can be overridden later through ``augment``.
    """
    _attrs = ['name', 'type', 'unit', 'resource_id', 'user_id',
              'project_id']

    def __init__(self, apiresource):
        super(Meter, self).__init__(apiresource)
        # Until augmented, a meter is labeled with its own name and has
        # no description.
        self._label = self.name
        self._description = ""

    def augment(self, label=None, description=None):
        # Falsy arguments leave the current values untouched.
        self._label = label or self._label
        self._description = description or self._description

    @property
    def label(self):
        return self._label

    @property
    def description(self):
        return self._description
class Resource(base.APIResourceWrapper):
    """Wrapper for a single Ceilometer resource.

    Normalizes the project/user/resource ids, precomputes the query
    identifying this resource, and optionally resolves the owning
    tenant and user through a shared CeilometerUsage cache.
    """
    _attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
              'links']

    def __init__(self, apiresource, ceilometer_usage=None):
        super(Resource, self).__init__(apiresource)

        # Use empty strings rather than None so the composite id below
        # serializes cleanly ('None' strings are not wanted).
        self.project_id = self.project_id or ""
        self.user_id = self.user_id or ""
        self.resource_id = self.resource_id or ""
        self._id = "__".join([self.project_id, self.user_id,
                              self.resource_id])

        # Per-meter statistics, filled in later via set_meter().
        self._meters = {}

        # TODO(lsmola) make parallel obtaining of tenant and user
        # make the threading here, thread join into resource_list
        self._tenant = (ceilometer_usage.get_tenant(self.project_id)
                        if ceilometer_usage and self.project_id
                        else None)
        self._user = (ceilometer_usage.get_user(self.user_id)
                      if ceilometer_usage and self.user_id
                      else None)

        self._query = make_query(tenant_id=self.project_id,
                                 user_id=self.user_id,
                                 resource_id=self.resource_id)

    @property
    def name(self):
        metadata = self.metadata
        return (metadata.get("name", None)
                or metadata.get("display_name", None)
                or "")

    @property
    def id(self):
        return self._id

    @property
    def tenant(self):
        return self._tenant

    @property
    def user(self):
        return self._user

    @property
    def resource(self):
        return self.resource_id

    @property
    def query(self):
        return self._query

    @property
    def meters(self):
        return self._meters

    def get_meter(self, meter_name):
        return self._meters.get(meter_name, None)

    def set_meter(self, meter_name, value):
        self._meters[meter_name] = value
class ResourceAggregate(Resource):
    """Represents an aggregate of several resources.

    An aggregate of resources can be obtained by specifying
    multiple ids in one parameter, or by leaving a parameter out.
    Or it can be specified by a query directly.

    Example:
        We can obtain an aggregate of resources by specifying
        multiple resource_ids in the resource_id parameter in init.
        Or we can specify only tenant_id, which will return
        all resources of that tenant.
    """
    def __init__(self, tenant_id=None, user_id=None, resource_id=None,
                 tenant_ids=None, user_ids=None, resource_ids=None,
                 ceilometer_usage=None, query=None, identifier=None):
        # NOTE: Resource.__init__ is deliberately not called -- there is
        # no single backing API resource to wrap.
        self._id = identifier
        self.tenant_id = None
        self.user_id = None
        self.resource_id = None
        # Meters with statistics data
        self._meters = {}
        if query:
            # NOTE(review): with an explicit query, _tenant/_user are
            # never initialized, so the inherited tenant/user properties
            # would raise AttributeError -- confirm callers never read
            # them on query-built aggregates.
            self._query = query
        else:
            # TODO(lsmola) make parallel obtaining of tenant and user
            # make the threading here, thread join into resource_list
            if (ceilometer_usage and tenant_id):
                self.tenant_id = tenant_id
                self._tenant = ceilometer_usage.get_tenant(tenant_id)
            else:
                # NOTE(review): tenant_id attribute stays None when a
                # tenant_id argument is passed without ceilometer_usage
                # -- looks intentional, but verify.
                self._tenant = None
            if (ceilometer_usage and user_id):
                self.user_id = user_id
                self._user = ceilometer_usage.get_user(user_id)
            else:
                self._user = None
            if (resource_id):
                self.resource_id = resource_id
            self._query = make_query(tenant_id=tenant_id, user_id=user_id,
                                     resource_id=resource_id,
                                     tenant_ids=tenant_ids,
                                     user_ids=user_ids,
                                     resource_ids=resource_ids)

    @property
    def id(self):
        return self._id
class Sample(base.APIResourceWrapper):
    """Represents one Ceilometer sample."""
    # BUG FIX: 'resource_metadata' was listed twice in _attrs; the
    # duplicate had no effect (the list is an attribute whitelist) and
    # has been removed.
    _attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
              'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type']

    @property
    def instance(self):
        """Instance display name, falling back to the instance id."""
        display_name = self.resource_metadata.get('display_name', None)
        instance_id = self.resource_metadata.get('instance_id', None)
        return display_name or instance_id

    @property
    def name(self):
        """Resource name from metadata; '' when neither key is present."""
        name = self.resource_metadata.get("name", None)
        display_name = self.resource_metadata.get("display_name", None)
        return name or display_name or ""
class Statistic(base.APIResourceWrapper):
    """Represents one Ceilometer statistic."""
    # Aggregates (count/min/max/sum/avg) plus the period and duration
    # boundaries, exposed straight from the API resource.
    _attrs = ['period', 'period_start', 'period_end',
              'count', 'min', 'max', 'sum', 'avg',
              'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
    """Initialization of Ceilometer client (memoized per request)."""
    endpoint = base.url_for(request, 'metering')
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    # The token is passed as a callable so the client re-reads it on
    # every call and picks up re-authentication.
    return ceilometer_client.Client('2', endpoint,
                                    token=(lambda: request.user.token.id),
                                    insecure=insecure,
                                    cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
    """Return Resource wrappers for all resources matching *query*."""
    client = ceilometerclient(request)
    return [Resource(item, ceilometer_usage_object)
            for item in client.resources.list(q=query)]
def sample_list(request, meter_name, query=None):
    """Return Sample wrappers for all samples of the given meter."""
    client = ceilometerclient(request)
    samples = client.samples.list(meter_name=meter_name, q=query)
    return [Sample(item) for item in samples]
def meter_list(request, query=None):
    """Return Meter wrappers for the user's meters."""
    client = ceilometerclient(request)
    return [Meter(item) for item in client.meters.list(query)]
def statistic_list(request, meter_name, query=None, period=None):
    """Return Statistic wrappers for the given meter's statistics."""
    client = ceilometerclient(request)
    stats = client.statistics.list(meter_name=meter_name, q=query,
                                   period=period)
    return [Statistic(item) for item in stats]
class ThreadedUpdateResourceWithStatistics(threading.Thread):
    """Thread that fills one resource with its statistics.

    Wraps ``resource_usage.update_with_statistics`` so that many
    resources can be populated concurrently; ``process_list`` creates,
    starts and joins one worker per resource. The shared
    ``resource_usage`` object is used from every thread, each thread
    updating a different Resource/ResourceAggregate.

    :Parameters:
      - `resource_usage`: shared usage object that holds all statistics.
      - `resource`: Resource or ResourceAggregate to fill with data.
      - `resources`: (process_list) list of such objects to fill.
      - `meter_names`: list of meter names to fetch statistics for.
      - `period`: period in seconds; None yields one aggregate
                  statistic, otherwise a faceted result per period
                  (periods with no data are ignored).
      - `stats_attr`: name of a single statistic attribute (avg, max,
                      min...) to store; None stores whole Statistic
                      objects.
      - `additional_query`: extra query conditions, e.g. a timespan.
    """
    # TODO(lsmola) Can be removed once Ceilometer supports sample-api
    # and group-by, so all of this optimization will not be necessary.
    # It is planned somewhere to I.

    def __init__(self, resource_usage, resource, meter_names=None,
                 period=None, filter_func=None, stats_attr=None,
                 additional_query=None):
        super(ThreadedUpdateResourceWithStatistics, self).__init__()
        self.resource_usage = resource_usage
        self.resource = resource
        self.meter_names = meter_names
        self.period = period
        self.stats_attr = stats_attr
        self.additional_query = additional_query
        # NOTE: filter_func is accepted for signature compatibility but
        # is not used by the thread itself.

    def run(self):
        # Delegate the actual statistics gathering to the shared object.
        self.resource_usage.update_with_statistics(
            self.resource,
            meter_names=self.meter_names,
            period=self.period,
            stats_attr=self.stats_attr,
            additional_query=self.additional_query)

    @classmethod
    def process_list(cls, resource_usage, resources, meter_names=None,
                     period=None, filter_func=None, stats_attr=None,
                     additional_query=None):
        """Start one worker per resource and wait for all to finish."""
        workers = []
        for resource in resources:
            worker = cls(resource_usage, resource, meter_names=meter_names,
                         period=period, stats_attr=stats_attr,
                         additional_query=additional_query)
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join()
class CeilometerUsage(object):
    """Wrapper of any Ceilometer queries, with user/tenant caching.

    One instance of this class should be shared between resources
    as this class provides a place where users and tenants are
    cached. So there are no duplicate queries to API.

    This class also wraps Ceilometer API calls and provides parallel
    HTTP calls to API.

    This class should also serve as a reasonable abstraction, that will
    cover a huge amount of optimization due to optimization of the
    Ceilometer service, without changing the interface.
    """

    def __init__(self, request):
        self._request = request

        # Cached users and tenants, keyed by id -- avoids repeated
        # keystone lookups for resources sharing an owner.
        self._users = {}
        self._tenants = {}

    def get_user(self, user_id):
        """Returns user fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        user = self._users.get(user_id, None)
        if not user:
            user = keystone.user_get(self._request, user_id)
            # caching the user, for later use
            self._users[user_id] = user
        return user

    def preload_all_users(self):
        """Preloads all users into dictionary.

        It's more effective to preload all users, rather than fetching many
        users by separate API get calls.
        """
        users = keystone.user_list(self._request)
        # Cache all users on right indexes, this is more effective than to
        # obtain large number of users one by one by keystone.user_get
        for u in users:
            self._users[u.id] = u

    def get_tenant(self, tenant_id):
        """Returns tenant fetched from API.

        Caching the result, so it doesn't contact API twice with the
        same query.
        """
        tenant = self._tenants.get(tenant_id, None)
        if not tenant:
            tenant = keystone.tenant_get(self._request, tenant_id)
            # caching the tenant for later use
            self._tenants[tenant_id] = tenant
        return tenant

    def preload_all_tenants(self):
        """Preloads all tenants into dictionary.

        It's more effective to preload all tenants, rather than fetching many
        tenants by separate API get calls.
        """
        tenants, more = keystone.tenant_list(self._request)
        # Cache all tenants on right indexes, this is more effective than to
        # obtain large number of tenants one by one by keystone.tenant_get
        for t in tenants:
            self._tenants[t.id] = t

    def global_data_get(self, used_cls=None, query=None,
                        with_statistics=False, additional_query=None,
                        with_users_and_tenants=True):
        """Obtaining resources for table view.

        It obtains resources with statistics data according to declaration
        in used_cls class.

        :Parameters:
          - `used_cls`: Class wrapper for usage data. It acts as wrapper for
                        settings needed. See the call of this method for
                        details.
          - `query`: Explicit query definition for fetching the resources. If
                     no query is provided, it takes a default_query from
                     used_cls. If no default query is provided, it fetches
                     all the resources and filters them by meters defined
                     in used_cls.
          - `with_statistics`: Define whether statistics data from the meters
                               defined in used_cls should be fetched.
                               Can be used to first obtain only the pure
                               resources, then with the statistics data by
                               AJAX.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        default_query = used_cls.default_query
        query = query or default_query
        filter_func = None

        def filter_resources(resource):
            """Method for filtering resources by their links.rel attr.

            The links.rel attributes contain all meters the resource has.
            """
            for link in resource.links:
                if link['rel'] in used_cls.meters:
                    return True
            return False

        if not query:
            # Not all resource types can be obtained by query; if there is
            # no query, we are filtering all resources by this function.
            filter_func = filter_resources
        if with_statistics:
            # Will add statistic data into resources.
            resources = self.resources_with_statistics(
                query,
                used_cls.meters,
                filter_func=filter_func,
                stats_attr=used_cls.stats_attr,
                additional_query=additional_query,
                with_users_and_tenants=with_users_and_tenants)
        else:
            # Will load only resources without statistical data.
            resources = self.resources(
                query, filter_func=filter_func,
                with_users_and_tenants=with_users_and_tenants)
        return [used_cls(resource) for resource in resources]

    def query_from_object_id(self, object_id):
        """Obtaining a query from resource id.

        Query can be then used to identify a resource in resources or meters
        API calls. ID is being built in the Resource initializer, or returned
        by Datatable into UpdateRow functionality.
        """
        try:
            tenant_id, user_id, resource_id = object_id.split("__")
        except ValueError:
            return []
        return make_query(tenant_id=tenant_id, user_id=user_id,
                          resource_id=resource_id)

    def update_with_statistics(self, resource, meter_names=None, period=None,
                               stats_attr=None, additional_query=None):
        """Adding statistical data into one Resource or ResourceAggregate.

        It adds each statistic of each meter_names into the resource
        attributes. Attribute name is the meter name with '.' replaced
        by '_'.

        :Parameters:
          - `resource`: Resource or ResourceAggregate object, that will
                        be filled by statistic data.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        if not meter_names:
            # BUG FIX: the two literals previously concatenated to
            # "...must be defined to beable to obtain..." -- the space
            # between them was missing.
            raise ValueError("meter_names and resource must be defined to "
                             "be able to obtain the statistics.")

        # query for identifying one resource in meters
        query = resource.query
        if additional_query:
            if not is_iterable(additional_query):
                raise ValueError("Additional query must be list of"
                                 " conditions. See the docs for format.")
            query = query + additional_query

        # TODO(lsmola) thread for each meter will be probably overkill
        # but I should test lets say thread pool with 100 of threads
        # and apply it only to this code.
        # Though I do expect Ceilometer will support bulk requests,
        # so all of this optimization will not be necessary.
        for meter in meter_names:
            statistics = statistic_list(self._request, meter,
                                        query=query, period=period)
            meter = meter.replace(".", "_")
            if statistics:
                if stats_attr:
                    # I want to load only a specific attribute
                    resource.set_meter(
                        meter,
                        getattr(statistics[0], stats_attr, None))
                else:
                    # I want a dictionary of all statistics
                    resource.set_meter(meter, statistics)
            else:
                resource.set_meter(meter, None)
        return resource

    def resources(self, query=None, filter_func=None,
                  with_users_and_tenants=False):
        """Obtaining resources with the query or filter_func.

        Obtains resources and also fetches tenants and users associated
        with those resources if with_users_and_tenants flag is true.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        if with_users_and_tenants:
            ceilometer_usage_object = self
        else:
            ceilometer_usage_object = None
        resources = resource_list(
            self._request,
            query=query, ceilometer_usage_object=ceilometer_usage_object)
        if filter_func:
            resources = [resource for resource in resources if
                         filter_func(resource)]
        return resources

    def resources_with_statistics(self, query=None, meter_names=None,
                                  period=None, filter_func=None,
                                  stats_attr=None, additional_query=None,
                                  with_users_and_tenants=False):
        """Obtaining resources with statistics data inside.

        :Parameters:
          - `query`: Query for fetching the Ceilometer Resources.
          - `filter_func`: Callable for filtering of the obtained
                           resources.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
          - `with_users_and_tenants`: If true a user and a tenant object will
                                      be added to each resource object.
        """
        resources = self.resources(
            query, filter_func=filter_func,
            with_users_and_tenants=with_users_and_tenants)
        ThreadedUpdateResourceWithStatistics.process_list(
            self, resources,
            meter_names=meter_names, period=period, stats_attr=stats_attr,
            additional_query=additional_query)
        return resources

    def resource_aggregates(self, queries=None):
        """Obtaining resource aggregates with queries.

        Representing a resource aggregate by query is the most general way
        to obtain resource aggregates.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
        """
        resource_aggregates = []
        for identifier, query in queries.items():
            resource_aggregates.append(ResourceAggregate(
                query=query,
                ceilometer_usage=None,
                identifier=identifier))
        return resource_aggregates

    def resource_aggregates_with_statistics(self, queries=None,
                                            meter_names=None, period=None,
                                            filter_func=None, stats_attr=None,
                                            additional_query=None):
        """Obtaining resource aggregates with statistics data inside.

        :Parameters:
          - `queries`: Dictionary of named queries that defines a bulk of
                       resource aggregates.
          - `meter_names`: List of meter names of which we want the
                           statistics.
          - `period`: In seconds. If no period is given, only one aggregate
                      statistic is returned. If given, a faceted result will
                      be returned, divided into given periods. Periods with
                      no data are ignored.
          - `stats_attr`: String representing the specific name of the stats.
                          E.g. (avg, max, min...) If defined, meter attribute
                          will contain just the one value. If None is given,
                          meter attribute will contain the whole Statistic
                          object.
          - `additional_query`: Additional query for the statistics.
                                E.g. timespan, etc.
        """
        resource_aggregates = self.resource_aggregates(queries)
        ThreadedUpdateResourceWithStatistics.process_list(
            self,
            resource_aggregates, meter_names=meter_names, period=period,
            stats_attr=stats_attr, additional_query=additional_query)
        return resource_aggregates
def diff_lists(a, b):
    """Return the elements of *a* that are not in *b*.

    Semantics kept exactly: an empty/None *a* yields [], an empty/None
    *b* yields *a* unchanged (order and duplicates preserved), otherwise
    the set difference is returned (order and duplicates are NOT
    preserved).
    """
    if not a:
        return []
    if not b:
        return a
    return list(set(a) - set(b))
class Meters(object):
"""Class for listing of available meters
It is listing meters defined in this class that are available
in Ceilometer meter_list.
It is storing information that is not available in Ceilometer, i.e.
label, description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter'
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names
:Parameters:
- `only_meters`: The list of meter_names we want to show
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names
:Parameters:
- `only_meters`: The list of meter_names we want to show
- `except_meters`: The list of meter names we don't want to show
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
    def _get_nova_meters_info(self):
        """Returns additional info for each nova meter

        That will be used for augmenting the Ceilometer meter.
        """
        # TODO(lsmola) Unless the Ceilometer will provide the information
        # below, I need to define it as a static here. I will be joining this
        # to info that I am able to obtain from Ceilometer meters, hopefully
        # some day it will be supported all.
        # NOTE: 'label' is left empty for every meter; only 'description'
        # is populated at the moment.
        meters_info = datastructures.SortedDict([
            ("instance", {
                'label': '',
                'description': _("Duration of instance"),
            }),
            ("instance:<type>", {
                'label': '',
                'description': _("Duration of instance <type> "
                                 "(openstack types)"),
            }),
            ("memory", {
                'label': '',
                'description': _("Volume of RAM in MB"),
            }),
            ("cpu", {
                'label': '',
                'description': _("CPU time used"),
            }),
            ("cpu_util", {
                'label': '',
                'description': _("Average CPU utilization"),
            }),
            ("vcpus", {
                'label': '',
                'description': _("Number of VCPUs"),
            }),
            ("disk.read.requests", {
                'label': '',
                'description': _("Number of read requests"),
            }),
            ("disk.write.requests", {
                'label': '',
                'description': _("Number of write requests"),
            }),
            ("disk.read.bytes", {
                'label': '',
                'description': _("Volume of reads in B"),
            }),
            ("disk.write.bytes", {
                'label': '',
                'description': _("Volume of writes in B"),
            }),
            ("disk.root.size", {
                'label': '',
                'description': _("Size of root disk in GB"),
            }),
            ("disk.ephemeral.size", {
                'label': '',
                'description': _("Size of ephemeral disk "
                                 "in GB"),
            }),
            ("network.incoming.bytes", {
                'label': '',
                'description': _("Number of incoming bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.outgoing.bytes", {
                'label': '',
                'description': _("Number of outgoing bytes "
                                 "on the network for a VM interface"),
            }),
            ("network.incoming.packets", {
                'label': '',
                'description': _("Number of incoming "
                                 "packets for a VM interface"),
            }),
            ("network.outgoing.packets", {
                'label': '',
                'description': _("Number of outgoing "
                                 "packets for a VM interface"),
            })
        ])
        # Adding flavor based meters into meters_info dict
        # TODO(lsmola) this kind of meter will be probably deprecated
        # https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
        # Each flavor gets an "instance:<flavor>" entry cloned from the
        # generic "instance:<type>" template above.
        for flavor in get_flavor_names(self._request):
            name = 'instance:%s' % flavor
            meters_info[name] = dict(meters_info["instance:<type>"])
            meters_info[name]['description'] = (
                _('Duration of instance type %s (openstack flavor)') %
                flavor)
        # TODO(lsmola) allow to set specific in local_settings. For all meters
        # because users can have their own agents and meters.
        return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('network', {
'label': '',
'description': _("Duration of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Duration of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Duration of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Duration of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Duration of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of update on the image"),
}),
('image.upload', {
'label': '',
'description': _("Number of upload of the image"),
}),
('image.delete', {
'label': '',
'description': _("Number of delete on the image"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('volume', {
'label': '',
'description': _("Duration of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return datastructures.SortedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
| {
"content_hash": "c167ba46c21918624c3a13bb0507d6bc",
"timestamp": "",
"source": "github",
"line_count": 1184,
"max_line_length": 79,
"avg_line_length": 37.32432432432432,
"alnum_prop": 0.5461395727733527,
"repo_name": "xme1226/horizon",
"id": "4568d0cb876cc2df4052ee15496aa8966470e15b",
"size": "44738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/ceilometer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "912401"
},
{
"name": "JavaScript",
"bytes": "239824"
},
{
"name": "Python",
"bytes": "4031589"
},
{
"name": "Shell",
"bytes": "16967"
}
],
"symlink_target": ""
} |
import matplotlib
import numpy as np
import pandas as pd
from collections import namedtuple
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Container for the per-episode bookkeeping collected during training.
EpisodeStats = namedtuple("Stats", ["episode_lengths", "episode_rewards"])


def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):
    """Plot the "cost to go" surface (negated best action value) over the state space.

    env: environment with a 2D observation space (position, velocity).
    estimator: value estimator exposing predict(state) -> per-action values.
    num_tiles: grid resolution along each state dimension.
    """
    positions = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
    velocities = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
    X, Y = np.meshgrid(positions, velocities)
    # Cost-to-go at each grid point is the negated maximum predicted value.
    Z = np.apply_along_axis(lambda state: -np.max(estimator.predict(state)), 2, np.dstack([X, Y]))

    figure = plt.figure(figsize=(10, 5))
    axes = figure.add_subplot(111, projection='3d')
    surface = axes.plot_surface(X, Y, Z, rstride=1, cstride=1,
                                cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
    axes.set_xlabel('Position')
    axes.set_ylabel('Velocity')
    axes.set_zlabel('Value')
    axes.set_title("Mountain \"Cost To Go\" Function")
    figure.colorbar(surface)
    plt.show()
def plot_value_function(V, title="Value Function"):
    """Plot a blackjack value function as two surface plots.

    V maps (player sum, dealer showing, usable ace) tuples to state values;
    one surface is drawn without a usable ace and one with.
    """
    player_sums = [k[0] for k in V.keys()]
    dealer_cards = [k[1] for k in V.keys()]
    x_range = np.arange(min(player_sums), max(player_sums) + 1)
    y_range = np.arange(min(dealer_cards), max(dealer_cards) + 1)
    X, Y = np.meshgrid(x_range, y_range)

    # Look up the value at every (player sum, dealer card) coordinate,
    # once without and once with a usable ace.
    Z_noace = np.apply_along_axis(lambda _: V[(_[0], _[1], False)], 2, np.dstack([X, Y]))
    Z_ace = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))

    def draw_surface(X, Y, Z, plot_title):
        fig = plt.figure(figsize=(20, 10))
        ax = fig.add_subplot(111, projection='3d')
        surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
                               cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
        ax.set_xlabel('Player Sum')
        ax.set_ylabel('Dealer Showing')
        ax.set_zlabel('Value')
        ax.set_title(plot_title)
        ax.view_init(ax.elev, -120)
        fig.colorbar(surf)
        plt.show()

    draw_surface(X, Y, Z_noace, "{} (No Usable Ace)".format(title))
    draw_surface(X, Y, Z_ace, "{} (Usable Ace)".format(title))
def plot_episode_stats(stats, smoothing_window=10, noshow=False):
    """Plot per-episode statistics and return the three figures.

    stats: EpisodeStats namedtuple with episode_lengths and episode_rewards.
    smoothing_window: rolling-mean window applied to the reward curve.
    noshow: when True, close each figure instead of displaying it.
    Returns (fig1, fig2, fig3): lengths, smoothed rewards, episodes per step.
    """
    # Plot the episode length over time
    fig1 = plt.figure(figsize=(10, 5))
    plt.plot(stats.episode_lengths)
    plt.xlabel("Episode")  # fixed typo: was "Epsiode"
    plt.ylabel("Episode Length")
    plt.title("Episode Length over Time")
    if noshow:
        plt.close(fig1)
    else:
        plt.show(fig1)

    # Plot the episode reward over time, smoothed with a rolling mean.
    fig2 = plt.figure(figsize=(10, 5))
    rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
    plt.plot(rewards_smoothed)
    plt.xlabel("Episode")
    plt.ylabel("Episode Reward (Smoothed)")
    plt.title("Episode Reward over Time (Smoothed over window size {})".format(smoothing_window))
    if noshow:
        plt.close(fig2)
    else:
        plt.show(fig2)

    # Plot cumulative time steps against episode number.
    fig3 = plt.figure(figsize=(10, 5))
    plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))
    plt.xlabel("Time Steps")
    plt.ylabel("Episode")
    plt.title("Episode per time step")
    if noshow:
        plt.close(fig3)
    else:
        plt.show(fig3)

    return fig1, fig2, fig3
| {
"content_hash": "197253d2e16c6b2a68af44c94d124c65",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 118,
"avg_line_length": 35.275510204081634,
"alnum_prop": 0.6219265258894996,
"repo_name": "transedward/ml-playground",
"id": "8efed363f615dffcbc1e415a4d1c26d7bb8161d9",
"size": "3457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3167015"
},
{
"name": "Python",
"bytes": "76537"
}
],
"symlink_target": ""
} |
ab={ 'Swaroop' : 'swaroopch@byteofpython.info',
'Larry' : 'larry@wall.org',
'Matsumoto' : 'matz@ruby-lang.org',
'Spammer' : 'spammer@hotmail.com'
}
print "Swaroop's address is %s" %ab['Swaroop']
# Adding a key/value pair
ab['Guido']='guido@python.org'
# Deleting a key/value pair
del ab['Spammer']
print '\nThere are %d contacts in the address-book\n' %len(ab)
for name,address in ab.items():
print 'Contact %s at %s' %(name,address)
if 'Guido' in ab: # OR ab.has_key('Guido')
print "\nGuido's address is %s" %ab['Guido']
| {
"content_hash": "779d55564e2d38603fa4e79ae9048859",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 62,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.659217877094972,
"repo_name": "weepingdog/HelloWorld",
"id": "439bd89f8497b642f1ed248eea5ebdcf748c10ae",
"size": "623",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/using_dict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "1553"
}
],
"symlink_target": ""
} |
from office365.sharepoint.base_entity import BaseEntity
class SPAuthEvent(BaseEntity):
    """SharePoint auth-policy event entity."""

    # Fully qualified server-side type name of this entity.
    _ENTITY_TYPE = "Microsoft.SharePoint.AuthPolicy.Events.SPAuthEvent"

    @property
    def entity_type_name(self):
        """Return the server-side entity type name."""
        return SPAuthEvent._ENTITY_TYPE
| {
"content_hash": "9885537d8a7807c9be086a75c110c757",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 67,
"avg_line_length": 20.6,
"alnum_prop": 0.7572815533980582,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "140497dee252d4890385f056fc9a7ccd24872e99",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "office365/sharepoint/authpolicy/events/auth_event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AzureMachineLearningWorkspacesConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for AzureMachineLearningWorkspaces.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :keyword api_version: Api Version. The default value is "2022-10-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        **kwargs: Any
    ) -> None:
        # Python 3 no-argument super(), consistent with the annotated signatures.
        super().__init__(**kwargs)
        api_version = kwargs.pop('api_version', "2022-10-01")  # type: str

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-machinelearningservices/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        """Build the HTTP pipeline policies, honouring caller-supplied overrides.

        An ARM challenge authentication policy is created from the credential
        unless the caller provided an explicit authentication policy.
        """
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| {
"content_hash": "6c2af1fd1b13554c6e1b554f98616ee6",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 130,
"avg_line_length": 48.875,
"alnum_prop": 0.7154731457800512,
"repo_name": "Azure/azure-sdk-for-python",
"id": "aaac6cd461cc4429d07a4fc784fccdf7ad550263",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_10_01/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
08b.py
~~~~~~
Advent of Code 2017 - Day 8: I Heard You Like Registers
Part Two
To be safe, the CPU also needs to know the highest value held in any
register during this process so that it can decide how much memory to
allocate to these operations. For example, in the previous instructions,
the highest value ever held was 10 (in register c after the third
instruction was evaluated).
:copyright: (c) 2017 by Martin Bor.
:license: MIT, see LICENSE for more details.
"""
import sys
from collections import defaultdict
import operator
# Mapping from instruction / comparison tokens to their operator functions.
ops = {
    '<': operator.lt, '<=': operator.le,
    '==': operator.eq, '!=': operator.ne,
    '>=': operator.ge, '>': operator.gt,
    'inc': operator.add, 'dec': operator.sub,
}


def solve(program):
    """Find the largest value ever held in any register while running `program`.

    :param program: instructions separated by newlines, each of the form
        "<reg> inc|dec <amount> if <reg> <cmp> <value>"
    :return: the highest register value observed at any point

    >>> solve('''b inc 5 if a > 1
    ... a inc 1 if b < 5
    ... c dec -10 if a >= 1
    ... c inc -20 if c == 10''')
    10
    """
    registers = defaultdict(int)
    highest = 0

    for instruction in program.split('\n'):
        target, op, amount, _, cond_reg, cond_op, cond_val = instruction.split()
        # Skip the instruction unless its condition holds.
        if not ops[cond_op](registers[cond_reg], int(cond_val)):
            continue
        registers[target] = ops[op](registers[target], int(amount))
        highest = max(highest, registers[target])

    return highest
def main(argv):
    """Read the program from a file argument (or stdin) and print the answer."""
    if len(argv) == 2:
        source = open(argv[1], 'r')
    else:
        sys.stderr.write('reading from stdin...\n')
        source = sys.stdin
    print(solve(source.read().strip()))


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| {
"content_hash": "e2f040ebd62fbbf4f617880e26c20186",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 24.65671641791045,
"alnum_prop": 0.5756658595641646,
"repo_name": "mcbor/adventofcode",
"id": "fc2f1f331cc337db4926900203b8917d521e4124",
"size": "1699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017/08b.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209281"
},
{
"name": "Shell",
"bytes": "211"
}
],
"symlink_target": ""
} |
"""Emulating the wheel from apple products"""
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from lib import graphics
from contrib import euclid
import cairo
import math
class Scene(graphics.Scene):
    """Interactive scene: an iPod-style wheel coupled to a progress bar.

    Dragging the wheel rotates it; the rotation delta is translated into
    progress-bar movement, and a "*tick*" label flashes roughly every
    30 degrees of rotation.
    """

    def __init__(self, progress):
        # progress: the gtk.ProgressBar updated while the wheel rotates
        graphics.Scene.__init__(self, scale=True, keep_aspect=True)
        self.progress = progress
        # The big grey wheel; pivot at its center so it rotates in place.
        self.wheel = graphics.Circle(200, 200, "#aaa", x = 20, y=20, interactive=True, pivot_x=100, pivot_y=100)
        self.add_child(self.wheel)
        # Small hub drawn on top of the wheel.
        self.add_child(graphics.Circle(50, 50, "#fafafa", x=95, y=95, interactive=True))
        # Flashed on rotation milestones; starts fully transparent.
        self.ticker = graphics.Label("*tick*", size=24, color="#000", x=5, y=220, opacity=0)
        self.ticker.last_degrees = 0
        self.add_child(self.ticker)
        self.connect("on-mouse-move", self.on_mouse_move)
        self.connect("on-mouse-down", self.on_mouse_down)
        self.connect("on-mouse-up", self.on_mouse_up)
        # Drag state: press position and the wheel rotation at press time;
        # both None while no drag is active.
        self.drag_point = None
        self.start_rotation = None

    def on_mouse_down(self, scene, event):
        """Start a drag if the press landed on the wheel."""
        sprite = self.get_sprite_at_position(event.x, event.y)
        if sprite == self.wheel:
            self.drag_point = euclid.Point2(event.x, event.y)
            self.start_rotation = self.wheel.rotation

    def on_mouse_up(self, scene, event):
        """End any drag in progress."""
        self.drag_point = None
        self.start_rotation = None

    def flash_tick(self):
        """Flash the "*tick*" label (only once the previous flash has faded)."""
        if self.ticker.opacity < 0.5:
            self.ticker.opacity = 1
            self.ticker.animate(opacity=0, duration=0.2)

    def on_mouse_move(self, scene, event):
        """While dragging the wheel, rotate it and move the progress bar."""
        mouse_down = gdk.ModifierType.BUTTON1_MASK & event.state
        if not mouse_down:
            return
        sprite = self.get_sprite_at_position(event.x, event.y)
        if sprite == self.wheel:
            # Drag may have started outside our on_mouse_down (e.g. pressed
            # elsewhere then moved onto the wheel) -- initialize it now.
            if not self.drag_point:
                self.on_mouse_down(scene, event)
            # Wheel pivot in scene coordinates.
            pivot_x, pivot_y = self.wheel.get_matrix().transform_point(self.wheel.pivot_x, self.wheel.pivot_y)
            pivot_point = euclid.Point2(pivot_x, pivot_y)
            # Angle swept between the drag-start point and the current
            # pointer position, both measured around the pivot.
            drag_vector = euclid.Point2(event.x, event.y) - pivot_point
            start_vector = self.drag_point - pivot_point
            angle = math.atan2(start_vector.y, start_vector.x) - math.atan2(drag_vector.y, drag_vector.x)
            # Rotation change since the last processed event.
            delta = (self.start_rotation - angle) - self.wheel.rotation
            # full revolution jumps from -180 to 180 degrees
            if abs(delta) >= math.pi:
                delta = 0
            else:
                # Flash the ticker for every ~30 degrees of rotation.
                degrees = int(math.degrees(self.wheel.rotation))
                self.ticker.last_degrees = self.ticker.last_degrees or degrees
                if abs(self.ticker.last_degrees - degrees) >= 30:
                    self.ticker.last_degrees = degrees
                    self.flash_tick()
            # One full revolution (2*pi) moves the bar by one tenth.
            progress = min(1, max(0, self.progress.get_fraction() + delta / (math.pi * 2 * 10)))
            self.progress.set_fraction(progress)
            self.wheel.rotation = self.start_rotation - angle
        else:
            # Pointer left the wheel -- cancel the drag.
            self.drag_point = None
class BasicWindow:
    """Top-level window: the wheel scene stacked above a progress bar."""

    def __init__(self):
        window = gtk.Window()
        window.set_default_size(240, 280)
        window.set_title("iThing")
        window.connect("delete_event", lambda *args: gtk.main_quit())

        progress_bar = gtk.ProgressBar()

        layout = gtk.VBox()
        layout.pack_start(Scene(progress_bar), True, True, 0)
        layout.pack_start(progress_bar, False, False, 0)

        window.add(layout)
        window.show_all()


if __name__ == '__main__':
    window = BasicWindow()

    import signal
    signal.signal(signal.SIGINT, signal.SIG_DFL) # gtk3 screws up ctrl+c
    gtk.main()
| {
"content_hash": "791843a4f6bc1d436eddc31014ae9bb0",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 112,
"avg_line_length": 33.13513513513514,
"alnum_prop": 0.598694942903752,
"repo_name": "projecthamster/experiments",
"id": "4a78a565e74b90250e33ab8a15021ed4253c989d",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i_thing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "734313"
}
],
"symlink_target": ""
} |
class ONE(object):
    """The symbol I (value 1); the fallback when no larger symbol matches."""

    roman = 'I'
    value = 1
    left_allowed = None

    @staticmethod
    def roman_for(number):
        return ONE.roman

    @staticmethod
    def interval():
        return range(0, 4)

    @staticmethod
    def remaining_for(number):
        return number - ONE.value


class FIVE(object):
    """The symbol V (value 5); subtractive prefix symbol: I."""

    roman = 'V'
    value = 5
    left_allowed = ONE

    @staticmethod
    def roman_for(number):
        return (FIVE.left_allowed.roman + FIVE.roman) if number < FIVE.value else FIVE.roman

    @staticmethod
    def interval():
        return range(4, 9)

    @staticmethod
    def remaining_for(number):
        return 0 if number < FIVE.value else number - FIVE.value


class TEN(object):
    """The symbol X (value 10); subtractive prefix symbol: I."""

    roman = 'X'
    value = 10
    left_allowed = ONE

    @staticmethod
    def roman_for(number):
        return (TEN.left_allowed.roman + TEN.roman) if number < TEN.value else TEN.roman

    @staticmethod
    def interval():
        return range(9, 40)

    @staticmethod
    def remaining_for(number):
        return 0 if number < TEN.value else number - TEN.value


class FIFTY(object):
    """The symbol L (value 50); subtractive prefix symbol: X."""

    roman = 'L'
    value = 50
    left_allowed = TEN

    @staticmethod
    def roman_for(number):
        return (FIFTY.left_allowed.roman + FIFTY.roman) if number < FIFTY.value else FIFTY.roman

    @staticmethod
    def interval():
        return range(40, 90)

    @staticmethod
    def remaining_for(number):
        # Below 50 the numeral starts with the subtractive pair XL (worth 40).
        if number < FIFTY.value:
            return number - (FIFTY.value - FIFTY.left_allowed.value)
        return number - FIFTY.value


# Symbols with a bounded lookup interval, checked in ascending order.
all_romans = [FIVE, TEN, FIFTY]


class Roman(object):
    """Convert small integers to Roman numerals via symbol lookup + recursion."""

    @staticmethod
    def translate(number):
        """Return the Roman numeral string for ``number``."""
        symbol = Roman.find_closest_roman_to(number)
        base = symbol.roman_for(number)
        leftover = symbol.remaining_for(number)
        # NOTE(review): ``base != ONE`` compares a string with a class, so it
        # is always True; the recursion is effectively guarded by leftover > 0.
        suffix = Roman.translate(leftover) if base != ONE and leftover > 0 else ''
        return base + suffix

    @staticmethod
    def find_closest_roman_to(number):
        """Return the symbol whose interval contains ``number`` (ONE otherwise)."""
        for candidate in all_romans:
            if number in candidate.interval():
                return candidate
        return ONE
| {
"content_hash": "68273fa4c79efd3b869e2fddd1285dfb",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 100,
"avg_line_length": 22.69607843137255,
"alnum_prop": 0.5991360691144708,
"repo_name": "alejandrodob/dojo",
"id": "1d58d9833bf16fb3a841519cccb1d461d883eb95",
"size": "2315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/roman-numerals/python-13-10-15/roman_numerals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38344"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import os
from solver import SudokuSolver
from finder import SudokuFinder
from util import img_show
def write_missing_values(img, coords, completed_values):
    """
    Draw the puzzle outline and the solved digits onto the image, in place.

    Using the puzzle corner coordinates and the puzzle matrix
    completed values, write the missing values on the image (img).
    Some of these values are not the best, like the font_scale,
    top_offset and left_offset. But these are good enough estimates.

    :param img: BGR image (numpy array); drawn on in place.
    :param coords: four puzzle corner points (p1..p4) -- presumably ordered
        top-left, top-right, bottom-left, bottom-right; TODO confirm against
        SudokuFinder.find_puzzle().
    :param completed_values: iterable of (x, y, digit) triples; x scales the
        vertical cell offset and y the horizontal one, so presumably
        (row, col, digit) -- verify against SudokuSolver.get_completed_values().
    :return: the same img, with the outline and digits drawn.
    """
    color_green = (120, 255, 140)
    color_blueish = (255, 80, 70)
    p1, p2, p3, p4 = coords
    # Outline the detected puzzle area.
    poly_points = np.array([[p1[0], p1[1]], [p2[0], p2[1]], [p4[0], p4[1]],
                            [p3[0], p3[1]]], dtype=np.int32)
    cv2.polylines(img, [poly_points], isClosed=True, color=color_green,
                  thickness=3)
    # Approximate cell size from the corner-to-corner distances (9x9 grid).
    puzzle_cell_w = (p2[0] - p1[0]) / 9
    puzzle_cell_h = (p3[1] - p1[1]) / 9
    top_padding = p1[1]
    left_padding = p1[0]
    # Per-row / per-column drift; presumably compensates for a slightly
    # rotated puzzle (edges not axis-aligned) -- TODO confirm.
    left_diff = (p3[0] - p1[0]) / 9
    top_diff = (p2[1] - p1[1]) / 9
    # Heuristic font size and in-cell offsets, tuned by eye.
    font_scale = puzzle_cell_h / 23
    top_offset = int(puzzle_cell_h / 1.2)
    left_offset = int(puzzle_cell_w / 2.8)
    for x, y, digit in completed_values:
        digit = str(digit)
        # Text origin: cell position + in-cell offset + drift correction.
        y1 = left_padding + int(puzzle_cell_w * y) + left_offset
        y1 += int(left_diff * x)
        x1 = top_padding + int(puzzle_cell_h * x) + top_offset
        x1 += int(top_diff * y)
        cv2.putText(img, digit, (y1, x1),
                    cv2.FONT_HERSHEY_PLAIN, fontScale=font_scale, color=color_blueish,
                    thickness=1, lineType=cv2.LINE_AA)
    return img
def load_test_image(test_img_name):
    """Load a test image (BGR color) from the sibling ``test_images`` directory."""
    images_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_images')
    return cv2.imread(os.path.join(images_dir, test_img_name), cv2.IMREAD_COLOR)
def main():
    """Run the find/solve/annotate pipeline on one of the bundled test images."""
    test_img_names = [
        'handwritten.jpg',                # 0, unable to read all digits
        'site_sudoku.png',                # 1, misreads some digits
        'sudoku_test_rotated_ccw.png',    # 2, ok
        'sudoku_test_clear.png',          # 3, ok
        'sudoku_test_rotated_cw.png',     # 4, ok
        'sudoku_test_clear_smaller.png',  # 5, ok
        'sudoku_sample.png'               # 6, ok
    ]
    img = load_test_image(test_img_names[4])

    puzzle, coords = SudokuFinder(img, debug_mode=True).find_puzzle()
    solver = SudokuSolver(puzzle)

    if not solver.solve(seconds_limit=4):
        print("Could not solve puzzle in under 4 seconds.")
        return

    write_missing_values(img, coords, solver.get_completed_values())
    img_show(img)


if '__main__' == __name__:
    main()
| {
"content_hash": "1976cf640cecdc16cc049d63ea85ce55",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 86,
"avg_line_length": 31.413793103448278,
"alnum_prop": 0.5923893157702159,
"repo_name": "bbuhai/sudoku-solver",
"id": "f2fa17a9a92206bf734310ccbe79b026e8fdc3cd",
"size": "2733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sudoku_solver/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20642"
}
],
"symlink_target": ""
} |
import os
import logging
import sys
import tempfile
from six import iteritems
from six.moves import range
from functools import partial, wraps
import ducky.config
import ducky.cpu.registers
import ducky.console
import ducky.log
import ducky.machine
import ducky.mm
import ducky.snapshot
import ducky.util
from ducky.util import F
from unittest import TestCase # noqa
try:
import unittest.mock as mock # noqa
except ImportError:
import mock # noqa
from testconfig import config
# Root logger shared by the helpers below.
LOGGER = logging.getLogger()

# True when running under PyPy.
PYPY = hasattr(sys, 'pypy_version_info')

from six import PY2 # noqa
from six import PY3 # noqa

#
# Hypothesis setup
#

from hypothesis import settings

# Fallback number of examples per hypothesis test.
DEFAULT_EXAMPLES = 200

# Pick a hypothesis profile by name from the environment; per-profile example
# counts come from the 'hypothesis' section of the test configuration.
# Unknown profile names fall back to the default example count.
if 'HYPOTHESIS_PROFILE' in os.environ:
  profile = os.environ['HYPOTHESIS_PROFILE'].lower()
  if profile not in config['hypothesis']:
    LOGGER.warning('Unknown hypothesis profile "%s"', profile)
    profile = settings(max_examples = DEFAULT_EXAMPLES)
  else:
    profile = settings(max_examples = int(config['hypothesis'][profile]))
  settings.register_profile('ducky-profile', profile)
else:
  settings.register_profile('ducky-profile', settings(max_examples = DEFAULT_EXAMPLES))
settings.load_profile('ducky-profile')
def repeat(*test_paths):
  """Decorator: turn a test into a generator yielding it N times.

  N is taken from the first matching entry in config['repeats'] for the
  given test paths (falling back to the generic 'tests' key); it defaults
  to 1 when nothing is configured.
  """
  def decorator(fn):
    @wraps(fn)
    def generator():
      count = 1

      for path in test_paths + ('tests',):
        configured = config.get('repeats', {}).get(path)
        if configured is not None:
          count = int(configured)
          break

      for iteration in range(count):
        yield (fn, iteration)

    return generator
  return decorator
# Path helpers rooted at the configured tests/tmp directories.
tests_dir = partial(os.path.join, config['dirs']['tests'])
tmp_dir = partial(os.path.join, config['dirs']['tmp'])


def get_tempfile(keep = True):
  """Open a fresh named temporary file (binary read/write) in the tmp dir.

  :param keep: when True (the default), the file survives being closed.
  """
  return tempfile.NamedTemporaryFile('w+b', dir = tmp_dir(), delete = not keep)
def prepare_file(size, messages = None, pattern = 0xDE):
  """Create a temporary file of `size` pattern bytes with optional overlays.

  :param messages: optional list of (offset, text) pairs written over the fill.
  :returns: the closed temporary file object (the file itself is kept).
  """
  tmp = get_tempfile()

  # Fill the whole file with the pattern byte.
  tmp.seek(0)
  for _ in range(size):
    tmp.write(ducky.util.str2bytes(chr(pattern)))

  # Overlay each message at its requested offset.
  for offset, msg in (messages or []):
    tmp.seek(offset)
    tmp.write(ducky.util.str2bytes(msg))

  tmp.close()

  return tmp
def assert_raises(callable, exc_class, message = ''):
  """Fail (AssertionError with `message`) unless `callable` raises `exc_class`.

  Exceptions of any other type propagate unchanged.
  """
  raised = False

  try:
    callable()
  except exc_class:
    raised = True

  if not raised:
    assert False, message
def assert_registers(state, **regs):
  """Check general-purpose registers in a core snapshot against expectations.

  Registers not mentioned in `regs` are expected to be zero; the flags, ip
  and cnt registers are skipped entirely.
  """
  for name in ducky.cpu.registers.REGISTER_NAMES:
    if name in ('flags', 'ip', 'cnt'):
      continue

    expected = regs.get(name, 0)
    actual = state.registers[ducky.cpu.registers.REGISTER_NAMES.index(name)]

    assert actual == expected, F('Register {reg} expected to have value {expected} ({expected:L}), {actual} ({actual:L}) found instead', reg = name, expected = expected, actual = actual)
def assert_flags(state, **flags):
  """Check CPU core flags in a snapshot against expectations.

  Unlisted flags default to False, except `privileged` which defaults
  to True.
  """
  core_flags = ducky.cpu.CoreFlags.from_int(state.flags)

  # Short keyword name -> attribute name on the CoreFlags object.
  flag_labels = {
    'privileged': 'privileged',
    'hwint': 'hwint_allowed',
    'e': 'equal',
    'z': 'zero',
    'o': 'overflow',
    's': 'sign'
  }

  for short_flag, core_flag in iteritems(flag_labels):
    default = True if short_flag == 'privileged' else False
    passed = flags.get(short_flag, default)

    expected = passed in (True, 1)
    actual = getattr(core_flags, core_flag)

    assert expected == actual, F('Flag {flag} expected to be {expected}, {actual} found instead', flag = core_flag, expected = expected, actual = actual)
def assert_mm(state, cells):
  """Check memory cells in a memory snapshot against expected values.

  :param cells: iterable of (addr, expected_value) pairs; each value is read
      as a 32-bit little-endian word from the page content.
  """
  for addr, expected_value in cells:
    # Removed dead no-op self-assignments (`addr = addr`, ...) that were here.
    page_index = ducky.mm.addr_to_page(addr)
    page_offset = ducky.mm.addr_to_offset(addr)

    for page in state.get_page_states():
      if page.index != page_index:
        continue

      # Assemble a 32-bit little-endian word from four consecutive bytes.
      real_value = page.content[page_offset] | (page.content[page_offset + 1] << 8) | (page.content[page_offset + 2] << 16) | (page.content[page_offset + 3] << 24)
      assert real_value == expected_value, 'Value at {} (page {}, offset {}) should be {}, {} found instead'.format(ducky.mm.ADDR_FMT(addr), page_index, ducky.mm.UINT8_FMT(page_offset), ducky.mm.UINT32_FMT(expected_value), ducky.mm.UINT32_FMT(real_value))
      break

    else:
      # No page with a matching index was found in the snapshot.
      assert False, 'Page {} (address {}) not found in memory'.format(page_index, ducky.mm.ADDR_FMT(addr))
def assert_mm_pages(state, *pages):
  """Check that every given page index is present in the memory snapshot."""
  present = set(pg.index for pg in state.get_page_states())

  for pg_id in pages:
    assert pg_id in present, 'Page {} not found in VM state'.format(pg_id)
def assert_file_content(filename, cells):
  """Check individual bytes of a file against expected values.

  :param cells: mapping of file offset to expected byte value (int).
  """
  with open(filename, 'rb') as f:
    for offset, expected in cells.items():
      f.seek(offset)
      actual = ord(f.read(1))

      assert actual == expected, 'Value at {} (file {}) should be {}, {} found instead'.format(offset, filename, ducky.mm.UINT8_FMT(expected), ducky.mm.UINT8_FMT(actual))
def common_asserts(M, S, mm_asserts = None, file_asserts = None, **kwargs):
  """Run the standard register, flag, memory and file assertions on snapshot S.

  Remaining keyword arguments are forwarded to assert_registers and
  assert_flags.
  """
  core_state = S.get_child('machine').get_child('cpu0').get_child('core0')

  assert_registers(core_state, **kwargs)
  assert_flags(core_state, **kwargs)
  assert_mm(S.get_child('machine').get_child('memory'), mm_asserts or {})

  for filename, cells in (file_asserts or []):
    assert_file_content(filename, cells)
def compile_code(code):
  """Assemble and link `code` (assembly source) into a test binary.

  Uses the DAS/DLD tools named in the environment; intermediate source and
  object files are removed afterwards.

  :returns: path of the produced binary.
  """
  src = get_tempfile()
  src.write(ducky.util.str2bytes(code))
  src.flush()
  src.close()

  obj_path = os.path.splitext(src.name)[0] + '.o'
  bin_path = os.path.splitext(src.name)[0] + '.testbin'

  os.system('PYTHONPATH={} {} -f -I {} -i {} -o {}'.format(os.getenv('PYTHONPATH'), os.getenv('DAS'), os.getenv('TOPDIR'), src.name, obj_path))
  os.system('PYTHONPATH={} {} -f -i {} -o {} --section-base=.text=0x0000'.format(os.getenv('PYTHONPATH'), os.getenv('DLD'), obj_path, bin_path))

  os.unlink(src.name)
  os.unlink(obj_path)

  return bin_path
def run_machine(code = None, binary = None, machine_config = None, coredump_file = None, pokes = None, post_setup = None, post_boot = None, post_run = None, logger = None, **kwargs):
  """Set up, boot and run a virtual machine; return the Machine instance.

  Either `code` (assembly source, compiled on the fly) or `binary` (path of a
  pre-built binary) may be given; the binary is registered as the bootloader.

  :param machine_config: configuration the machine hardware is built from.
  :param coredump_file: unused in this implementation.
  :param pokes: list of (address, value, length) writes applied before run.
  :param post_setup: callables run after hw_setup(); any return value other
      than True/None aborts the run early.
  :param post_boot: callables run after boot(), same contract as post_setup.
  :param post_run: callables run after execution, with (machine, last_state).
  """
  pokes = pokes or []
  post_setup = post_setup or []
  post_boot = post_boot or []
  post_run = post_run or []
  logger = logger or ducky.log.create_logger(name = 'ducky-test', level = logging.DEBUG)
  M = ducky.machine.Machine(logger = logger)
  if os.getenv('VMDEBUG') == 'yes':
    M.LOGGER.setLevel(logging.DEBUG)
  # Compile assembly source to a temporary binary when none was provided.
  if binary is None and code is not None:
    binary = compile_code(code)
  if binary is not None:
    machine_config.add_section('bootloader')
    machine_config.set('bootloader', 'file', binary)
  M.hw_setup(machine_config)
  # Abort if any post-setup hook signals failure; clean up any binary that
  # was compiled from source above.
  if not all(fn(M) in (True, None) for fn in post_setup):
    if code is not None:
      os.unlink(binary)
    return M
  M.boot()
  # The compiled binary is no longer needed once the machine has booted.
  if code is not None:
    os.unlink(binary)
  if not all(fn(M) in (True, None) for fn in post_boot):
    return M
  # Apply raw memory pokes before starting execution.
  for address, value, length in pokes:
    M.rom_loader.poke(address, value, length)
  M.run()
  for fn in post_run:
    fn(M, M.last_state)
  return M
def common_run_machine(code = None, binary = None, machine_config = None,
                       cpus = 1, cores = 1,
                       pokes = None,
                       storages = None,
                       mmaps = None,
                       post_setup = None, post_boot = None, post_run = None,
                       logger = None,
                       **kwargs):
  """Convenience front-end to run_machine().

  Fills in a default MachineConfig, CPU topology, optional block-storage
  devices and mmap regions, then delegates to run_machine().
  """
  storages = storages or []
  mmaps = mmaps or []

  # A list of source lines is joined into a single assembly blob.
  if isinstance(code, list):
    code = '\n'.join(code)

  machine_config = machine_config or ducky.config.MachineConfig()

  for section in ('machine', 'cpu'):
    if not machine_config.has_section(section):
      machine_config.add_section(section)

  machine_config.set('machine', 'cpus', cpus)
  machine_config.set('machine', 'cores', cores)
  machine_config.set('cpu', 'math-coprocessor', 'yes')

  if storages:
    machine_config.add_device('bio', 'ducky.devices.storage.BlockIO')
    for driver, storage_id, storage_path in storages:
      machine_config.add_storage(driver, storage_id, filepath = storage_path)

  for mmap_path, addr, size, offset, access, shared in mmaps:
    machine_config.add_mmap(mmap_path, addr, size, offset = offset, access = access, shared = shared)

  return run_machine(code = code, binary = binary, machine_config = machine_config,
                     post_setup = post_setup, post_boot = post_boot, post_run = post_run,
                     pokes = pokes, logger = logger)
| {
"content_hash": "3d1782aa908d1e5a683ee5e2bc4ebc84",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 255,
"avg_line_length": 29.30952380952381,
"alnum_prop": 0.6506904955320877,
"repo_name": "happz/ducky",
"id": "6f127d2a2369b5ff0df3acd351314a281a2dbc82",
"size": "8617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
# Build word2vec-based training matrices for (semi-)supervised learning from
# pre-pickled sentence lists.
# NOTE(review): Python 2 source (cPickle, print statement) — keep running
# under a Python 2 interpreter.
import cPickle
import numpy

from preprocessing import remove_short_sentences
from sen2vec import Sen2VecByWord2Vec,Sen2VecByDoc2Vec

load_dir_path = "../dataset/sentences_divided/"
save_dir_path = "../dataset/training_dataset/"

unlabeled_sentences = cPickle.load(open(load_dir_path + "unlabeled_sentences.pkl","rb"))
labeled_sentences = cPickle.load(open(load_dir_path + "labeled_sentences.pkl","rb"))

# Embed each sentence; entries come back as None when the sentence could not
# be embedded (presumably no known vocabulary — TODO confirm in sen2vec).
s2v = Sen2VecByWord2Vec()
unlabeled_vecs = s2v.sens2vec(unlabeled_sentences)
labeled_vecs = s2v.sens2vec(labeled_sentences)

labels = cPickle.load(open(save_dir_path + "labels.pkl","rb"))
print len(labeled_vecs),len(labels),len(unlabeled_vecs)

# Drop failed (None) embeddings in place; the index is only advanced when
# nothing was deleted so no element is skipped.
index =0
while index < len(unlabeled_vecs) :
    if unlabeled_vecs[index] is None :
        del unlabeled_vecs[index]
    else:
        index += 1

# Same for the labeled set — delete the matching label too, keeping the
# vectors and labels aligned.
index =0
while index < len(labeled_vecs) :
    if labeled_vecs[index] is None :
        del labeled_vecs[index]
        del labels[index]
    else:
        index += 1

# Dense float32 arrays: (x, y) for supervised training, ul_x unlabeled.
x = numpy.array(labeled_vecs).astype("float32")
y = numpy.array(labels).astype("float32")
ul_x = numpy.array(unlabeled_vecs).astype("float32")

cPickle.dump((x,y), open(save_dir_path + "dataset.pkl","wb"))
cPickle.dump((ul_x,), open(save_dir_path + "ul_dataset.pkl","wb"))
| {
"content_hash": "7fc132338ee90d9c52d34d62f0b4db74",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 28.25,
"alnum_prop": 0.7031375703942075,
"repo_name": "jphacks/KB_02",
"id": "2fa3b95f24d3602b614f4f9cffa3899520cb6403",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/make_training_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70687"
}
],
"symlink_target": ""
} |
# Auto-generated pyaf model-control test: build one ozone model combining a
# BoxCox transform, constant trend, day-of-month seasonality and no AR term.
import tests.model_control.test_ozone_custom_models_enabled as testmod


testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['Seasonal_DayOfMonth'] , ['NoAR'] );
"content_hash": "09c2010cf992fb0873c7bd26753f88c8",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 41,
"alnum_prop": 0.7195121951219512,
"repo_name": "antoinecarme/pyaf",
"id": "d67e48d2cd17b273477f05199219ac13aaf1f9fe",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_ConstantTrend_Seasonal_DayOfMonth_NoAR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import oval57 as oval
class cybox_oval_mappings(object):
    """Translates CybOX Defined Objects into OVAL tests, objects and states.

    Supported object types are files and Windows registry keys.  All
    generated OVAL ids are minted from monotonically increasing counters
    under the namespace supplied at construction time.
    """

    def __init__(self, id_namespace):
        # Counters backing the generate_*_id() methods.
        self.test_id_base = 0
        self.obj_id_base = 0
        self.ste_id_base = 0
        self.def_id_base = 0
        self.id_namespace = id_namespace
        #Mappings
        #CybOX Condition to OVAL operation mappings
        self.operator_condition_mappings = {'Equals':'equals','DoesNotEqual':'not equal','Contains':'pattern match',
                                            'GreaterThan':'greater than', 'GreaterThanOrEqual':'greater than or equal',
                                            'LessThan':'less than','LessThanOrEqual':'less than or equal','FitsPattern':'pattern match',
                                            'BitwiseAnd':'bitwise and', 'BitwiseOr':'bitwise or'}
        #CybOX Object Type to OVAL object mappings
        self.object_mappings = {'WinRegistryKeyObj:WindowsRegistryKeyObjectType':'registry_object', 'FileObj:FileObjectType':'file_object',
                                'WinFileObj:WindowsFileObjectType':'file_object', 'WinExecutableFileObj:WindowsExecutableFileObjectType':'file_object'}
        #CybOX FileObject to OVAL file_object mappings (CybOX element name : {OVAL element name, OVAL element datatype})
        self.file_object_mappings = {'File_Path':{'name':'path','datatype':'string'},'Full_Path':{'name':'filepath','datatype':'string'},
                                     'File_Name':{'name':'filename', 'datatype':'string'}}
        #CybOX FileObject to OVAL file_state mappings
        self.file_state_mappings = {'Size_In_Bytes':{'name':'size','datatype':'int'},'Accessed_Time':{'name':'a_time','datatype':'int'},
                                    'Modified_Time':{'name':'m_time','datatype':'int'},'Created_Time':{'name':'c_time','datatype':'int'}}
        #CybOX WinRegistryObject to OVAL registry_object mappings
        self.registry_object_mappings = {'Key':{'name':'key','datatype':'string'},'Hive':{'name':'hive','datatype':'string'},'Name':{'name':'name','datatype':'string'}}
        #CybOX WinRegistryObject Values to OVAL registry_state mappings
        self.registry_state_mappings = {'Name':{'name':'name','datatype':'string'},'Data':{'name':'value','datatype':'string'},'Datatype':{'name':'type','datatype':'string'}}

    #Creates and returns a dictionary of OVAL test, object, and state (if applicable)
    def create_oval(self, cybox_defined_object, reference):
        """Convert one CybOX Defined Object.

        Returns a dict with keys 'test', 'object' and (when any state was
        produced) 'state'; returns None when the object type is unsupported
        or the object failed basic sanity checks.
        """
        oval_entities = {}
        oval_states = []
        object_type = cybox_defined_object._XSI_NS + ':' + cybox_defined_object._XSI_TYPE
        if object_type in self.object_mappings.keys():
            oval_object = self.create_oval_object(object_type, cybox_defined_object)
            if oval_object is not None:
                if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType':
                    # Registry values need name/value pair handling of their own.
                    self.process_registry_values(cybox_defined_object, oval_object, oval_states)
                else:
                    state = self.create_oval_state(object_type, cybox_defined_object)
                    # BUGFIX: append the state computed above.  The original
                    # code called create_oval_state() a second time here,
                    # discarding the first state and consuming extra ids.
                    if state is not None:
                        oval_states.append(state)
                oval_test = self.create_oval_test(object_type, oval_object, oval_entities, oval_states, reference)
                oval_entities['test'] = oval_test
                oval_entities['object'] = oval_object
                if oval_states is not None and len(oval_states) > 0:
                    oval_entities['state'] = oval_states
                return oval_entities
        else:
            return None

    #Create the OVAL object
    def create_oval_object(self, object_type, cybox_defined_object):
        """Build the OVAL object; returns None if required fields are missing."""
        oval_object_type = self.object_mappings.get(object_type)
        oval_object_mappings = self.object_mappings.get(object_type) + '_mappings'
        oval_object = getattr(oval, oval_object_type)()
        oval_object.set_id(self.generate_obj_id())
        oval_object.set_version(1)
        # NOTE(review): _fields appears to be the object's live field dict —
        # the deletions below therefore mutate the input object; confirm that
        # callers do not reuse it afterwards.
        object_fields = cybox_defined_object._fields
        # File Object related corner cases
        if "File" in object_type:
            if object_fields["Full_Path"]:
                # A full path supersedes the split path/name representation.
                del object_fields["File_Name"]
                del object_fields["File_Path"]
            # Corner case where file_path is meant to be used as the full path to the file
            elif object_fields["File_Path"] and (not object_fields["Full_Path"] and not object_fields["File_Name"]):
                object_fields["Full_Path"] = object_fields["File_Path"]
                del object_fields["File_Path"]
        for element, value in object_fields.items():
            if value is not None:
                if element in getattr(getattr(self, oval_object_mappings), 'keys')():
                    element_dictionary = getattr(getattr(self, oval_object_mappings), 'get')(element)
                    element_name = element_dictionary.get('name')
                    element_datatype = element_dictionary.get('datatype')
                    method = 'set_' + element_name
                    getattr(oval_object, method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value))
        #Do some basic object sanity checking for certain objects
        if object_type == 'WinRegistryKeyObj:WindowsRegistryKeyObjectType' and (oval_object.hive is None or oval_object.key is None):
            return None
        elif 'FileObjectType' in object_type and (oval_object.filepath is None and (oval_object.path is None or oval_object.filename is None)):
            return None
        return oval_object

    #Create any OVAL states
    def create_oval_state(self, object_type, cybox_defined_object):
        """Build an OVAL state from the object's mapped state fields.

        Returns None (implicitly) when no mapped field carried a value.
        """
        oval_state_type = self.object_mappings.get(object_type).split('_')[0] + '_state'
        oval_state_mappings = oval_state_type + '_mappings'
        # BUGFIX: mint the state id exactly once via the constructor.  The
        # original additionally called set_id(generate_ste_id()) right after
        # construction, consuming two ids per state.
        oval_state = getattr(oval, oval_state_type)(version = 1, id = self.generate_ste_id())
        object_fields = cybox_defined_object._fields
        for element, value in object_fields.items():
            if value is not None:
                if element in getattr(getattr(self, oval_state_mappings), 'keys')():
                    element_dictionary = getattr(getattr(self, oval_state_mappings), 'get')(element)
                    element_name = element_dictionary.get('name')
                    element_datatype = element_dictionary.get('datatype')
                    method = 'set_' + element_name
                    getattr(oval_state, method)(oval.EntityBaseType(datatype = element_datatype, operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
        if oval_state.hasContent_():
            return oval_state

    #Create the OVAL test
    def create_oval_test(self, object_type, oval_object, oval_entities, oval_states, reference = None):
        """Build the OVAL test referencing *oval_object* and any states.

        NOTE(review): *reference* is concatenated into the comment string, so
        passing None would raise; callers in this module always supply it.
        """
        oval_test_type = self.object_mappings.get(object_type).split('_')[0] + '_test'
        #Create the test
        comment = 'OVAL Test created from MAEC Action ' + reference
        oval_test = getattr(oval, oval_test_type)(id = self.generate_test_id(), check = 'at least one', version=1.0, comment = comment)
        oval_test.set_object(oval.ObjectRefType(object_ref = oval_object.get_id()))
        if len(oval_states) > 0:
            for state in oval_states:
                if state is not None:
                    oval_test.add_state(oval.StateRefType(state_ref = state.get_id()))
        return oval_test

    #Handle any Values inside a Registry object
    def process_registry_values(self, cybox_defined_object, oval_object, oval_states):
        """Translate registry Value entries into registry states.

        A single value pushes its Name into the OVAL object; multiple values
        make the object name a wildcard and each value becomes its own state.
        """
        #Special registry Values handling
        if cybox_defined_object.values is not None:
            name_set = False
            for reg_value in cybox_defined_object.values:
                oval_state = oval.registry_state(version = 1, id = self.generate_ste_id())
                for element, value in reg_value._fields.items():
                    if value is not None:
                        #Corner case for handling multiple name/value pairs in the OVAL object
                        if len(cybox_defined_object.values) == 1 and not name_set:
                            if element in self.registry_object_mappings.keys():
                                oval_element = self.registry_object_mappings.get(element)
                                method = 'set_' + oval_element.get('name')
                                getattr(oval_object, method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
                                name_set = True
                        elif len(cybox_defined_object.values) > 1 and not name_set:
                            # Match any value name at the object level; the
                            # per-value states carry the specifics.
                            oval_object.set_name(oval.EntityBaseType(datatype = 'string', operation = 'pattern match', valueOf_='.*'))
                            name_set = True
                        if element in self.registry_state_mappings.keys():
                            oval_element = self.registry_state_mappings.get(element)
                            method = 'set_' + oval_element.get('name')
                            getattr(oval_state, method)(oval.EntityBaseType(datatype = 'string', operation = self.operator_condition_mappings.get(value), valueOf_=value.value))
                if oval_state.hasContent_():
                    oval_states.append(oval_state)

    def generate_test_id(self):
        """Return the next OVAL test id in this namespace."""
        self.test_id_base += 1
        test_id = 'oval:' + self.id_namespace + ':tst:' + str(self.test_id_base)
        return test_id

    def generate_obj_id(self):
        """Return the next OVAL object id in this namespace."""
        self.obj_id_base += 1
        obj_id = 'oval:' + self.id_namespace + ':obj:' + str(self.obj_id_base)
        return obj_id

    def generate_ste_id(self):
        """Return the next OVAL state id in this namespace."""
        self.ste_id_base += 1
        ste_id = 'oval:' + self.id_namespace + ':ste:' + str(self.ste_id_base)
        return ste_id

    def generate_def_id(self):
        """Return the next OVAL definition id in this namespace."""
        self.def_id_base += 1
        def_id = 'oval:' + self.id_namespace + ':def:' + str(self.def_id_base)
        return def_id
| {
"content_hash": "26c50accdbba50703b1cb3c0b397f8d5",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 180,
"avg_line_length": 60.37869822485207,
"alnum_prop": 0.5950607604860839,
"repo_name": "MAECProject/maec-to-oval",
"id": "78ca3198f7ede8f18eb479f0fd9836028d35a879",
"size": "10331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cybox_oval_mappings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1530584"
}
],
"symlink_target": ""
} |
default_app_config = 'dwitter.apps.DwitterConfig'
| {
"content_hash": "53a9d1fe77bdc1a950bbfab6ccb2ec44",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 49,
"avg_line_length": 50,
"alnum_prop": 0.8,
"repo_name": "lionleaf/dwitter",
"id": "4ada851685a1ae304c0f213f8e1c40a9b71d9c9e",
"size": "50",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dwitter/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "88799"
},
{
"name": "HTML",
"bytes": "47341"
},
{
"name": "JavaScript",
"bytes": "222169"
},
{
"name": "Makefile",
"bytes": "1239"
},
{
"name": "Python",
"bytes": "153398"
}
],
"symlink_target": ""
} |
"""Switchmap-NG setup.
Manages parameters required by all classes in the module.
"""
# Main python libraries
def main():
"""Process data.
Args:
None
Returns:
None
"""
# Check the environment
pass
if __name__ == 'switchmap':
main()
| {
"content_hash": "9f8adc3c3b41c07fccdf6ec6076ff49b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 11.4,
"alnum_prop": 0.5684210526315789,
"repo_name": "PalisadoesFoundation/switchmap-ng",
"id": "984554dc21cb0d7353b06e9542a92fc95edad32f",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "switchmap/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "29277"
},
{
"name": "HTML",
"bytes": "1518596"
},
{
"name": "JavaScript",
"bytes": "139533"
},
{
"name": "Python",
"bytes": "528654"
}
],
"symlink_target": ""
} |
LUA_SOURCE = '''
function main(splash)
splash.resource_timeout = splash.args.timeout
{}
local condition = false
while not condition do
splash:wait(splash.args.wait)
condition = splash:evaljs({}{}{})
end
{}
{}
splash:runjs("window.close()")
{}
end
'''
GO = '\tassert(splash:go{}splash.args.url, baseurl=nil, headers={}, http_method="{}", body={}, formdata={}{})' \
.format(*['{}'] * 6)
JS_PIECE = '`{}`, document, null, XPathResult.BOOLEAN_TYPE, null).booleanValue || document.evaluate('
USER_AGENT = '\tsplash:set_user_agent(\'{}\')'
GET_HTML_ONLY = '\tlocal html = splash:html()'
RETURN_HTML_ONLY = '\treturn html'
GET_ALL_DATA = '''
local entries = splash:history()
local last_response = entries[#entries].response
local url = splash:url()
local headers = last_response.headers
local http_status = last_response.status
local cookies = splash:get_cookies()
'''
RETURN_ALL_DATA = '''
return {
url = splash:url(),
headers = last_response.headers,
http_status = last_response.status,
cookies = splash:get_cookies(),
html = splash:html(),
}
'''
PREPARE_COOKIES = '''
splash:init_cookies({}
{}
{})
'''
SET_PROXY = '''
splash:on_request(function(request)
request:set_proxy{}
{}
{}
end)
'''
| {
"content_hash": "4ada3210b00c4cf3579cd10f1f853918",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 112,
"avg_line_length": 20.08955223880597,
"alnum_prop": 0.5950965824665676,
"repo_name": "postalXdude/PySplash",
"id": "c7322e1d45d06ee499822386436e9c5c580b7c87",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_splash/static.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13383"
}
],
"symlink_target": ""
} |
from six.moves.urllib.parse import urlparse, parse_qs, urlencode
from logging.config import dictConfig
import logging
try:
import gevent
except:
gevent = None
def parse_redis_connection(url):
    """Parse a redis connection URL into its components.

    Supports ``tcp://host:port?db=N`` and ``unix://...path...?db=N`` forms
    (the latter is not supported by the rest of the stack yet).  Returns a
    dict describing the connection, or None for any other scheme.
    """
    parsed = urlparse(url)
    scheme = parsed.scheme
    # The db index is mandatory — a missing ?db= raises KeyError.
    db_index = int(parse_qs(parsed.query)['db'][0])

    if scheme == 'tcp':
        host_part, _, port_part = parsed.netloc.partition(":")
        return {'protocol': scheme,
                'host': host_part,
                'port': int(port_part),
                'db': db_index}
    if scheme == 'unix':
        #not supported yet
        return {'protocol': scheme,
                'path': parsed.path,
                'db': db_index}
def make_query_url(url, data):
    """Append *data*, urlencoded, to *url* as a query string."""
    return '?'.join([url, urlencode(data)])
def update_dictionaries(*dictionaries):
    """Merge any number of dicts into a new dict; later arguments win."""
    merged = {}
    for mapping in dictionaries:
        merged.update(mapping)
    return merged
def send_file(file_or_buffer):
    """Stream the file at *file_or_buffer* as an octet-stream Flask response.

    Reads in fixed-size chunks and, when gevent is available, yields control
    to the event loop between chunks.
    """
    from flask import Response
    chunksize = 10000

    def stream_chunks():
        with open(file_or_buffer, "rb") as handle:
            while True:
                chunk = handle.read(chunksize)
                if not chunk:
                    break
                yield chunk
                # Cooperative yield so other greenlets can run mid-transfer.
                if gevent:
                    gevent.sleep(0)

    return Response(stream_chunks(),
                    mimetype='application/octet-stream')
def setup_loghandlers(level=None):
    """Quiet urllib3's connection-pool logger and install console logging.

    The dictConfig-based console handler is only installed when no named
    handlers exist yet (logging._handlers is a private registry of named
    handlers), so repeated calls are effectively idempotent.
    """
    logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(logging.WARNING)
    if logging._handlers:
        return
    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'console': {
                'format': "%(created)f:%(process)d:%(name)s:%(message)s"
            },
        },
        'handlers': {
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'console',
            },
        },
        'root': {
            'handlers': ['console'],
            'level': level or 'INFO',
        }
    }
    dictConfig(config)
| {
"content_hash": "d06b131ad7e40126d3d622b25b37301f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 91,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.4902139280837506,
"repo_name": "hhuuggoo/kitchensink",
"id": "5f2883f533b66711e9d54a7ccde98c87bc873c21",
"size": "2197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitchensink/utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "806"
},
{
"name": "CSS",
"bytes": "125713"
},
{
"name": "HTML",
"bytes": "14465"
},
{
"name": "JavaScript",
"bytes": "146752"
},
{
"name": "Python",
"bytes": "161023"
},
{
"name": "Shell",
"bytes": "779"
}
],
"symlink_target": ""
} |
import os
from pkg_resources import Requirement, RequirementParseError
def add_to_requirements(package_name, url=None, file_path='requirements.txt'):
    """Add or replace *package_name*'s entry in a requirements file.

    Every existing line that refers to the package (either via a trailing
    ``#egg=<name>`` marker or via its parsed project name) is replaced with
    *url*; if no line matched, *url* is appended.  Unparseable lines are
    kept verbatim.  When *url* is falsy, the bare package name is written.
    """
    if not url:
        url = package_name

    existing = []
    if os.path.exists(file_path):
        with open(file_path) as handle:
            existing = handle.read().strip().splitlines()

    updated = []
    needs_append = True
    for entry in existing:
        if not entry.strip():
            updated.append(entry)
            continue
        try:
            matches = (entry.endswith('#egg=%s' % package_name)
                       or Requirement.parse(entry).project_name == package_name)
        except RequirementParseError:
            # Not a parseable requirement (e.g. a comment) — keep as-is.
            updated.append(entry)
            continue
        if matches:
            updated.append(url)
            needs_append = False
        else:
            updated.append(entry)

    if needs_append:
        updated.append(url)

    with open(file_path, 'w') as handle:
        handle.write('\n'.join(updated))
| {
"content_hash": "278244527cc06180e3c25036e8ec53eb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 111,
"avg_line_length": 25.394736842105264,
"alnum_prop": 0.5512953367875648,
"repo_name": "django-cratis/cratis",
"id": "8075d40401b9ff2afab59c748247d509685f14c3",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cratis/generators/requirements.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "41957"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
} |
"""
The main file for AAC. It is responsible to run the REST API
configured in controllers.py.
"""
from aac import app
app.run(port=8080, debug=True)
| {
"content_hash": "3d7477174399b255a0275dedfd1eaae3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.7302631578947368,
"repo_name": "bdnog/avg-age-country",
"id": "ff78ccf494c08e29bc5bdcbaaca286455fad1ef0",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "4553"
}
],
"symlink_target": ""
} |
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.dbexts import nsx_models
from neutron.plugins.vmware.extensions import maclearning as mac
LOG = logging.getLogger(__name__)
class MacLearningDbMixin(object):
    """Mixin class for mac learning."""

    def _make_mac_learning_state_dict(self, port, fields=None):
        """Build the API-facing dict (port_id + MAC-learning flag) for a row."""
        res = {'port_id': port['port_id'],
               mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
        # _fields() comes from the db base plugin and filters to *fields*.
        return self._fields(res, fields)

    def _extend_port_mac_learning_state(self, port_res, port_db):
        """Copy mac_learning_enabled onto the port API dict.

        The attribute is skipped when unset or false, so it only appears on
        ports where learning is enabled.
        """
        state = port_db.mac_learning_state
        if state and state.mac_learning_enabled:
            port_res[mac.MAC_LEARNING] = state.mac_learning_enabled

    # Register dict extend functions for ports
    # (runs at class-definition time, hooking the extension into every
    # port-dict build done by the core plugin).
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attributes.PORTS, ['_extend_port_mac_learning_state'])

    def _update_mac_learning_state(self, context, port_id, enabled):
        """Set the flag on an existing row, or create the row if missing."""
        try:
            query = self._model_query(context, nsx_models.MacLearningState)
            state = query.filter(
                nsx_models.MacLearningState.port_id == port_id).one()
            state.update({mac.MAC_LEARNING: enabled})
        except exc.NoResultFound:
            self._create_mac_learning_state(context,
                                            {'id': port_id,
                                             mac.MAC_LEARNING: enabled})

    def _create_mac_learning_state(self, context, port):
        """Insert a MacLearningState row for *port* and return its API dict."""
        with context.session.begin(subtransactions=True):
            enabled = port[mac.MAC_LEARNING]
            state = nsx_models.MacLearningState(
                port_id=port['id'],
                mac_learning_enabled=enabled)
            context.session.add(state)
        return self._make_mac_learning_state_dict(state)
| {
"content_hash": "3d5f7b50ee2cc89dd20fa193089b265f",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 41.04255319148936,
"alnum_prop": 0.6340072576464489,
"repo_name": "cloudbase/neutron-virtualbox",
"id": "6a85162e1d321c6777bbb30a672ea383006d9460",
"size": "2558",
"binary": false,
"copies": "1",
"ref": "refs/heads/virtualbox_agent",
"path": "neutron/plugins/vmware/dbexts/maclearning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "8448838"
},
{
"name": "Shell",
"bytes": "12510"
}
],
"symlink_target": ""
} |
"""Classifier on the latents representations in AEs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import all_aes
import tensorflow as tf
from lib import data, utils, train, classifiers
FLAGS = flags.FLAGS
flags.DEFINE_string('ae_dir', '', 'Folder containing AE to use for DA.')
class XLatentAE(train.Classify):
    """Single-layer classifier trained on the latent codes of a frozen AE.

    `self.ae` (a pre-trained autoencoder, attached by the caller before
    train()) is used purely as a fixed feature extractor; only the
    classifier head built in model() is trained.
    """

    def process(self, x, label):
        """Encode a batch of inputs into AE latents; labels pass through."""
        h = self.ae.eval_sess.run(self.ae.eval_ops.encode,
                                  feed_dict={self.ae.eval_ops.x: x})
        return h, label

    def train_step(self, data, ops):
        """Pull one batch, encode it, and run a single optimizer step."""
        x = self.tf_sess.run(data)
        x, label = x['x'], x['label']
        x, label = self.process(x, label)
        self.sess.run(ops.train_op, feed_dict={ops.x: x, ops.label: label})

    def model(self):
        """Build the classifier graph over the AE's latent space.

        Returns the ops bundle from single_layer_classifier, extended with
        the input placeholders and a train_op.
        """
        # Latent inputs: spatial dims downsampled by 2**scales, with the
        # AE's latent channel depth.
        x = tf.placeholder(tf.float32,
                           [None,
                            self.height >> self.ae.params['scales'],
                            self.width >> self.ae.params['scales'],
                            self.ae.params['latent']], 'x')
        l = tf.placeholder(tf.float32, [None, self.nclass], 'label_onehot')
        ops = classifiers.single_layer_classifier(x, l, self.nclass)
        ops.x = x
        ops.label = l
        loss = tf.reduce_mean(ops.loss)

        # Step decay: drop the learning rate 10x at the halfway point of
        # training (total_kimg is in thousands of images).
        halfway = ((FLAGS.total_kimg << 10) // FLAGS.batch) // 2
        lr = tf.train.exponential_decay(FLAGS.lr, tf.train.get_global_step(),
                                        decay_steps=halfway,
                                        decay_rate=0.1)
        utils.HookReport.log_tensor(loss, 'xe')

        # Run any pending UPDATE_OPS (e.g. batch-norm statistics) before
        # each optimizer step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            opt = tf.train.AdamOptimizer(lr)
            ops.train_op = opt.minimize(loss, tf.train.get_global_step())
        return ops
def main(argv):
    """Load a pre-trained AE and train a latent-space classifier on top."""
    del argv  # Unused.
    dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=FLAGS.batch))
    ae, ae_model = utils.load_ae(FLAGS.ae_dir, FLAGS.dataset, FLAGS.batch,
                                 all_aes.ALL_AES)
    # Freeze the AE: evaluation mode only, and keep its hooks quiet.
    with utils.HookReport.disable():
        ae.eval_mode()
    classifier = XLatentAE(dataset, FLAGS.train_dir)
    # Nest the classifier's checkpoints under the AE's model name.
    classifier.train_dir = os.path.join(classifier.train_dir, ae_model)
    classifier.ae = ae
    classifier.train()
if __name__ == '__main__':
    # Delegate flag parsing and program entry to absl.
    app.run(main)
| {
"content_hash": "9262821a73b25cf329904ba34d5462d8",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 32.43421052631579,
"alnum_prop": 0.5679513184584178,
"repo_name": "brain-research/acai",
"id": "7b88b39eee26edc65e77f76bf4b907661fc1343b",
"size": "3063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classify_latent_ae.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "32134"
},
{
"name": "Python",
"bytes": "116166"
},
{
"name": "Shell",
"bytes": "13934"
}
],
"symlink_target": ""
} |
from unittest import TestCase, main as ut_main
from random import random
from wallp.service import Imgur, Google, DeviantArt, Reddit, Bing
from wallp.util import log
from wallp.db import func as dbfunc
class TestGetImageUrl(TestCase):
    """Exercises get_image_url() of each wallpaper service.

    NOTE(review): these tests appear to hit the real services over the
    network — confirm before running in CI.
    """

    # Probability with which the patched image_url_seen() reports a URL as
    # already seen; 0 means every URL looks fresh.
    image_url_seen_ratio = 0
    # When True, print the chosen URL and its trace to stdout.
    interactive = True

    @classmethod
    def setUpClass(cls):
        log.start('stdout', log.levels['debug'])

        # Monkey-patch the db lookup with a random stub so no database is
        # needed; the original is restored in tearDownClass.
        cls.orig_image_url_seen = dbfunc.image_url_seen
        dbfunc.image_url_seen = lambda x : random() < cls.image_url_seen_ratio

    @classmethod
    def tearDownClass(cls):
        dbfunc.image_url_seen = cls.orig_image_url_seen

    def print_info(self, image_trace):
        """Pretty-print the steps the service took to pick an image."""
        print('')
        for step in image_trace:
            print("{0}. {1:<25}: {2}".format(step.step, step.name, step.data))

    def service_get_image_url(self, service):
        """Shared body: fetch a URL and require a non-empty trace."""
        url = service.get_image_url()

        if self.interactive:
            print(url)
            self.print_info(service.image_trace)

        self.assertGreater(len(service.image_trace), 0)

    def test_imgur(self):
        self.service_get_image_url(Imgur())

    def test_google(self):
        self.service_get_image_url(Google())

    def test_deviantart(self):
        self.service_get_image_url(DeviantArt())

    def test_reddit(self):
        self.service_get_image_url(Reddit())

    def test_bing(self):
        self.service_get_image_url(Bing())
if __name__ == '__main__':
    # Run this module's test cases via unittest's CLI entry point.
    ut_main()
| {
"content_hash": "9693d967cba6853053d64cbaff1216f5",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 72,
"avg_line_length": 20.3125,
"alnum_prop": 0.703076923076923,
"repo_name": "amol9/wallp",
"id": "506a89885fbe673e01c7e88037cb8a6a7113cc02",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallp/test/service/test_get_image_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Protocol Buffer",
"bytes": "816"
},
{
"name": "Python",
"bytes": "219490"
}
],
"symlink_target": ""
} |
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"  # ref names; expanded in git-archive tarballs
    git_full = "$Format:%H$"  # full commit hash
    git_date = "$Format:%ci$"  # committer date
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Attributes (VCS, style, tag_prefix, ...) are assigned by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"  # version-string rendering style
    cfg.tag_prefix = ""  # release tags carry no prefix in this repo
    cfg.parentdir_prefix = "daskfunk-"  # fallback: version from tarball dir name
    cfg.versionfile_source = "daskfunk/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
    # Signals the caller to fall through to the next version-discovery strategy.
# Registries: rendered _version.py templates and VCS method handlers.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s), trying each name in *commands* in turn.

    Returns (stdout, returncode) on success; (None, None) when no command
    could be launched; (None, returncode) on a non-zero exit.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # Command not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        # Loop exhausted without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Everything after the prefix is the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from expanded git-archive keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI" (ISO-8601), but we prefer "%ci" — an
        # "ISO-8601-like" string available since git-1.5.3 — and edit it
        # into compliance: first space -> "T", next space dropped.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # Starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0"; when that prefix appears, prefer those entries.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either git < 1.8.3, or there really are no tags. Heuristic:
        # assume version tags contain a digit, which filters out common
        # branch names ("release", "stabilization") plus "HEAD"/"master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # No suitable tags: report "0+unknown" but keep the full revision id.
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys "long", "short", "error", "dirty",
    "closest-tag", "distance" and "date" (see assignments below).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # On Windows the git entry point may be a .cmd or .exe wrapper.
        GITS = ["git.cmd", "git.exe"]
    # Bail out early (NotThisMethod) if root isn't a git checkout at all.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # Strip the suffix so the TAG-NUM-gHEX parse below isn't confused.
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        # Greedy (.+) lets TAG itself contain hyphens; NUM and HEX are
        # anchored at the end.
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a "." .

    PEP 440 local version identifiers begin with "+" and separate later
    segments with ".", so the first appender gets "+" and the rest ".".

    Args:
        pieces (dict): only the "closest-tag" entry is consulted.  A
            missing key *or* an explicit None value (no tag found) is
            treated as "no '+' yet".
    """
    # "or ''" also covers closest-tag being present but None, which
    # git_pieces_from_vcs() produces on the no-tags path; the original
    # .get() default only handled a *missing* key and would raise
    # TypeError ("+" in None) otherwise.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a
    tagged build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing to anchor on, synthesize a "0" version
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # Append the local-version segment: +N.gHEX (or .N.gHEX if the
        # tag already carries a "+").
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # Exactly on the tag: emit it unadorned.
        return tag
    return "%s.post.dev%d" % (tag, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty
    tree will appear "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    base = pieces["closest-tag"]
    if base:
        if not (pieces["distance"] or pieces["dirty"]):
            # Clean build exactly on the tag: nothing to append.
            return base
    else:
        # exception #1
        base = "0"
    rendered = "%s.post%d" % (base, pieces["distance"])
    if pieces["dirty"]:
        rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        parts = [tag]
        if pieces["distance"]:
            parts.append("%d-g%s" % (pieces["distance"], pieces["short"]))
        rendered = "-".join(parts)
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # An earlier stage failed: surface the error instead of a version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # Dispatch table replaces the original if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    try:
        renderer = renderers[style]
    except KeyError:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderer(pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we
    # have __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in
    # which case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded git-archive keywords embedded in this file.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # Locate the project root by walking up from this file's location.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the
        # source tree (where the .git directory might live) to this file.
        # Invert this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly via 'git describe'.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| {
"content_hash": "dc25b6289a54060caea67b9ba3e6883a",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 79,
"avg_line_length": 35.252941176470586,
"alnum_prop": 0.5697758496023138,
"repo_name": "Savvysherpa/dask-funk",
"id": "ea4a9654ee238744dffb398de4df169a0d92f8d0",
"size": "18454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daskfunk/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98173"
}
],
"symlink_target": ""
} |
import datetime
import itertools
import unittest
import ddt
import mock
from rally.plugins.agent import masteragent
# Dotted module path of the code under test; used throughout this module
# to build mock.patch target strings.
BASE = "rally.plugins.agent.masteragent"
class MyDict(dict):
    # A plain dict subclass: unlike dict itself, instances accept
    # arbitrary attributes (annotated() below sets __name__ on them so
    # ddt can derive readable generated-test names).
    pass
def annotated(d, name=None):
    """Copy *d* into a MyDict, optionally attaching a ``__name__``.

    ddt uses ``__name__`` (when present and non-empty) to label the
    generated test case, so data-driven tests read nicely in output.
    """
    result = MyDict(**d)
    if name:
        result.__name__ = name
    return result
@ddt.ddt
class AgentsRequestTestCase(unittest.TestCase):
    """Tests for masteragent.AgentsRequest."""

    def test___call__(self):
        """__call__ publishes the request and collects the responses."""
        request = masteragent.AgentsRequest(
            req={
                "foo": "bar",
            },
            config={
                "config": "foobar",
            },
            req_id="42")
        request.recv_responses = mock.Mock()
        publish_socket = mock.Mock()
        pull_socket = mock.Mock()
        request(publish_socket, pull_socket)
        # The request id is merged into the published message...
        publish_socket.send_json.assert_called_once_with(
            {"foo": "bar", "req": "42"})
        # ...and the config is forwarded (unpacked) to the collector.
        request.recv_responses.assert_called_once_with(
            "42", pull_socket, config="foobar")

    @mock.patch("%s.datetime_now" % BASE)
    @ddt.data(
        annotated({
            # Poll timedout (returned False)
            "req_id": "foobar",
            "recv_json": [
                {}, {"req": "bar"}, {}
            ],
            "poll": [
                True, True, True, False
            ],
            "now": [10, 10.5, 10.8, 10.9],
            "expected_missed_queue": [1],
            "expected_queue": [0, 2],
        }, name="poll timed out"),
        annotated({
            # Recv timedout (no time left after recv_json)
            "req_id": "foobar",
            "recv_json": [
                {}, {"req": "bar"}, {}
            ],
            "poll": [
                True, True, True
            ],
            "now": [10, 10.5, 10.8, 11.1],
            "expected_missed_queue": [1],
            "expected_queue": [0, 2],
        }, name="recv timed out"),
        annotated({
            # All is missed
            "req_id": "",
            "recv_json": [
                {"req": "foo"}, {"req": "bar"}
            ],
            "poll": [
                True, True
            ],
            "now": [10, 10.8, 11.1],
            "expected_missed_queue": [0, 1],
            "expected_queue": [],
        }, name="all is missed"),
        annotated({
            # Picked from missing
            "req_id": "foobar",
            "recv_json": [
                {"req": "foo"}, {"req": "bar"}
            ],
            "poll": [
                True, True
            ],
            "now": [10, 10.8, 11.1],
            "expected_missed_queue": [0, 1],
            "missed_queue": {"foobar": [{"req": "foobar"}]},
            "expected_queue": [{"req": "foobar"}],
        }, name="picked from missing"),
        annotated({
            # stopped on recv
            "req_id": "foobar",
            "recv_json": [
                {}, {}, ValueError()
            ],
            "poll": [
                True, True, True
            ],
            "now": [10, 10.8, 11.1],
            "timeout": 10 * 1000,
            "should_raise": ValueError,
        }, name="stopped on recv"),
        annotated({
            # stopped on poll
            "req_id": "foobar",
            "recv_json": [
                {}, {}, {},
            ],
            "poll": [
                True, True, True, ValueError(),
            ],
            "now": [10, 11, 12, 13],
            "timeout": 10 * 1000,
            "should_raise": ValueError,
        }, name="stopped on poll"),
        # BUG FIX: this data set was also labelled "stopped on poll",
        # colliding with the previous one in ddt's generated test names;
        # its own comment shows it exercises the datetime_now error path.
        annotated({
            # stopped on now
            "req_id": "foobar",
            "recv_json": [
                {}, {}, {},
            ],
            "poll": [
                True, True, True
            ],
            "now": [10, 11, 12, ValueError()],
            "timeout": 10 * 1000,
            "should_raise": ValueError,
        }, name="stopped on now"),
        annotated({
            # recieved enough response
            "req_id": "foobar",
            "recv_json": [
                {}, {"req": "foo"}, {}
            ],
            "poll": [
                True, True, True
            ],
            "now": [10, 11, 12, 13],
            "timeout": 10 * 1000,
            "agents": 2,
            "expected_missed_queue": [1],
            "expected_queue": [0, 2]
        }, name="recv enough responses"),
    )
    def test_recv_responses(self, param, mock_masteragent_datetime_now):
        """Drive recv_responses through one scripted scenario.

        Each ``param`` dict scripts the pull socket's poll()/recv_json()
        behaviour and the wall clock, then states which responses must
        land in the returned queue and which in the missed queue.
        """
        recv_json = param.pop("recv_json")
        # Responses without an explicit req id belong to this request.
        for r in recv_json:
            try:
                r.setdefault("req", param.get("req_id"))
            except AttributeError:
                # Exception instances scripted into recv_json have no
                # setdefault; the mock raises them as-is.
                pass
        mock_pull_socket = mock.Mock(**{
            "recv_json.side_effect": recv_json,
            "poll.side_effect": param.pop("poll")
        })
        mock_masteragent_datetime_now.side_effect = [
            ts if isinstance(ts, Exception) else
            datetime.datetime.utcfromtimestamp(ts)
            for ts in param.pop("now")
        ]
        expected_missed_queue = {}
        param.setdefault("missed_queue", {})
        emq = param.pop("expected_missed_queue", None)
        if emq is not None:
            # Expected missed responses, grouped by their req id.
            emq = [recv_json[i] for i in emq]
            get_req = lambda j: j["req"]
            emq = sorted(emq, key=get_req)
            emq = dict([
                (key, list(subiter))
                for key, subiter in itertools.groupby(emq, key=get_req)])
            expected_missed_queue = emq
            del emq
        expected_queue = param.pop("expected_queue", None)
        if expected_queue:
            # Ints index into recv_json; dicts are literal responses.
            expected_queue = [recv_json[i] if isinstance(i, int) else i
                              for i in expected_queue]
        param["pull_socket"] = mock_pull_socket
        should_raise = param.pop("should_raise", False)
        if not should_raise:
            retval = masteragent.AgentsRequest.recv_responses(**param)
            self.assertEqual(expected_queue, retval)
        else:
            if should_raise is True:
                should_raise = StopIteration
            self.assertRaises(
                should_raise,
                masteragent.AgentsRequest.recv_responses,
                **param)
        self.assertEqual(expected_missed_queue, param["missed_queue"])
@ddt.ddt
class RequestHandlerTestCase(unittest.TestCase):
    """Tests for masteragent.RequestHandler."""

    def setUp(self):
        super(RequestHandlerTestCase, self).setUp()
        # Stub BaseHTTPRequestHandler.__init__ so handlers can be built
        # without a real socket/request.
        self.mocks = [
            mock.patch(
                "%s.six.moves.BaseHTTPServer."
                "BaseHTTPRequestHandler.__init__" % BASE)
        ]
        for mock_ in self.mocks:
            mock_.start()

    def tearDown(self):
        super(RequestHandlerTestCase, self).tearDown()
        for mock_ in self.mocks:
            mock_.stop()

    def get_req_handler(self, path="/"):
        # Helper: a RequestHandler wired to a mocked server with fresh
        # server-side state.
        server = mock.Mock(pull_socket="foo", publish_socket="bar",
                           server_vars=masteragent.ServerVariables())
        return masteragent.RequestHandler(
            request=None, client_address=None,
            server=server, path=path)

    def test_send_json_response(self):
        req_handler = self.get_req_handler()
        req_handler.send_response = mock.Mock()
        req_handler.send_header = mock.Mock()
        req_handler.end_headers = mock.Mock()
        req_handler.wfile = mock.Mock()
        req_handler.send_json_response(
            data={"hello": "there"}, status="foobar")
        req_handler.send_response.assert_called_once_with("foobar")
        req_handler.send_header.assert_called_once_with(
            "Content-Type", "text/json")
        req_handler.end_headers.assert_called_once_with()
        # Body must be JSON-encoded bytes with a trailing newline.
        req_handler.wfile.write.assert_called_once_with(
            b"""{"hello": "there"}\n"""
        )

    def test__get_request_from_url(self):
        req_handler = self.get_req_handler()
        req_handler.url = mock.Mock(query="a=b&c=d&e=f")
        # Query-string values override the passed-in defaults.
        config = req_handler._get_request_from_url(a="z", g="h")
        self.assertEqual(
            {"a": "b", "c": "d", "e": "f", "g": "h"},
            config)

    @mock.patch("%s.AgentsRequest.recv_responses" % BASE)
    def test_missed(self, mock_agents_request_recv_responses):
        req_handler = self.get_req_handler()
        req_handler.send_json_response = mock.Mock()
        req_handler._get_request_from_url = mock.Mock(
            return_value={"foo": "bar"})
        req_handler.command = "DELETE"
        req_handler.server_vars.missed_queue = mock.Mock()
        req_handler.missed()
        mock_agents_request_recv_responses.assert_called_once_with(
            None, "foo", req_handler.server_vars.missed_queue, foo="bar")
        req_handler.send_json_response.assert_called_once_with(
            {"missed": req_handler.server_vars.missed_queue})
        # DELETE must empty the missed queue after reporting it.
        req_handler.server_vars.missed_queue.clear.assert_called_once_with()

    def test_ping(self):
        req_handler = self.get_req_handler()
        req_handler.send_request_to_agents = mock.Mock()
        req_handler._get_request_from_url = mock.Mock()
        req_handler.ping()
        # ping defaults: 10s timeout, wait for every agent.
        req_handler._get_request_from_url.assert_called_once_with(
            timeout=10000, agents=float("Inf")
        )
        req_handler.send_request_to_agents.assert_called_once_with(
            req_handler._get_request_from_url.return_value)

    @ddt.data(
        {"req": "abc", "foo": "bar"},
        {"foo": "bar"}
    )
    @mock.patch("%s.AgentsRequest.recv_responses" % BASE)
    def test_poll(self, config, mock_agents_request_recv_responses):
        req_handler = self.get_req_handler()
        req_handler.send_json_response = mock.Mock()
        req_handler._get_request_from_url = mock.Mock(
            return_value=dict(**config))
        req_handler.server_vars.last_req_id = "last_req_id"
        req_handler.poll()
        # Without an explicit "req" the handler falls back to the last
        # request id it issued.
        mock_agents_request_recv_responses.assert_called_once_with(
            config.get("req", "last_req_id"),
            req_handler.pull_socket, req_handler.server_vars.missed_queue,
            foo="bar"
        )
        req_handler.send_json_response.assert_called_once_with(
            mock_agents_request_recv_responses.return_value)

    def test_route_ok(self):
        # PUT/GET/DELETE all dispatch through the shared route() method.
        self.assertEqual(
            masteragent.RequestHandler.route,
            masteragent.RequestHandler.do_PUT
        )
        self.assertEqual(
            masteragent.RequestHandler.route,
            masteragent.RequestHandler.do_GET
        )
        self.assertEqual(
            masteragent.RequestHandler.route,
            masteragent.RequestHandler.do_DELETE
        )

    @ddt.unpack
    @ddt.data(
        ("/here", "GET", False),
        ("/here", "POST", True),
        ("/there", "POST", False)
    )
    def test_route(self, path, command, should_404):
        req_handler = self.get_req_handler()
        req_handler.url = mock.Mock(path=path)
        req_handler.command = command
        req_handler.send_response = mock.Mock()
        req_handler.end_headers = mock.Mock()
        req_handler.methods = {
            "GET": {"/here": lambda x: "foobar"},
            "POST": {"/there": lambda x: "foobar"},
        }
        retval = req_handler.route()
        if should_404:
            # Unknown path for the given verb must 404.
            req_handler.send_response.assert_called_once_with(404)
        else:
            self.assertEqual("foobar", retval)

    def test_do_POST(self):
        req_handler = self.get_req_handler()
        req_handler.send_request_to_agents = mock.Mock()
        req_handler._get_request_from_url = mock.Mock()
        req_handler.do_POST()
        req_handler.send_request_to_agents.assert_called_once_with(
            req_handler._get_request_from_url.return_value
        )

    @ddt.unpack
    @ddt.data(
        ({"a": "b"}, {"a": "c"}, "", True),
        ({"b": "b"}, {"a": "c"}, "/here", False),
    )
    def test__parse_request(self, url, post, path, should_raise):
        req_handler = self.get_req_handler(path)
        req_handler._get_request_from_url = mock.Mock(
            return_value=dict(**url))
        req_handler._get_request_from_post = mock.Mock(
            return_value=dict(**post))
        req_handler.url = mock.Mock(path=path)
        if should_raise:
            # Conflicting keys between URL and POST must be rejected.
            self.assertRaises(ValueError, req_handler._parse_request)
            return
        # Expected merge: POST data + URL data + derived "action".
        post.update(url)
        post["action"] = path[1:]
        req = req_handler._parse_request()
        self.assertEqual(post, req)

    @mock.patch("%s.AgentsRequest" % BASE)
    def test_send_request_to_agents(self, mock_masteragent_agents_request):
        req_handler = self.get_req_handler()
        req_handler._parse_request = mock.Mock()
        req_handler.send_json_response = mock.Mock()
        req_handler.send_request_to_agents({"foo": "bar"})
        mock_masteragent_agents_request.assert_called_once_with(
            req_handler._parse_request.return_value, {"foo": "bar"})
        # The issued request id is remembered for later /poll calls.
        self.assertEqual(
            mock_masteragent_agents_request.return_value.req_id,
            req_handler.server_vars.last_req_id)
        # The request is invoked with (publish_socket, pull_socket).
        mock_masteragent_agents_request.return_value.assert_called_once_with(
            "bar", "foo")
        req_handler.send_json_response.assert_called_once_with(
            mock_masteragent_agents_request.return_value.return_value)

    def test__get_request_from_post_empty(self):
        req_handler = self.get_req_handler()
        req_handler.headers = {}
        # No Content-Type header means no POST body to parse.
        self.assertEqual({}, req_handler._get_request_from_post())

    @mock.patch("%s.cgi.FieldStorage" % BASE)
    def test__get_request_from_post(self, mock_cgi_field_storage):
        req_handler = self.get_req_handler()
        req_handler.headers = {
            "Content-Type": "form/multi-part",
            "Content-Length": 42
        }
        req_handler.rfile = mock.Mock()

        class V(object):
            # Minimal stand-in for a cgi field with a .value attribute.
            def __init__(self, value):
                self.value = value

        mock_cgi_field_storage.return_value = {
            "foo": [V("10"), V("test")],
            "bar": V("{\"abc\": [10, 20, 30], \"foo\": 10}"),
            "env": V("[\"D=E\", \"A=B=C\"]"),
        }
        retval = req_handler._get_request_from_post()
        mock_cgi_field_storage.assert_called_once_with(
            fp=req_handler.rfile,
            headers=req_handler.headers,
            environ={
                "REQUEST_METHOD": "POST",
                "CONTENT_TYPE": req_handler.headers["Content-Type"],
            }
        )
        # Lists stay lists of raw values, JSON objects are decoded, and
        # "env" entries are split on the *first* "=" into a dict.
        self.assertEqual(
            {
                "bar": {u"abc": [10, 20, 30], u"foo": 10},
                "env": {u"A": u"B=C", u"D": u"E"},
                "foo": ["10", "test"]
            },
            retval)
class ModuleTestCase(unittest.TestCase):
    """Tests for module-level helpers in masteragent."""

    @mock.patch("%s.zmq.Context" % BASE)
    def test_init_zmq(self, mock_zmq_context):
        masteragent.init_zmq("publish_url", "pull_url")
        import zmq
        # Both sockets must be created and bound, in this exact order.
        self.assertEqual(
            [
                # PUB socket
                # zmq.Context()
                mock.call(),
                # context.socket(zmq.PUB)
                mock.call().socket(zmq.PUB),
                # socket.bind
                mock.call().socket().bind("publish_url"),
                # PULL socket
                # zmq.Context()
                mock.call(),
                # context.socket(zmq.PULL)
                mock.call().socket(zmq.PULL),
                # socket.bind
                mock.call().socket().bind("pull_url"),
            ],
            mock_zmq_context.mock_calls)
| {
"content_hash": "05f1f2b4ad0c15ac8c2367a4f041985a",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 77,
"avg_line_length": 32.30962343096234,
"alnum_prop": 0.5108132608132608,
"repo_name": "paboldin/rally",
"id": "edc516e64269716e4554fca0e2d083925b19ede8",
"size": "16074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/agent/test_masteragent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "28306"
},
{
"name": "Mako",
"bytes": "23385"
},
{
"name": "Python",
"bytes": "2951174"
},
{
"name": "Shell",
"bytes": "44161"
}
],
"symlink_target": ""
} |
import logging
import requests
import urlparse
from django.core.exceptions import ValidationError
from django.db import models
from cabot.metricsapp import defs
from cabot.metricsapp.api import get_series_ids, get_panel_url
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class GrafanaInstance(models.Model):
    """A Grafana site plus the API credentials needed to query it."""

    class Meta:
        app_label = 'metricsapp'

    name = models.CharField(
        unique=True,
        max_length=30,
        help_text='Unique name for Grafana site.'
    )
    url = models.CharField(
        max_length=100,
        help_text='Url of Grafana site.'
    )
    api_key = models.CharField(
        max_length=100,
        help_text='Grafana API token for authentication (http://docs.grafana.org/http_api/auth/).'
    )
    sources = models.ManyToManyField(
        'MetricsSourceBase',
        through='GrafanaDataSource',
        help_text='Metrics sources used by this Grafana site.'
    )

    # Class-level cache of requests.Session objects keyed by API key, so
    # connections are reused across model instances.
    _sessions = dict()

    def __unicode__(self):
        return self.name

    def clean(self, *args, **kwargs):
        """Make sure the input url/api key work"""
        response = self.get_request('api/search')
        try:
            response.raise_for_status()
        # BUG FIX: the module is requests.exceptions (plural); the
        # original "requests.exception.HTTPError" raised AttributeError
        # instead of catching the HTTP error, so validation crashed
        # rather than reporting a ValidationError.
        except requests.exceptions.HTTPError:
            raise ValidationError('Request to Grafana API failed.')

    @property
    def session(self):
        """A requests.session object with the correct authorization headers"""
        session = self._sessions.get(self.api_key)
        if session is None:
            session = requests.Session()
            session.headers.update({'Authorization': 'Bearer {}'.format(self.api_key)})
            self._sessions[self.api_key] = session
        return session

    def get_request(self, uri=''):
        """Make a request to the Grafana instance"""
        return self.session.get(urlparse.urljoin(self.url, uri),
                                timeout=defs.GRAFANA_REQUEST_TIMEOUT_S)
class GrafanaDataSource(models.Model):
    """
    Intermediate model to match the name of a data source in a Grafana
    instance with the corresponding MetricsDataSource
    """

    class Meta:
        app_label = 'metricsapp'

    grafana_source_name = models.CharField(
        help_text='The name for a data source in grafana (e.g. metrics-stage")',
        max_length=30,
    )
    grafana_instance = models.ForeignKey('GrafanaInstance', on_delete=models.CASCADE)
    metrics_source_base = models.ForeignKey('MetricsSourceBase', on_delete=models.CASCADE)

    def __unicode__(self):
        return '%s (%s, %s)' % (self.grafana_source_name,
                                self.metrics_source_base.name,
                                self.grafana_instance.name)
class GrafanaPanel(models.Model):
    """
    Data about a Grafana panel.
    """
    class Meta:
        app_label = 'metricsapp'

    @property
    def modifiable_url(self):
        """Url with modifiable time range, dashboard link, etc"""
        if self.panel_url:
            # Swap the single-panel ("dashboard-solo") view for the full
            # dashboard view and open the panel fullscreen.
            return '{}&fullscreen'.format(self.panel_url.replace('dashboard-solo', 'dashboard'))
        return None

    def get_rendered_image(self):
        """Get a .png image of this panel"""
        # GrafanaInstance.get_request only takes the path
        panel_url = self.panel_url.replace(urlparse.urljoin(self.grafana_instance.url, '/'), '')
        rendered_image_url = urlparse.urljoin('render/', panel_url)
        rendered_image_url = '{}&width={}&height={}'.format(rendered_image_url,
                                                           defs.GRAFANA_RENDERED_IMAGE_WIDTH,
                                                           defs.GRAFANA_RENDERED_IMAGE_HEIGHT)
        # Unfortunately "$__all" works for the normal image but not render
        rendered_image_url = rendered_image_url.replace('$__all', 'All')
        try:
            image_request = self.grafana_instance.get_request(rendered_image_url)
            image_request.raise_for_status()
            # Raw PNG bytes from Grafana's render endpoint.
            return image_request.content
        except requests.exceptions.RequestException:
            # Best-effort: rendering failures are logged, not raised.
            logger.error('Failed to get Grafana panel image')
            return None

    # Fields (declared after the methods in the original source).
    grafana_instance = models.ForeignKey('GrafanaInstance', on_delete=models.CASCADE)
    dashboard_uri = models.CharField(max_length=100)
    panel_id = models.IntegerField()
    series_ids = models.CharField(max_length=50)
    selected_series = models.CharField(max_length=50)
    panel_url = models.CharField(max_length=2500, null=True)
def build_grafana_panel_from_session(session):
    """Returns an (unsaved!) GrafanaPanel model instance for use with
    rendering or to save to the DB"""
    panel = GrafanaPanel()
    set_grafana_panel_from_session(panel, session)
    return panel
def set_grafana_panel_from_session(grafana_panel, session):
    """
    Update a GrafanaPanel model with data based on session vars.

    Note that this does not update the DB - call grafana_panel.save()
    yourself if you want to do that.
    """
    instance = GrafanaInstance.objects.get(id=session['instance_id'])
    uri = session['dashboard_uri']
    grafana_panel.grafana_instance = instance
    grafana_panel.dashboard_uri = uri
    grafana_panel.panel_id = int(session['panel_id'])
    grafana_panel.series_ids = get_series_ids(session['panel_info'])
    grafana_panel.selected_series = '_'.join(session['series'])
    grafana_panel.panel_url = get_panel_url(instance.url, uri,
                                            session['panel_id'],
                                            session['templating_dict'])
| {
"content_hash": "44309148e1161d38bc641a622235aa95",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 107,
"avg_line_length": 36.49333333333333,
"alnum_prop": 0.6470588235294118,
"repo_name": "Affirm/cabot",
"id": "9da6a80b418bef92dd47fd69255de6e0f5e12fe7",
"size": "5474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cabot/metricsapp/models/grafana.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23051"
},
{
"name": "Dockerfile",
"bytes": "1254"
},
{
"name": "Groovy",
"bytes": "3630"
},
{
"name": "HTML",
"bytes": "63833"
},
{
"name": "JavaScript",
"bytes": "371544"
},
{
"name": "Less",
"bytes": "509"
},
{
"name": "Procfile",
"bytes": "143"
},
{
"name": "Python",
"bytes": "512051"
},
{
"name": "Shell",
"bytes": "7702"
}
],
"symlink_target": ""
} |
"""
Various functions.
by Matt Hall, Agile Geoscience, 2016
"""
from io import BytesIO
import datetime
import sys
import numpy as np
from PIL import ImageStat
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
from sklearn.neighbors import BallTree
from scipy.spatial.distance import pdist, squareform
from scipy.spatial import cKDTree
from pytsp import run, dumps_matrix
import boto3
from obspy.core import Trace, Stream
from obspy.io.segy.segy import SEGYTraceHeader
from obspy.core import AttribDict
from obspy.io.segy.segy import SEGYBinaryFileHeader
from flask import send_file
def get_params(request):
    """Read and normalize the service's query-string parameters.

    Args:
        request: a Flask request; only ``request.args`` is consulted.

    Returns:
        dict: parameters with defaults applied and values coerced —
        ``n_colours`` to int, ``interval``/``cool_point`` to lists of
        ints, and the boolean flags to real bools.
    """
    # Get raw parameters.
    params = {'url': request.args.get('url')}
    params['n_colours'] = request.args.get('n_colours') or '128'
    params['interval'] = request.args.get('interval') or '0,1'
    params['region'] = request.args.get('region') or ''
    params['sampling'] = request.args.get('sampling') or 'random'
    params['cool_point'] = request.args.get('cool_point') or None
    params['prod'] = request.args.get('prod') or ''
    params['recover'] = request.args.get('recover') or ''
    params['format'] = request.args.get('format') or 'PNG'
    params['return_cmap'] = request.args.get('return_cmap') or ''
    params['hull'] = request.args.get('hull') or ''
    # Condition parameters.
    params['n_colours'] = int(params['n_colours'])
    # prod/recover/hull default to True unless explicitly negated;
    # return_cmap defaults to False unless explicitly affirmed.
    params['prod'] = False if params['prod'].lower() in ['false', 'no', '0'] else True
    params['recover'] = False if params['recover'].lower() in ['false', 'no', '0'] else True
    params['hull'] = False if params['hull'].lower() in ['false', 'no', '0'] else True
    params['return_cmap'] = True if params['return_cmap'].lower() in ['true', 'yes', '1', 'on', 'ok'] else False
    params['interval'] = [int(n) for n in params['interval'].split(',')]
    if params['cool_point'] is not None:
        try:
            cool_point = [int(n) for n in params['cool_point'].split(',')]
        except (ValueError, AttributeError):
            # Malformed coordinate string: fall back to "no cool point"
            # rather than failing the request. (Was a bare except, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            cool_point = None
        params['cool_point'] = cool_point
    return params
def serve_pil_image(img):
    """Stream a PIL image back to the client as a PNG HTTP response."""
    buf = BytesIO()
    img.save(buf, 'PNG')
    buf.seek(0)  # rewind so send_file reads from the start
    return send_file(buf, mimetype='image/png')
def is_greyscale(img):
    """
    Decide if an image is greyscale or not.
    """
    stat = ImageStat.Stat(img)
    # If the mean of the three channel sums equals the first channel's
    # sum, the channels are identical, i.e. the image is greyscale.
    return sum(stat.sum[:3]) / 3 == stat.sum[0]
def get_imarray(img):
    """
    Turn a PIL image into an array of floats in [0, 1] with
    shape (h, w, 3).

    Args:
        img (Image): a PIL image.

    Returns:
        ndarray.
    """
    rgb = np.asarray(img.convert('RGB'))
    # Keep only the three colour channels and scale 0-255 to 0-1.
    return rgb[..., :3] / 255.
def mask_colours(a, tree, colours, tolerance=1e-6, leave=0):
    """
    Remove particular colours from the palette.

    Args:
        a (ndarray): palette of shape (n, 3).
        tree: spatial index over ``a`` (e.g. sklearn BallTree) exposing
            ``query_radius``.
        colours (array-like): target colours whose neighbourhoods to cut.
        tolerance (float): search radius around each target colour.
        leave (int): how many matching points to keep per target.

    Returns:
        ndarray: ``a`` with the matched rows removed.

    TODO
        Only remove them if they aren't in the colourmap... i.e. if they
        are out on their own.
    """
    matches = tree.query_radius(colours, tolerance)
    keep = np.ones(a.shape[0], dtype=bool)
    # Preserve the first `leave` matches; for leave >= 2 the slice end
    # also spares the tail (keeps the original slicing behaviour).
    end = None if leave < 2 else 1 - leave
    # BUG FIX: removed a leftover debug `print(leave, end)` that wrote
    # to stdout on every call.
    for idx in matches:
        keep[idx[leave:end]] = False
    return a[keep]
def isolate_black(a, tree):
    """Drop a point sitting at (or right next to) pure black when it is
    isolated from the rest of the palette.

    Uses the spread of the nearest-neighbour distances from black as the
    tolerance for "effectively at black" and "isolated".
    """
    k = min([6, a.shape[0] // 15])
    distances, indices = tree.query([[0, 0, 0]], k)
    for dist, idx in zip(distances, indices):
        # Typical spacing among the next-nearest neighbours.
        tol = np.diff(dist[1:]).mean()
        if dist[0] >= tol / 3:
            # No point effectively at the target colour; the original
            # left a placeholder here for adding one.
            continue
        if dist[1] > 3 * tol:
            # The black point is far from everything else, so it is
            # probably not part of the colourbar: eliminate it.
            a = np.delete(a, idx[0], axis=0)
        elif dist[0] < tol / 30:
            # Part of the colourbar but sitting right at black: drop it.
            a = np.delete(a, idx[0], axis=0)
    return a
def isolate_white(a, tree):
    """Drop a stray near-white point from the palette, if one exists."""
    k = min([6, a.shape[0]//15])
    distances, indices = tree.query([[1, 1, 1]], k)
    for dist, idx in zip(distances, indices):
        # Typical spacing among the k nearest neighbours of white.
        tol = np.diff(dist[1:]).mean()
        # A point effectively *at* white that is far from everything else
        # is probably not part of the colourbar, so eliminate it.
        if dist[0] < tol / 3 and dist[1] > 3 * tol:
            a = np.delete(a, idx[0], axis=0)
    return a
def remove_duplicates(a, tree, tolerance=1e-6):
    """
    Remove all duplicate points, within the given tolerance.

    Args:
        a (ndarray): palette of points, shape (n, 3).
        tree (BallTree): spatial index built over ``a``.
        tolerance (float): radius within which points count as duplicates.

    Returns:
        ndarray: ``a`` with duplicates removed (one copy of each kept).
    """
    # NOTE(review): `tree` is built once over the original `a`, so its
    # indices refer to the original rows; once rows are removed those
    # indices no longer line up with the shrinking `a`. Looks fragile —
    # confirm (the call site in get_quanta is currently commented out).
    for c in a:
        # Bug fix: `tolerance` was accepted but never forwarded, so any
        # non-default value was silently ignored.
        a = mask_colours(a, tree, [c], tolerance=tolerance, leave=1)
    return a
def remove_isolates(a, tree, min_neighbours):
    """
    Remove all points with fewer than 2r neighbours in a radius of r,
    where r is the median of all nearest neighbour distances.
    """
    r = (min_neighbours + 1) / 2
    # Median nearest-neighbour distance (column 0 is each point itself).
    nn_dist, _ = tree.query(a, 2)
    search_radius = np.median(nn_dist[:, 1]) * r
    neighbourhoods = tree.query_radius(a, search_radius)
    lonely = [i for i, hood in enumerate(neighbourhoods) if hood.size < 2 * r]
    return np.delete(a, lonely, axis=0)
def get_quanta(imarray, n_colours=256, sampling=None, min_neighbours=6):
    """
    Reduces the colours in the image array down to some specified number,
    default 256. Usually you'll want at least 100, at most 500. Returns
    an unsorted colour table (codebook) for the colours.
    Call via get_codebook.

    Args:
        imarray (ndarray): The array from ``get_imarray()``.
        n_colours (int): The number of colours to reduce to.
        sampling (str): 'random' for random pixels from the image.
            'columns' for random columns (eg for seismic data).
            'rows' for random rows.
        min_neighbours (int): The minimum number of neighbours a point
            should have.

    Returns:
        ndarray. An array of size (n_colours, 3).
    """
    h, w, c = imarray.shape

    # Define training set of roughly n pixels.
    n = min(h*w//10, n_colours*100)
    if sampling == 'rows':
        # Bug fix: row indices were drawn with the *width* as the bound
        # (IndexError whenever w > h) and the row count divided n by the
        # row count instead of the row length.
        nrow = max(1, n // w)  # How many rows do we need?
        data = imarray[np.random.randint(0, h, nrow)]
    elif sampling == 'columns':
        # Bug fix: this branch indexed axis 0 and therefore selected
        # *rows*, not the columns ('traces') the docstring promises.
        ntr = max(1, n // h)  # How many traces do we need?
        data = imarray[:, np.random.randint(0, w, ntr)]
    else:  # random
        im_ = imarray.reshape((-1, c))
        data = shuffle(im_, random_state=0)[:n]
    sample = data.reshape((-1, c))

    # Fit the data.
    kmeans = KMeans(n_clusters=n_colours).fit(sample)
    quanta = kmeans.cluster_centers_

    # Regularization: clamp the centroids into the unit cube (KMeans can
    # step slightly outside it numerically).
    np.clip(quanta, 0, 1, out=quanta)

    # tree = BallTree(quanta)
    # quanta = remove_duplicates(quanta, tree)
    # tree = BallTree(quanta)
    # quanta = remove_isolates(quanta, tree, min_neighbours)
    return quanta
def get_distances(quanta, cool_point=None):
    """
    Makes the complete distance matrix that the TSP solver needs. The
    adjustments are (1) adding the cool-point to start at, and (2) adding
    the zero-point to avoid creating a closed loop and make a path instead.
    Call via get_codebook.

    Args:
        quanta (ndarray): The array from ``get_quanta()``.
        cool_point (ndarray): The point to use as the starting point, e.g.
            [[0, 0, 0.5]], [[0.25, 0, 0.5]], or [[0, 0, 0]], or even [[1, 1, 1]].

    Returns:
        ndarray. A matrix of size (n_colours+2, n_colours+2).
    """
    # Add cool-point. Bug fix: use an explicit None check, because the old
    # `cool_point or [[0.0, 0.0, 0.0]]` raised ValueError (ambiguous truth
    # value) when an ndarray was passed, which the docstring allows.
    if cool_point is None:
        cool_point = [[0.0, 0.0, 0.0]]
    p = np.vstack([cool_point, quanta])

    # Make distance matrix.
    dists = squareform(pdist(p, 'euclidean'))

    # The values in `dists` are floats in the range 0 to sqrt(3).
    # Normalize the values to int16s.
    d = 32767 * dists / np.sqrt(3)
    d = d.astype(np.int16)

    # To use a TSP algo to solve the shortest Hamiltonian path problem,
    # we need to add a point that is zero units from every other point.
    row, col = d.shape
    d = np.insert(d, row, 0, axis=0)
    d = np.insert(d, col, 0, axis=1)
    return d
def sort_quanta(distances):
    """
    Solves the travelling salesman problem, with a magic zero-point, to
    find the shortest Hamiltonian path through the points. Returns the
    indices of the points in their sorted order.
    Call via get_codebook.

    Args:
        distances (ndarray): The distance matrix from ``get_distances()``.

    Returns:
        ndarray. A 1D array of size (n_colours).
    """
    import os
    import tempfile

    # Write the problem description to a unique temp file. (The previous
    # hard-coded /tmp/route.tsp let concurrent requests clobber each
    # other's problem files.)
    fd, problem_path = tempfile.mkstemp(suffix='.tsp')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(dumps_matrix(distances, name="Route"))

        # Run the solver.
        tour = run(problem_path, start=0, solver="LKH")
    finally:
        os.remove(problem_path)
    result = np.array(tour['tour'])

    # Slice off the initial value and the last value to account for the added
    # colours. Then subtract one to shift indices back to proper range.
    return result[1:-1] - 1
def get_codebook(imarray, n_colours=128, sampling=None, cool_point=None):
    """
    Finds and then sorts the colour table (aka codebook or palette). Wraps
    get_quanta, get_distances, and sort_quanta.

    Args:
        imarray (ndarray): The image array from ``get_imarray()``.
        n_colours (int): The number of colours to reduce to.
        sampling (str): The way to sample the image (default 'random')
        cool_point (ndarray): The point to use as the starting point.

    Returns:
        tuple. The sorted colour table, shape (n_colours, 3), and the
        cumulative-distance dataspace, length n_colours.
    """
    q = get_quanta(imarray, n_colours, sampling)
    d = get_distances(q, cool_point)
    r = sort_quanta(d)
    # (A stray debug print of the sorted indices was removed here.)

    # Compute the dataspace: cumulative distance along the sorted path.
    # NOTE(review): row/col 0 of `d` is the added cool-point, so indexing
    # `d` directly with quanta indices looks off by one (d[i+1, j+1] would
    # be the quanta-to-quanta distance) — confirm against recover_data,
    # which consumes these values after normalisation.
    dataspace = np.concatenate(
        [[0], np.cumsum([d[i, j] for i, j in zip(r, r[1:])])])
    return q[r], dataspace
def recover_data(imarray, colours, dataspace=None):
    """
    Given a sorted colour table, convert an image array into a data array in
    the closed interval [0, 1].

    Args:
        imarray (ndarray): The array of pixel data, as RGB triples.
        colours (ndarray): The array of sorted RGB triples.
        dataspace (ndarray): Optional data value per colour; defaults to
            evenly spaced values, one per colour.

    Returns:
        ndarray. The recovered data, the same shape as the input imarray
        (minus the channel axis).
    """
    if dataspace is None:
        # Bug fix: `np.float` was removed in NumPy 1.24; use the builtin.
        dataspace = np.arange(0, len(colours), dtype=float)

    # Map each pixel to its nearest palette colour.
    kdtree = cKDTree(colours)
    dx, ix = kdtree.query(imarray)
    data = dataspace[ix]

    # Scale to [0, 1], guarding against a degenerate all-zero result.
    out = data.astype(float)
    peak = np.amax(out)
    if peak > 0:
        out /= peak

    # Remove anything that maps too far from any palette colour.
    out[dx > np.sqrt(3)/8] = np.nan
    return out
def scale_data(data, interval):
    """
    Scale data to a new interval.

    Args:
        data (ndarray): The data to scale, in the closed interval [0,1].
        interval (tuple): A tuple of numbers to scale to.

    Returns:
        ndarray. The same shape as the input data.
    """
    lo, hi = interval
    # Linear map: 0 -> lo, 1 -> hi.
    return lo + data * (hi - lo)
def image_to_data(img, n_colours=128, sampling=None, cool_point=None, interval=None):
    """
    Does everything: image in, recovered data and palette out.

    Args:
        img (Image): The image to convert.
        n_colours (int): The number of colours to reduce to.
        sampling (str): How to sample the image.
        cool_point (ndarray): Starting point for the colour sort.
        interval (tuple): Interval to scale the data to; default [0, 1].

    Returns:
        tuple. The recovered data, scaled to `interval`, and the palette.
    """
    interval = interval or [0, 1]
    pixels = get_imarray(img)
    palette, dataspace = get_codebook(pixels,
                                      n_colours=n_colours,
                                      sampling=sampling,
                                      cool_point=cool_point)
    recovered = recover_data(pixels, palette, dataspace)
    return scale_data(recovered, interval), palette
def get_url(databytes, ext, uuid1):
    """
    Upload to AWS S3 storage and collect URL.

    Args:
        databytes (bytes): The payload to store.
        ext (str): File extension, used to build the S3 key.
        uuid1 (str): Unique id, used to build the S3 key.

    Returns:
        str. A URL for the uploaded object, or '' if the upload failed.
    """
    file_link = ''
    now = datetime.datetime.now()
    expires = now + datetime.timedelta(minutes=240)
    success = False
    try:
        # NOTE(review): local secrets.py shadows the stdlib `secrets`
        # module — rename the project module to avoid surprises.
        from secrets import KEY, SECRET
        session = boto3.session.Session(aws_access_key_id=KEY,
                                        aws_secret_access_key=SECRET,
                                        region_name='us-east-1'
                                        )
        client = session.client('s3')
        key = uuid1 + '.' + ext
        bucket = 'keats'
        acl = 'public-read'  # For public file.
        params = {'Body': databytes,
                  'Expires': expires,
                  'Bucket': bucket,
                  'Key': key,
                  'ACL': acl,
                  }
        r = client.put_object(**params)
        success = r['ResponseMetadata']['HTTPStatusCode'] == 200
    except Exception as e:
        # Best effort: report the cause (the old bare `except:` swallowed
        # everything, including KeyboardInterrupt) and fall through with
        # success == False.
        print('Upload to S3 failed:', e)
    if success:
        # Only do this if successfully uploaded, because
        # you always get a link, even if no file.
        if acl == 'public-read':
            file_link = 'https://s3.amazonaws.com/{}/{}'.format(bucket, key)
        else:
            try:
                params = {'Bucket': bucket,
                          'Key': key}
                file_link = client.generate_presigned_url('get_object',
                                                          Params=params,
                                                          ExpiresIn=3600)
            except Exception as e:
                print('Retrieval of S3 link failed:', e)
    return file_link
def write_segy(f, data):
    """
    Write a 2D NumPY array to an open file handle f.
    """
    stream = Stream()

    # Data is in [0, 1] so rescale to 8-bit.
    # USING 16-bit because can't save as 8-bit int in ObsPy.
    scaled = np.int16((data-0.5)*255)

    for seq, samples in enumerate(scaled, start=1):
        # Make the trace and add the required sample interval.
        trace = Trace(samples)
        trace.stats.delta = 0.004
        # Add yet more to the header (optional).
        trace.stats.segy = {'trace_header': SEGYTraceHeader()}
        trace.stats.segy.trace_header.trace_sequence_number_within_line = seq
        trace.stats.segy.trace_header.receiver_group_elevation = 0
        # Append the trace to the stream.
        stream.append(trace)

    # Text header: three 80-character records.
    stream.stats = AttribDict()
    header_lines = ['Generated by Keats.',
                    'Sample interval unknown.',
                    'IEEE floats.']
    stream.stats.textual_file_header = ''.join(
        '{:80s}'.format(line) for line in header_lines).encode()

    # Binary header.
    stream.stats.binary_file_header = SEGYBinaryFileHeader()
    stream.stats.binary_file_header.trace_sorting_code = 4
    stream.stats.binary_file_header.seg_y_format_revision_number = 0x0100

    # Write the data.
    # Encoding should be 8, but that doesn't work.
    stream.write(f, format='SEGY', data_encoding=3, byteorder=sys.byteorder)
    return f
| {
"content_hash": "8e81df09bb7db1164390b1b0afbdd9ea",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 112,
"avg_line_length": 31.448936170212765,
"alnum_prop": 0.5892023543738584,
"repo_name": "kwinkunks/rainbow",
"id": "5e021955cc500d08c22c2011d11e0ba3f2873251",
"size": "14805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2992"
},
{
"name": "Jupyter Notebook",
"bytes": "21384018"
},
{
"name": "Python",
"bytes": "36678"
}
],
"symlink_target": ""
} |
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
from hplefthandclient import exceptions as hpexceptions
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder import units
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
class HPLeftHandBaseDriver():
    """Shared fixture data for the HP LeftHand driver test cases."""

    # Cluster fixture.
    cluster_id = 1

    # Volume fixtures.
    volume_name = "fakevolume"
    volume_id = 1
    volume = {'name': volume_name,
              'provider_location': ('10.0.1.6 iqn.2003-10.com.lefthandnetworks:'
                                    'group01:25366:fakev 0'),
              'id': volume_id,
              'provider_auth': None,
              'size': 1}
    volume_type_id = 4

    # Host/server fixtures.
    serverName = 'fakehost'
    server_id = 0
    init_iqn = 'iqn.1993-08.org.debian:01:222'
    connector = {'ip': '10.0.0.2',
                 'initiator': 'iqn.1993-08.org.debian:01:222',
                 'host': serverName}

    # Snapshot fixtures. (The 'shapshot' spelling is part of the fixture.)
    snapshot_name = "fakeshapshot"
    snapshot_id = 3
    snapshot = {'name': snapshot_name,
                'volume_name': volume_name}

    # Clone fixtures.
    cloned_volume_name = "clone_volume"
    cloned_volume = {'name': cloned_volume_name}
    cloned_snapshot_name = "clonedshapshot"
    cloned_snapshot_id = 5
    cloned_snapshot = {'name': cloned_snapshot_name,
                       'volume_name': volume_name}

    # Calls every REST-driver test expects during driver start-up.
    driver_startup_call_stack = [mock.call.login('foo1', 'bar2'),
                                 mock.call.getClusterByName('CloudCluster1'),
                                 mock.call.getCluster(1)]
class TestHPLeftHandCLIQISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
    """Tests for the legacy CLIQ (SSH/XML) flavour of the LeftHand driver.

    The driver's ``_cliq_run`` transport is replaced with
    ``_fake_cliq_run``, which validates the CLIQ arguments and returns
    canned XML responses, so no real array is needed.
    """
    def _fake_cliq_run(self, verb, cliq_args, check_exit_code=True):
        """Return fake results for the various methods."""
        def create_volume(cliq_args):
            """Create volume CLIQ input for test.
            input = "createVolume description="fake description"
                     clusterName=Cluster01 volumeName=fakevolume
                     thinProvision=0 output=XML size=1GB"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['thinProvision'], '1')
            self.assertEqual(cliq_args['size'], '1GB')
            return output, None
        def delete_volume(cliq_args):
            """Delete volume CLIQ input for test.
            input = "deleteVolume volumeName=fakevolume prompt=false
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="164" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['prompt'], 'false')
            return output, None
        def extend_volume(cliq_args):
            """Extend volume CLIQ input for test.
            input = "modifyVolume description="fake description"
                     volumeName=fakevolume
                     output=XML size=2GB"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['size'], '2GB')
            return output, None
        def assign_volume(cliq_args):
            """Assign volume CLIQ input for test.
            input = "assignVolumeToServer volumeName=fakevolume
                     serverName=fakehost
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="174" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['serverName'],
                             self.connector['host'])
            return output, None
        def unassign_volume(cliq_args):
            """Unassign volume CLIQ input for test.
            input = "unassignVolumeToServer volumeName=fakevolume
                     serverName=fakehost output=XML
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="205" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            self.assertEqual(cliq_args['serverName'],
                             self.connector['host'])
            return output, None
        def create_snapshot(cliq_args):
            """Create snapshot CLIQ input for test.
            input = "createSnapshot description="fake description"
                     snapshotName=fakesnapshot
                     volumeName=fakevolume
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            return output, None
        def delete_snapshot(cliq_args):
            """Delete shapshot CLIQ input for test.
            input = "deleteSnapshot snapshotName=fakesnapshot prompt=false
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="164" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['prompt'], 'false')
            return output, None
        def create_volume_from_snapshot(cliq_args):
            """Create volume from snapshot CLIQ input for test.
            input = "cloneSnapshot description="fake description"
                     snapshotName=fakesnapshot
                     volumeName=fakevolume
                     output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded."
                          name="CliqSuccess" processingTime="181" result="0"/>
                </gauche>"""
            self.assertEqual(cliq_args['snapshotName'], self.snapshot_name)
            self.assertEqual(cliq_args['volumeName'], self.volume_name)
            return output, None
        def get_cluster_info(cliq_args):
            """Get cluster info CLIQ input for test.
            input = "getClusterInfo clusterName=Cluster01 searchDepth=1
                     verbose=0 output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="1164" result="0">
                <cluster blockSize="1024" description=""
                         maxVolumeSizeReplication1="622957690"
                         maxVolumeSizeReplication2="311480287"
                         minVolumeSize="262144" name="Cluster01"
                         pageSize="262144" spaceTotal="633697992"
                         storageNodeCount="2" unprovisionedSpace="622960574"
                         useVip="true">
                <nsm ipAddress="10.0.1.7" name="111-vsa"/>
                <nsm ipAddress="10.0.1.8" name="112-vsa"/>
                <vip ipAddress="10.0.1.6" subnetMask="255.255.255.0"/>
                </cluster></response></gauche>"""
            return output, None
        def get_volume_info(cliq_args):
            """Get volume info CLIQ input for test.
            input = "getVolumeInfo volumeName=fakevolume output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="87" result="0">
                <volume autogrowPages="4" availability="online"
                        blockSize="1024" bytesWritten="0" checkSum="false"
                        clusterName="Cluster01" created="2011-02-08T19:56:53Z"
                        deleting="false" description="" groupName="Group01"
                        initialQuota="536870912" isPrimary="true"
                iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:fakev"
                maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
                minReplication="1" name="vol-b" parity="0" replication="2"
                reserveQuota="536870912" scratchQuota="4194304"
                serialNumber="9fa5c8b2cca54b2948a63d8"
                size="1073741824" stridePages="32" thinProvision="true">
                <status description="OK" value="2"/>
                <permission access="rw" authGroup="api-1"
                            chapName="chapusername" chapRequired="true"
                            id="25369" initiatorSecret="" iqn=""
                            iscsiEnabled="true" loadBalance="true"
                            targetSecret="supersecret"/>
                </volume></response></gauche>"""
            return output, None
        def get_snapshot_info(cliq_args):
            """Get snapshot info CLIQ input for test.
            input = "getSnapshotInfo snapshotName=fakesnapshot output=XML"
            """
            output = """<gauche version="1.0">
                <response description="Operation succeeded." name="CliqSuccess"
                          processingTime="87" result="0">
                <snapshot applicationManaged="false" autogrowPages="32768"
                    automatic="false" availability="online" bytesWritten="0"
                    clusterName="CloudCluster1" created="2013-08-26T07:03:44Z"
                    deleting="false" description="" groupName="CloudGroup1"
                    id="730" initialQuota="536870912" isPrimary="true"
                    iscsiIqn="iqn.2003-10.com.lefthandnetworks:cloudgroup1:73"
                    md5="a64b4f850539c07fb5ce3cee5db1fcce" minReplication="1"
                    name="snapshot-7849288e-e5e8-42cb-9687-9af5355d674b"
                    replication="2" reserveQuota="536870912" scheduleId="0"
                    scratchQuota="4194304" scratchWritten="0"
                    serialNumber="a64b4f850539c07fb5ce3cee5db1fcce"
                    size="2147483648" stridePages="32"
                    volumeSerial="a64b4f850539c07fb5ce3cee5db1fcce">
                    <status description="OK" value="2"/>
                    <permission access="rw"
                        authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
                        chapName="chapusername" chapRequired="true" id="25369"
                        initiatorSecret="" iqn="" iscsiEnabled="true"
                        loadBalance="true" targetSecret="supersecret"/>
                </snapshot></response></gauche>"""
            return output, None
        def get_server_info(cliq_args):
            """Get server info CLIQ input for test.
            input = "getServerInfo serverName=fakeName"
            """
            output = """<gauche version="1.0"><response result="0"/>
                     </gauche>"""
            return output, None
        def create_server(cliq_args):
            """Create server CLIQ input for test.
            input = "createServer serverName=fakeName initiator=something"
            """
            output = """<gauche version="1.0"><response result="0"/>
                     </gauche>"""
            return output, None
        def test_error(cliq_args):
            # Canned failure response, for exercising error handling.
            output = """<gauche version="1.0">
                <response description="Volume '134234' not found."
                          name="CliqVolumeNotFound" processingTime="1083"
                          result="8000100c"/>
                </gauche>"""
            return output, None
        def test_paramiko_1_13_0(cliq_args):
            # paramiko 1.13.0 now returns unicode
            # NOTE(review): `unicode` is Python 2 only; this module will
            # not import on Python 3 as written.
            output = unicode(
                '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'
                '<gauche version="1.0">\n\n  <response description="Operation'
                ' succeeded." name="CliqSuccess" processingTime="423" '
                'result="0">\n    <cluster adaptiveOptimization="false" '
                'blockSize="1024" description="" maxVolumeSizeReplication1='
                '"114594676736" minVolumeSize="262144" name="clusterdemo" '
                'pageSize="262144" spaceTotal="118889644032" storageNodeCount='
                '"1" unprovisionedSpace="114594676736" useVip="true">\n'
                '      <nsm ipAddress="10.10.29.102" name="lefdemo1"/>\n'
                '      <vip ipAddress="10.10.22.87" subnetMask='
                '"255.255.224.0"/>\n    </cluster>\n  </response>\n\n'
                '</gauche>\n    ')
            return output, None
        def test_paramiko_1_10_0(cliq_args):
            # paramiko 1.10.0 returns python default encoding.
            output = (
                '<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n'
                '<gauche version="1.0">\n\n  <response description="Operation'
                ' succeeded." name="CliqSuccess" processingTime="423" '
                'result="0">\n    <cluster adaptiveOptimization="false" '
                'blockSize="1024" description="" maxVolumeSizeReplication1='
                '"114594676736" minVolumeSize="262144" name="clusterdemo" '
                'pageSize="262144" spaceTotal="118889644032" storageNodeCount='
                '"1" unprovisionedSpace="114594676736" useVip="true">\n'
                '      <nsm ipAddress="10.10.29.102" name="lefdemo1"/>\n'
                '      <vip ipAddress="10.10.22.87" subnetMask='
                '"255.255.224.0"/>\n    </cluster>\n  </response>\n\n'
                '</gauche>\n    ')
            return output, None
        self.assertEqual(cliq_args['output'], 'XML')
        # NOTE(review): this try/except looks misplaced — a dict literal
        # cannot raise KeyError; the lookup that can (`verbs[verb]`) is on
        # the return line *outside* the try, so NotImplementedError is
        # never raised for an unknown verb. Confirm the intent.
        try:
            verbs = {'createVolume': create_volume,
                     'deleteVolume': delete_volume,
                     'modifyVolume': extend_volume,
                     'assignVolumeToServer': assign_volume,
                     'unassignVolumeToServer': unassign_volume,
                     'createSnapshot': create_snapshot,
                     'deleteSnapshot': delete_snapshot,
                     'cloneSnapshot': create_volume_from_snapshot,
                     'getClusterInfo': get_cluster_info,
                     'getVolumeInfo': get_volume_info,
                     'getSnapshotInfo': get_snapshot_info,
                     'getServerInfo': get_server_info,
                     'createServer': create_server,
                     'testError': test_error,
                     'testParamiko_1.10.1': test_paramiko_1_10_0,
                     'testParamiko_1.13.1': test_paramiko_1_13_0}
        except KeyError:
            raise NotImplementedError()
        return verbs[verb](cliq_args)
    def setUp(self):
        """Prepare the expected iSCSI properties fixture."""
        super(TestHPLeftHandCLIQISCSIDriver, self).setUp()
        self.properties = {
            # NOTE(review): 'target_discoverd' looks like a typo for
            # 'target_discovered'; it is only compared against itself via
            # the mocked _get_iscsi_properties, so the tests still pass.
            'target_discoverd': True,
            'target_portal': '10.0.1.6:3260',
            'target_iqn':
            'iqn.2003-10.com.lefthandnetworks:group01:25366:fakev',
            'volume_id': self.volume_id}
    def tearDown(self):
        """No extra cleanup beyond the base TestCase."""
        super(TestHPLeftHandCLIQISCSIDriver, self).tearDown()
    def default_mock_conf(self):
        """Build a mock SAN/SSH configuration for the CLIQ driver."""
        mock_conf = mock.Mock()
        mock_conf.san_ip = '10.10.10.10'
        mock_conf.san_login = 'foo'
        mock_conf.san_password = 'bar'
        mock_conf.san_ssh_port = 16022
        mock_conf.san_clustername = 'CloudCluster1'
        # No REST URL forces the driver onto the CLIQ (SSH) proxy.
        mock_conf.hplefthand_api_url = None
        return mock_conf
    def setup_driver(self, config=None):
        """Create the driver and stub its transport; return the stub."""
        if config is None:
            config = self.default_mock_conf()
        self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
            configuration=config)
        self.driver.do_setup(None)
        self.driver.proxy._cliq_run = mock.Mock(
            side_effect=self._fake_cliq_run)
        return self.driver.proxy._cliq_run
    def test_create_volume(self):
        """create_volume issues createVolume and builds the location."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume = {'name': self.volume_name, 'size': 1}
        model_update = self.driver.create_volume(volume)
        expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
        expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
        self.assertEqual(model_update['provider_location'], expected_location)
        expected = [
            mock.call(
                'createVolume', {
                    'clusterName': 'CloudCluster1',
                    'volumeName': 'fakevolume',
                    'thinProvision': '1',
                    'output': 'XML',
                    'size': '1GB'},
                True),
            mock.call(
                'getVolumeInfo', {
                    'volumeName': 'fakevolume',
                    'output': 'XML'},
                True),
            mock.call(
                'getClusterInfo', {
                    'clusterName': 'Cluster01',
                    'searchDepth': '1',
                    'verbose': '0',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_delete_volume(self):
        """delete_volume looks the volume up, then deletes it."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume = {'name': self.volume_name}
        self.driver.delete_volume(volume)
        expected = [
            mock.call(
                'getVolumeInfo', {
                    'volumeName': 'fakevolume',
                    'output': 'XML'},
                True),
            mock.call(
                'deleteVolume', {
                    'volumeName': 'fakevolume',
                    'prompt': 'false',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_extend_volume(self):
        """extend_volume issues modifyVolume with the new size."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume = {'name': self.volume_name}
        self.driver.extend_volume(volume, 2)
        expected = [
            mock.call(
                'modifyVolume', {
                    'volumeName': 'fakevolume',
                    'output': 'XML',
                    'size': '2GB'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_initialize_connection(self):
        """initialize_connection assigns the volume to the host."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        self.driver.proxy._get_iscsi_properties = mock.Mock(
            return_value=self.properties)
        volume = {'name': self.volume_name}
        result = self.driver.initialize_connection(volume,
                                                   self.connector)
        self.assertEqual(result['driver_volume_type'], 'iscsi')
        self.assertDictMatch(result['data'], self.properties)
        expected = [
            mock.call(
                'getServerInfo', {
                    'output': 'XML',
                    'serverName': 'fakehost'},
                False),
            mock.call(
                'assignVolumeToServer', {
                    'volumeName': 'fakevolume',
                    'serverName': 'fakehost',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_terminate_connection(self):
        """terminate_connection unassigns the volume from the host."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume = {'name': self.volume_name}
        self.driver.terminate_connection(volume, self.connector)
        expected = [
            mock.call(
                'unassignVolumeToServer', {
                    'volumeName': 'fakevolume',
                    'serverName': 'fakehost',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_create_snapshot(self):
        """create_snapshot issues createSnapshot with inheritAccess."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        snapshot = {'name': self.snapshot_name,
                    'volume_name': self.volume_name}
        self.driver.create_snapshot(snapshot)
        expected = [
            mock.call(
                'createSnapshot', {
                    'snapshotName': 'fakeshapshot',
                    'output': 'XML',
                    'inheritAccess': 1,
                    'volumeName': 'fakevolume'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_delete_snapshot(self):
        """delete_snapshot looks the snapshot up, then deletes it."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        snapshot = {'name': self.snapshot_name}
        self.driver.delete_snapshot(snapshot)
        expected = [
            mock.call(
                'getSnapshotInfo', {
                    'snapshotName': 'fakeshapshot',
                    'output': 'XML'},
                True),
            mock.call(
                'deleteSnapshot', {
                    'snapshotName': 'fakeshapshot',
                    'prompt': 'false',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_create_volume_from_snapshot(self):
        """create_volume_from_snapshot clones and builds the location."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume = {'name': self.volume_name}
        snapshot = {'name': self.snapshot_name}
        model_update = self.driver.create_volume_from_snapshot(volume,
                                                               snapshot)
        expected_iqn = "iqn.2003-10.com.lefthandnetworks:group01:25366:fakev 0"
        expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
        self.assertEqual(model_update['provider_location'], expected_location)
        expected = [
            mock.call(
                'cloneSnapshot', {
                    'snapshotName': 'fakeshapshot',
                    'output': 'XML',
                    'volumeName': 'fakevolume'},
                True),
            mock.call(
                'getVolumeInfo', {
                    'volumeName': 'fakevolume',
                    'output': 'XML'},
                True),
            mock.call(
                'getClusterInfo', {
                    'clusterName': 'Cluster01',
                    'searchDepth': '1',
                    'verbose': '0',
                    'output': 'XML'},
                True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_get_volume_stats(self):
        """get_volume_stats reports vendor and protocol from the cluster."""
        # set up driver with default config
        mock_cliq_run = self.setup_driver()
        volume_stats = self.driver.get_volume_stats(True)
        self.assertEqual(volume_stats['vendor_name'], 'Hewlett-Packard')
        self.assertEqual(volume_stats['storage_protocol'], 'iSCSI')
        expected = [
            mock.call('getClusterInfo', {
                'searchDepth': 1,
                'clusterName': 'CloudCluster1',
                'output': 'XML'}, True)]
        # validate call chain
        mock_cliq_run.assert_has_calls(expected)
    def test_cliq_run_xml_paramiko_1_13_0(self):
        """_cliq_run_xml parses unicode output (paramiko >= 1.13)."""
        # set up driver with default config
        self.setup_driver()
        xml = self.driver.proxy._cliq_run_xml('testParamiko_1.13.1', {})
        self.assertIsNotNone(xml)
    def test_cliq_run_xml_paramiko_1_10_0(self):
        """_cliq_run_xml parses byte-string output (paramiko 1.10)."""
        # set up driver with default config
        self.setup_driver()
        xml = self.driver.proxy._cliq_run_xml('testParamiko_1.10.1', {})
        self.assertIsNotNone(xml)
class TestHPLeftHandRESTISCSIDriver(HPLeftHandBaseDriver, test.TestCase):
driver_startup_call_stack = [
mock.call.login('foo1', 'bar2'),
mock.call.getClusterByName('CloudCluster1'),
mock.call.getCluster(1)]
def setUp(self):
super(TestHPLeftHandRESTISCSIDriver, self).setUp()
def tearDown(self):
super(TestHPLeftHandRESTISCSIDriver, self).tearDown()
def default_mock_conf(self):
mock_conf = mock.Mock()
mock_conf.hplefthand_api_url = 'http://fake.foo:8080/lhos'
mock_conf.hplefthand_username = 'foo1'
mock_conf.hplefthand_password = 'bar2'
mock_conf.hplefthand_iscsi_chap_enabled = False
mock_conf.hplefthand_debug = False
mock_conf.hplefthand_clustername = "CloudCluster1"
return mock_conf
@mock.patch('hplefthandclient.client.HPLeftHandClient', spec=True)
def setup_driver(self, _mock_client, config=None):
if config is None:
config = self.default_mock_conf()
_mock_client.return_value.getClusterByName.return_value = {
'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]}
_mock_client.return_value.getCluster.return_value = {
'spaceTotal': units.GiB * 500,
'spaceAvailable': units.GiB * 250}
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
return _mock_client.return_value
def test_create_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
# execute driver
volume_info = self.driver.create_volume(self.volume)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
# mock HTTPServerError
mock_client.createVolume.side_effect = hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, self.volume)
@mock.patch.object(
volume_types,
'get_volume_type',
return_value={'extra_specs': {'hplh:provisioning': 'full'}})
def test_create_volume_with_es(self, _mock_volume_type):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
volume_with_vt = self.volume
volume_with_vt['volume_type_id'] = 1
# mock return value of createVolume
mock_client.createVolume.return_value = {
'iscsiIqn': self.connector['initiator']}
# execute creat_volume
volume_info = self.driver.create_volume(volume_with_vt)
self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
volume_info['provider_location'])
expected = self.driver_startup_call_stack + [
mock.call.createVolume(
'fakevolume',
1,
units.GiB,
{'isThinProvisioned': False, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
def test_delete_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute delete_volume
self.driver.delete_volume(self.volume)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.deleteVolume(self.volume_id)]
mock_client.assert_has_calls(expected)
# mock HTTPNotFound (volume not found)
mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
# no exception should escape method
self.driver.delete_volume(self.volume)
# mock HTTPConflict
mock_client.deleteVolume.side_effect = hpexceptions.HTTPConflict()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume, self.volume_id)
def test_extend_volume(self):
# setup drive with default configuration
# and return the mock HTTP LeftHand client
mock_client = self.setup_driver()
# mock return value of getVolumeByName
mock_client.getVolumeByName.return_value = {'id': self.volume_id}
# execute extend_volume
self.driver.extend_volume(self.volume, 2)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
mock.call.modifyVolume(1, {'size': 2 * units.GiB})]
# validate call chain
mock_client.assert_has_calls(expected)
# mock HTTPServerError (array failure)
mock_client.modifyVolume.side_effect = hpexceptions.HTTPServerError()
# ensure the raised exception is a cinder exception
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, self.volume, 2)
def test_initialize_connection(self):
    """Attach creates a server entry (no CHAP) and grants volume access."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    # mock return value of getVolumeByName
    mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
    mock_client.createServer.return_value = {'id': self.server_id}
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    # execute initialize_connection
    result = self.driver.initialize_connection(
        self.volume,
        self.connector)

    # validate
    self.assertEqual(result['driver_volume_type'], 'iscsi')
    self.assertEqual(result['data']['target_discovered'], False)
    self.assertEqual(result['data']['volume_id'], self.volume_id)
    # no CHAP credentials expected in the non-CHAP flow
    self.assertTrue('auth_method' not in result['data'])

    expected = self.driver_startup_call_stack + [
        mock.call.getServerByName('fakehost'),
        mock.call.createServer
        (
            'fakehost',
            'iqn.1993-08.org.debian:01:222',
            None
        ),
        mock.call.getVolumeByName('fakevolume'),
        mock.call.addServerAccess(1, 0)]

    # validate call chain
    mock_client.assert_has_calls(expected)

    # mock HTTPServerError (array failure)
    mock_client.createServer.side_effect = hpexceptions.HTTPServerError()
    # ensure the raised exception is a cinder exception
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.initialize_connection, self.volume, self.connector)

def test_initialize_connection_with_chaps(self):
    """Attach propagates CHAP auth info when the backend requires it."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    # mock return value of getVolumeByName
    mock_client.getServerByName.side_effect = hpexceptions.HTTPNotFound()
    mock_client.createServer.return_value = {
        'id': self.server_id,
        'chapAuthenticationRequired': True,
        'chapTargetSecret': 'dont_tell'}
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    # execute initialize_connection
    result = self.driver.initialize_connection(
        self.volume,
        self.connector)

    # validate
    self.assertEqual(result['driver_volume_type'], 'iscsi')
    self.assertEqual(result['data']['target_discovered'], False)
    self.assertEqual(result['data']['volume_id'], self.volume_id)
    self.assertEqual(result['data']['auth_method'], 'CHAP')

    expected = self.driver_startup_call_stack + [
        mock.call.getServerByName('fakehost'),
        mock.call.createServer
        (
            'fakehost',
            'iqn.1993-08.org.debian:01:222',
            None
        ),
        mock.call.getVolumeByName('fakevolume'),
        mock.call.addServerAccess(1, 0)]

    # validate call chain
    mock_client.assert_has_calls(expected)
def test_terminate_connection(self):
    """Detach revokes server access; a missing volume becomes a cinder error."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    mock_client.getVolumeByName.return_value = {'id': self.volume_id}
    mock_client.getServerByName.return_value = {'id': self.server_id}

    # execute terminate_connection
    self.driver.terminate_connection(self.volume, self.connector)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.getServerByName('fakehost'),
        mock.call.removeServerAccess(1, 0)]

    # validate call chain
    mock_client.assert_has_calls(expected)

    mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
    # ensure the raised exception is a cinder exception
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.terminate_connection,
        self.volume,
        self.connector)

def test_create_snapshot(self):
    """Snapshot create inherits access; a missing source volume is an error."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    # execute create_snapshot
    self.driver.create_snapshot(self.snapshot)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        # 'fakeshapshot' (sic) matches the snapshot fixture name
        mock.call.createSnapshot(
            'fakeshapshot',
            1,
            {'inheritAccess': True})]

    # validate call chain
    mock_client.assert_has_calls(expected)

    # mock HTTPServerError (array failure)
    mock_client.getVolumeByName.side_effect = hpexceptions.HTTPNotFound()
    # ensure the raised exception is a cinder exception
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.create_snapshot, self.snapshot)

def test_delete_snapshot(self):
    """Snapshot delete: missing is logged, failures are wrapped, busy maps to
    SnapshotIsBusy (detected via the 'clone point' message text)."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}

    # execute delete_snapshot
    self.driver.delete_snapshot(self.snapshot)

    expected = self.driver_startup_call_stack + [
        mock.call.getSnapshotByName('fakeshapshot'),
        mock.call.deleteSnapshot(3)]

    # validate call chain
    mock_client.assert_has_calls(expected)

    mock_client.getSnapshotByName.side_effect = hpexceptions.HTTPNotFound()
    # no exception is thrown, just error msg is logged
    self.driver.delete_snapshot(self.snapshot)

    # mock HTTPServerError (array failure)
    ex = hpexceptions.HTTPServerError({'message': 'Some message.'})
    mock_client.getSnapshotByName.side_effect = ex
    # ensure the raised exception is a cinder exception
    self.assertRaises(
        exception.VolumeBackendAPIException,
        self.driver.delete_snapshot,
        self.snapshot)

    # mock HTTPServerError because the snap is in use
    ex = hpexceptions.HTTPServerError({
        'message':
        'Hey, dude cannot be deleted because it is a clone point duh.'})
    mock_client.getSnapshotByName.side_effect = ex
    # ensure the raised exception is a cinder exception
    self.assertRaises(
        exception.SnapshotIsBusy,
        self.driver.delete_snapshot,
        self.snapshot)
def test_create_volume_from_snapshot(self):
    """Creating from a snapshot clones it and returns an iSCSI location."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    mock_client.getSnapshotByName.return_value = {'id': self.snapshot_id}
    mock_client.cloneSnapshot.return_value = {
        'iscsiIqn': self.connector['initiator']}

    # execute create_volume_from_snapshot
    model_update = self.driver.create_volume_from_snapshot(
        self.volume, self.snapshot)

    # provider_location is "<vip>:<port>,1 <iqn> <lun>"
    expected_iqn = 'iqn.1993-08.org.debian:01:222 0'
    expected_location = "10.0.1.6:3260,1 %s" % expected_iqn
    self.assertEqual(model_update['provider_location'], expected_location)

    expected = self.driver_startup_call_stack + [
        mock.call.getSnapshotByName('fakeshapshot'),
        mock.call.cloneSnapshot('fakevolume', 3)]

    # validate call chain
    mock_client.assert_has_calls(expected)

def test_create_cloned_volume(self):
    """Cloning a volume issues cloneVolume against the source volume id."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    # execute create_cloned_volume
    self.driver.create_cloned_volume(
        self.cloned_volume, self.volume)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.cloneVolume('clone_volume', 1)]

    # validate call chain
    mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping(self, _mock_get_volume_type):
    """Only hplh:-prefixed extra specs are mapped to backend options."""
    # setup drive with default configuration
    self.setup_driver()

    # 2 extra specs we don't care about, and
    # 1 that will get mapped
    _mock_get_volume_type.return_value = {
        'extra_specs': {
            'foo:bar': 'fake',
            'bar:foo': 1234,
            'hplh:provisioning': 'full'}}

    volume_with_vt = self.volume
    volume_with_vt['volume_type_id'] = self.volume_type_id

    # get the extra specs of interest from this volume's volume type
    volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
        volume_with_vt)
    extra_specs = self.driver.proxy._get_lh_extra_specs(
        volume_extra_specs,
        hp_lefthand_rest_proxy.extra_specs_key_map.keys())

    # map the extra specs key/value pairs to key/value pairs
    # used as optional configuration values by the LeftHand backend
    optional = self.driver.proxy._map_extra_specs(extra_specs)

    # 'full' provisioning maps to thin provisioning disabled
    self.assertDictMatch({'isThinProvisioned': False}, optional)

@mock.patch.object(volume_types, 'get_volume_type')
def test_extra_spec_mapping_invalid_value(self, _mock_get_volume_type):
    """Extra specs with invalid values are dropped from the mapping."""
    # setup drive with default configuration
    self.setup_driver()

    volume_with_vt = self.volume
    volume_with_vt['volume_type_id'] = self.volume_type_id

    _mock_get_volume_type.return_value = {
        'extra_specs': {
            # r-07 is an invalid value for hplh:ao
            'hplh:data_pl': 'r-07',
            'hplh:ao': 'true'}}

    # get the extra specs of interest from this volume's volume type
    volume_extra_specs = self.driver.proxy._get_volume_extra_specs(
        volume_with_vt)
    extra_specs = self.driver.proxy._get_lh_extra_specs(
        volume_extra_specs,
        hp_lefthand_rest_proxy.extra_specs_key_map.keys())

    # map the extra specs key/value pairs to key/value pairs
    # used as optional configuration values by the LeftHand backend
    optional = self.driver.proxy._map_extra_specs(extra_specs)

    # {'hplh:ao': 'true'} should map to
    # {'isAdaptiveOptimizationEnabled': True}
    # without hplh:data_pl since r-07 is an invalid value
    self.assertDictMatch({'isAdaptiveOptimizationEnabled': True}, optional)
def test_retype_with_no_LH_extra_specs(self):
    """Retype with no hplh: specs touches nothing beyond the volume lookup."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    ctxt = context.get_admin_context()

    host = {'host': self.serverName}
    key_specs_old = {'foo': False, 'bar': 2, 'error': True}
    key_specs_new = {'foo': True, 'bar': 5, 'error': False}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)

    diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
                                                 new_type_ref['id'])

    volume = dict.copy(self.volume)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    volume['volume_type'] = old_type
    volume['host'] = host
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])

    self.driver.retype(ctxt, volume, new_type, diff, host)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume')]

    # validate call chain
    mock_client.assert_has_calls(expected)

def test_retype_with_only_LH_extra_specs(self):
    """Retype applies every changed hplh: spec via a single modifyVolume."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    ctxt = context.get_admin_context()

    host = {'host': self.serverName}
    key_specs_old = {'hplh:provisioning': 'thin'}
    key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)

    diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
                                                 new_type_ref['id'])

    volume = dict.copy(self.volume)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    volume['volume_type'] = old_type
    volume['host'] = host
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])

    self.driver.retype(ctxt, volume, new_type, diff, host)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.modifyVolume(
            1, {
                'isThinProvisioned': False,
                'isAdaptiveOptimizationEnabled': True})]

    # validate call chain
    mock_client.assert_has_calls(expected)

def test_retype_with_both_extra_specs(self):
    """Retype ignores non-hplh: specs and applies only the hplh: change."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    ctxt = context.get_admin_context()

    host = {'host': self.serverName}
    key_specs_old = {'hplh:provisioning': 'full', 'foo': 'bar'}
    key_specs_new = {'hplh:provisioning': 'thin', 'foo': 'foobar'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)

    diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
                                                 new_type_ref['id'])

    volume = dict.copy(self.volume)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    volume['volume_type'] = old_type
    volume['host'] = host
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])

    self.driver.retype(ctxt, volume, new_type, diff, host)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.modifyVolume(1, {'isThinProvisioned': True})]

    # validate call chain
    mock_client.assert_has_calls(expected)

def test_retype_same_extra_specs(self):
    """Retype only modifies specs whose values actually changed."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    ctxt = context.get_admin_context()

    host = {'host': self.serverName}
    key_specs_old = {'hplh:provisioning': 'full', 'hplh:ao': 'true'}
    key_specs_new = {'hplh:provisioning': 'full', 'hplh:ao': 'false'}
    old_type_ref = volume_types.create(ctxt, 'old', key_specs_old)
    new_type_ref = volume_types.create(ctxt, 'new', key_specs_new)

    diff, equal = volume_types.volume_types_diff(ctxt, old_type_ref['id'],
                                                 new_type_ref['id'])

    volume = dict.copy(self.volume)
    old_type = volume_types.get_volume_type(ctxt, old_type_ref['id'])
    volume['volume_type'] = old_type
    volume['host'] = host
    new_type = volume_types.get_volume_type(ctxt, new_type_ref['id'])

    self.driver.retype(ctxt, volume, new_type, diff, host)

    expected = self.driver_startup_call_stack + [
        mock.call.getVolumeByName('fakevolume'),
        mock.call.modifyVolume(
            1,
            {'isAdaptiveOptimizationEnabled': False})]

    # validate call chain
    mock_client.assert_has_calls(expected)
def test_migrate_no_location(self):
    """Migration is refused when the destination host has no location info."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    host = {'host': self.serverName, 'capabilities': {}}
    (migrated, update) = self.driver.migrate_volume(
        None,
        self.volume,
        host)
    self.assertFalse(migrated)

    # only startup code is called
    mock_client.assert_has_calls(self.driver_startup_call_stack)
    # and nothing else
    self.assertEqual(
        len(self.driver_startup_call_stack),
        len(mock_client.method_calls))

def test_migrate_incorrect_vip(self):
    """Migration is refused when the destination cluster VIP does not match."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getClusterByName.return_value = {
        "virtualIPAddresses": [{
            "ipV4Address": "10.10.10.10",
            "ipV4NetMask": "255.255.240.0"}]}

    mock_client.getVolumeByName.return_value = {'id': self.volume_id}

    location = (self.driver.proxy.DRIVER_LOCATION % {
        'cluster': 'New_CloudCluster',
        'vip': '10.10.10.111'})

    host = {
        'host': self.serverName,
        'capabilities': {'location_info': location}}
    (migrated, update) = self.driver.migrate_volume(
        None,
        self.volume,
        host)
    self.assertFalse(migrated)

    expected = self.driver_startup_call_stack + [
        mock.call.getClusterByName('New_CloudCluster')]

    mock_client.assert_has_calls(expected)
    # and nothing else
    self.assertEqual(
        len(expected),
        len(mock_client.method_calls))

def test_migrate_with_location(self):
    """A snapshot-free volume migrates by re-homing it to the new cluster."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getClusterByName.return_value = {
        "virtualIPAddresses": [{
            "ipV4Address": "10.10.10.111",
            "ipV4NetMask": "255.255.240.0"}]}

    mock_client.getVolumeByName.return_value = {'id': self.volume_id,
                                                'iscsiSessions': None}
    mock_client.getVolume.return_value = {'snapshots': {
        'resource': None}}

    location = (self.driver.proxy.DRIVER_LOCATION % {
        'cluster': 'New_CloudCluster',
        'vip': '10.10.10.111'})

    host = {
        'host': self.serverName,
        'capabilities': {'location_info': location}}
    (migrated, update) = self.driver.migrate_volume(
        None,
        self.volume,
        host)
    self.assertTrue(migrated)

    expected = self.driver_startup_call_stack + [
        mock.call.getClusterByName('New_CloudCluster'),
        mock.call.getVolumeByName('fakevolume'),
        mock.call.getVolume(
            1,
            'fields=snapshots,snapshots[resource[members[name]]]'),
        mock.call.modifyVolume(1, {'clusterName': 'New_CloudCluster'})]

    mock_client.assert_has_calls(expected)
    # and nothing else
    self.assertEqual(
        len(expected),
        len(mock_client.method_calls))

def test_migrate_with_Snapshots(self):
    """A volume that has snapshots cannot be migrated in the backend."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()
    mock_client.getClusterByName.return_value = {
        "virtualIPAddresses": [{
            "ipV4Address": "10.10.10.111",
            "ipV4NetMask": "255.255.240.0"}]}

    mock_client.getVolumeByName.return_value = {
        'id': self.volume_id,
        'iscsiSessions': None}
    mock_client.getVolume.return_value = {'snapshots': {
        'resource': 'snapfoo'}}

    location = (self.driver.proxy.DRIVER_LOCATION % {
        'cluster': 'New_CloudCluster',
        'vip': '10.10.10.111'})

    host = {
        'host': self.serverName,
        'capabilities': {'location_info': location}}
    (migrated, update) = self.driver.migrate_volume(
        None,
        self.volume,
        host)
    self.assertFalse(migrated)

    expected = self.driver_startup_call_stack + [
        mock.call.getClusterByName('New_CloudCluster'),
        mock.call.getVolumeByName('fakevolume'),
        mock.call.getVolume(
            1,
            'fields=snapshots,snapshots[resource[members[name]]]')]

    mock_client.assert_has_calls(expected)
    # and nothing else
    self.assertEqual(
        len(expected),
        len(mock_client.method_calls))
@mock.patch.object(volume_types, 'get_volume_type',
                   return_value={'extra_specs': {'hplh:ao': 'true'}})
def test_create_volume_with_ao_true(self, _mock_volume_type):
    """hplh:ao=true (the backend default) adds no AO key to createVolume."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    volume_with_vt = self.volume
    volume_with_vt['volume_type_id'] = 1

    # mock return value of createVolume
    mock_client.createVolume.return_value = {
        'iscsiIqn': self.connector['initiator']}

    volume_info = self.driver.create_volume(volume_with_vt)

    self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
                     volume_info['provider_location'])

    # make sure createVolume is called without an
    # isAdaptiveOptimizationEnabled key (AO enabled is the backend default)
    expected = self.driver_startup_call_stack + [
        mock.call.createVolume(
            'fakevolume',
            1,
            units.GiB,
            {'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]

    mock_client.assert_has_calls(expected)

@mock.patch.object(volume_types, 'get_volume_type',
                   return_value={'extra_specs': {'hplh:ao': 'false'}})
def test_create_volume_with_ao_false(self, _mock_volume_type):
    """hplh:ao=false is passed explicitly as AO disabled to createVolume."""
    # setup drive with default configuration
    # and return the mock HTTP LeftHand client
    mock_client = self.setup_driver()

    volume_with_vt = self.volume
    volume_with_vt['volume_type_id'] = 1

    # mock return value of createVolume
    mock_client.createVolume.return_value = {
        'iscsiIqn': self.connector['initiator']}

    volume_info = self.driver.create_volume(volume_with_vt)

    self.assertEqual('10.0.1.6:3260,1 iqn.1993-08.org.debian:01:222 0',
                     volume_info['provider_location'])

    # make sure createVolume is called with
    # isAdaptiveOptimizationEnabled == false
    expected = self.driver_startup_call_stack + [
        mock.call.createVolume(
            'fakevolume',
            1,
            units.GiB,
            {'isThinProvisioned': True,
             'clusterName': 'CloudCluster1',
             'isAdaptiveOptimizationEnabled': False})]

    mock_client.assert_has_calls(expected)
| {
"content_hash": "7d337aad355f27e014e41126734f2335",
"timestamp": "",
"source": "github",
"line_count": 1403,
"max_line_length": 79,
"avg_line_length": 38.54383464005702,
"alnum_prop": 0.5680418662277863,
"repo_name": "NeCTAR-RC/cinder",
"id": "d9ad1a8e096b873e8f64207d062cf68b2bccafef",
"size": "54745",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/icehouse",
"path": "cinder/tests/test_hplefthand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "9824"
},
{
"name": "Python",
"bytes": "6176241"
},
{
"name": "Shell",
"bytes": "15237"
}
],
"symlink_target": ""
} |
import logging
import os
import sqlite3
from adb_super import app_data
class DatabaseManager:
    """Manages the application's SQLite database file.

    On construction the manager resolves the database file location, then
    opens and closes a probe connection so an unusable location fails fast.
    `connect_safely()` creates the schema on first use and transparently
    re-creates the database when the on-disk schema version is outdated.
    """

    def __init__(self, app_data_dir_path=None):
        """Initialize the manager and probe the database connection.

        :param app_data_dir_path: directory that holds the database file.
            When None, it is resolved from ``app_data.AppDataDirManager``.
            (Resolved lazily here instead of in the default argument, so
            importing this module no longer instantiates AppDataDirManager
            as a side effect.)
        """
        self.file_name = "database.db"
        self.schema_version = "0.1"
        if app_data_dir_path is None:
            app_data_dir_path = app_data.AppDataDirManager().app_data_dir_path
        self.app_data_dir_path = app_data_dir_path
        # os.path.join works whether or not the directory path has a
        # trailing separator (plain concatenation silently broke without one)
        self.db_file_path = os.path.join(self.app_data_dir_path, self.file_name)
        # Probe: fail fast if the database file cannot be opened.
        self.connect()
        self.disconnect()

    def connect(self):
        """Open a connection to the database file and create a cursor."""
        self.conn = sqlite3.connect(self.db_file_path)
        logging.debug(
            "{db_name} database connection opened."
            .format(db_name=self.db_file_path))
        self.c = self.conn.cursor()
        logging.debug("Cursor for connection created.")

    def disconnect(self):
        """Close the current database connection."""
        self.conn.close()
        logging.debug("Database connection closed.")

    def create_db(self):
        """Create the schema (versioning + devices tables) from scratch."""
        self.connect()
        with self.conn:
            self.c.execute(
                "CREATE TABLE versioning(version TEXT NOT NULL)")
            self.c.execute(
                "INSERT INTO versioning VALUES (:version)",
                {'version': self.schema_version})
            self.c.execute(
                "CREATE TABLE devices(" +
                "id INT PRIMARY KEY NOT NULL, " +
                "friendly_name TEXT NOT NULL)"
            )
            self.conn.commit()
            logging.info(
                "Version {db_version} database created."
                .format(db_version=self.schema_version))
        self.disconnect()

    def delete_database_file(self):
        """Remove the database file from disk."""
        os.remove(os.path.abspath(self.db_file_path))
        logging.debug("Database deleted.")

    def is_database_file_existing(self):
        """Return True if the database file exists on disk."""
        return os.path.isfile(os.path.abspath(self.db_file_path))

    def is_versioning_table_existing(self):
        """Return True if the 'versioning' table exists in the database."""
        self.connect()
        self.c.execute(
            "SELECT name " +
            "FROM sqlite_master " +
            "WHERE type='table' AND name='versioning'"
        )
        exists = self.c.fetchone() is not None
        if exists:
            logging.debug("Database did exist.")
        else:
            logging.debug("Database did not exist.")
        self.disconnect()
        return exists

    def get_db_version(self):
        """Return the schema version string stored in the database."""
        self.connect()
        self.c.execute(
            "SELECT version " +
            "FROM versioning"
        )
        version = self.c.fetchone()[0]
        self.disconnect()
        return version

    def connect_safely(self):
        """Ensure a database with the current schema version exists.

        Creates the database when missing; drops and re-creates it when the
        stored schema version differs from ``self.schema_version``.
        """
        if not self.is_versioning_table_existing():
            self.create_db()
            return
        # Database exists -- verify its schema version is current.
        current_db_version = self.get_db_version()
        logging.debug(
            "Current database version: {db_version}."
            .format(db_version=current_db_version))
        if current_db_version != self.schema_version:
            # Outdated schema: drop and re-create a clean database.
            self.delete_database_file()
            logging.info("Old database ({current_db_version}) dropped."
                         .format(current_db_version=current_db_version))
            self.create_db()
        else:
            logging.debug("Valid database already exists.")
| {
"content_hash": "a903cd99298fa6f1ae463e50b2de0572",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 76,
"avg_line_length": 33.405660377358494,
"alnum_prop": 0.5506918949449308,
"repo_name": "Kregap/ADB-Super",
"id": "1c6bf916dbacd1f3863f746964fe7ef744b0f2c8",
"size": "3541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adb_super/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "5051"
},
{
"name": "Python",
"bytes": "11765"
},
{
"name": "Ruby",
"bytes": "1868"
},
{
"name": "Shell",
"bytes": "351"
}
],
"symlink_target": ""
} |
"""Make the custom certificate and private key files used by TLS tests.
Code heavily borrowed from Lib/tests/make_ssl_certs.py in CPython.
"""
from __future__ import annotations
import os
import shutil
import subprocess
import tempfile
req_template = """
[req]
distinguished_name = req_distinguished_name
x509_extensions = req_x509_extensions
prompt = no
[req_distinguished_name]
C = XY
L = Dask-distributed
O = Dask
CN = {hostname}
[req_x509_extensions]
subjectAltName = @san
[san]
DNS.1 = {hostname}
[ca]
default_ca = CA_default
[CA_default]
dir = cadir
database = $dir/index.txt
crlnumber = $dir/crl.txt
default_md = sha256
default_days = 360000
default_crl_days = 360000
certificate = tls-ca-cert.pem
private_key = tls-ca-key.pem
serial = $dir/serial
RANDFILE = $dir/.rand
policy = policy_match
[policy_match]
countryName = match
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[policy_anything]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[v3_ca]
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer
basicConstraints = CA:true
"""
here = os.path.abspath(os.path.dirname(__file__))
def make_cert_key(hostname, sign=False):
    """Generate a private key and certificate for *hostname*.

    With ``sign=False`` a self-signed certificate is produced; with
    ``sign=True`` the request is signed by the CA set up by make_ca().
    Returns a ``(cert_pem, key_pem)`` pair of strings.
    """
    print("creating cert for " + hostname)
    scratch_files = []

    def new_scratch_file():
        # Create a named temp file and remember it for cleanup.
        with tempfile.NamedTemporaryFile(delete=False) as handle:
            scratch_files.append(handle.name)
            return handle.name

    req_file = new_scratch_file()
    cert_file = new_scratch_file()
    key_file = new_scratch_file()
    try:
        with open(req_file, "w") as out:
            out.write(req_template.format(hostname=hostname))

        args = [
            "req",
            "-new",
            "-days",
            "365242",
            "-nodes",
            "-newkey",
            "rsa:2048",
            "-keyout",
            key_file,
            "-config",
            req_file,
        ]
        if sign:
            # Emit a CSR to a fourth scratch file for the CA to sign.
            reqfile = new_scratch_file()
            args += ["-out", reqfile]
        else:
            # Directly emit a self-signed certificate.
            args += ["-x509", "-out", cert_file]
        subprocess.check_call(["openssl"] + args)

        if sign:
            sign_args = [
                "ca",
                "-config",
                req_file,
                "-out",
                cert_file,
                "-outdir",
                "cadir",
                "-policy",
                "policy_anything",
                "-batch",
                "-infiles",
                reqfile,
            ]
            subprocess.check_call(["openssl"] + sign_args)

        with open(cert_file) as fh:
            cert = fh.read()
        with open(key_file) as fh:
            key = fh.read()
        return cert, key
    finally:
        for leftover in scratch_files:
            os.remove(leftover)
TMP_CADIR = "cadir"
def unmake_ca():
shutil.rmtree(TMP_CADIR)
def make_ca():
    """Create a self-signed CA in TMP_CADIR.

    Writes 'tls-ca-key.pem' and 'tls-ca-cert.pem' in the current directory
    plus the CA bookkeeping files (index, serial) under TMP_CADIR.
    """
    os.mkdir(TMP_CADIR)
    # The CA database must exist (even empty) before 'openssl ca' runs.
    with open(os.path.join("cadir", "index.txt"), "a+") as f:
        pass  # empty file
    # with open(os.path.join('cadir','crl.txt'),'a+') as f:
    #    f.write("00")
    with open(os.path.join("cadir", "index.txt.attr"), "w+") as f:
        f.write("unique_subject = no")

    with tempfile.NamedTemporaryFile("w") as t:
        t.write(req_template.format(hostname="our-ca-server"))
        t.flush()
        with tempfile.NamedTemporaryFile() as f:
            # Step 1: generate the CA key and a certificate request.
            args = [
                "req",
                "-new",
                "-days",
                "365242",
                "-extensions",
                "v3_ca",
                "-nodes",
                "-newkey",
                "rsa:2048",
                "-keyout",
                "tls-ca-key.pem",
                "-out",
                f.name,
                "-subj",
                "/C=XY/L=Dask-distributed/O=Dask CA/CN=our-ca-server",
            ]
            subprocess.check_call(["openssl"] + args)
            # Step 2: self-sign the request to produce the CA certificate.
            args = [
                "ca",
                "-config",
                t.name,
                "-create_serial",
                "-out",
                "tls-ca-cert.pem",
                "-batch",
                "-outdir",
                TMP_CADIR,
                "-keyfile",
                "tls-ca-key.pem",
                "-days",
                "365242",
                "-selfsign",
                "-extensions",
                "v3_ca",
                "-infiles",
                f.name,
            ]
            subprocess.check_call(["openssl"] + args)
            # args = ['ca', '-config', t.name, '-gencrl', '-out', 'revocation.crl']
            # subprocess.check_call(['openssl'] + args)
if __name__ == "__main__":
os.chdir(here)
cert, key = make_cert_key("localhost")
with open("tls-self-signed-cert.pem", "w") as f:
f.write(cert)
with open("tls-self-signed-key.pem", "w") as f:
f.write(key)
# For certificate matching tests
make_ca()
with open("tls-ca-cert.pem") as f:
ca_cert = f.read()
cert, key = make_cert_key("localhost", sign=True)
with open("tls-cert.pem", "w") as f:
f.write(cert)
with open("tls-cert-chain.pem", "w") as f:
f.write(cert)
f.write(ca_cert)
with open("tls-key.pem", "w") as f:
f.write(key)
with open("tls-key-cert.pem", "w") as f:
f.write(key)
f.write(cert)
unmake_ca()
| {
"content_hash": "e011ee8a1a885a53d9069a52f244eb19",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 83,
"avg_line_length": 26.493273542600896,
"alnum_prop": 0.47562626946513203,
"repo_name": "dask/distributed",
"id": "b963cb46c4a9631ff6e2bb9a8249eaeb0f9c63fd",
"size": "5908",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distributed/tests/make_tls_certs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "16583"
},
{
"name": "JavaScript",
"bytes": "9337"
},
{
"name": "Jinja",
"bytes": "17081"
},
{
"name": "Python",
"bytes": "3746516"
},
{
"name": "Shell",
"bytes": "2030"
}
],
"symlink_target": ""
} |
import os
import re
from lxml import etree
from lxml.etree import set_default_parser
from lxml.etree import XMLParser
from ansible.module_utils.basic import AnsibleModule
# Ansible module documentation. Fixes over the previous revision: the second
# separator option was wrongly named 'testcase_prefix_sep' (a duplicate YAML
# key shadowing the first) and is now 'testsuite_prefix_sep'; several typos
# ("it's longer it's longer", "will be add", "XMLPrser") are corrected.
DOCUMENTATION = r'''
---
module: junitxml

short_description: JUnitXML editor

version_added: "2.9.17"

description: Edits JUnitXML files

options:
    src:
        description:
            - The JUnitXML file with the content that need to be changed
        required: true
        type: str
    dst:
        description:
            - The new JUnitXML file to create.
              If not given, the source file will be updated with the changes.
              (All parents directories will be created automatically)
        required: false
        type: str
    indent:
        description:
            - The indentation that will be used when creating the XML file
        required: false
        type: str
    prepend_classname:
        description:
            - Prepends the classname to test in each testcase
              Example:
                In: <testcase classname="X" test="Y" time=>"0.01"</testcase>
                Out: <testcase classname="X" test="X.Y" time=>"0.01"</testcase>
        default: true
        type: bool
    remove_testcase_id:
        description:
            - Removes the test ID from the 'name' attribute of all testcase
              elements.
              'Test ID' considered as square brackets and everything inside them.
              Example:
                In: <testcase ... name="testname[id-xxx-yyy-zzz]"><testcase>
                Out: <testcase ... name="testname"><testcase>
        default: false
        type: bool
    testcase_name_max_len:
        description:
            - The maximum length of the testcase name attribute.
              (Value should be an integer greater than zero)
              If the --testcase_name_max_len_postfix is given, it will be
              included in the maximum length
              Example:
                testcase_name_max_len = 6
                testcase_name_max_len_postfix = '...'
                In: <testcase ... name="123456789"><testcase>
                Out: <testcase ... name="123..."><testcase>
        default: 0
        type: int
    testcase_name_max_len_postfix:
        description:
            - A postfix to add to the testcase name in case it is longer
              than the 'testcase_name_max_len'
        default: '...'
        type: str
    testcase_prefix:
        description:
            - A prefix to add to the name attribute of each testcase element.
              (Uses the 'testcase-prefix-sep' value as a separator)
              Example:
                In: <testcase ... name="testname"><testcase>
                Out: <testcase ... name="myprefix-testname"><testcase>
        type: str
        required: false
    testcase_prefix_sep:
        description: The separator that will be used for testcase prefix.
        type: str
        default: '-'
    testcase_prefix_no_dots:
        description: Whether or not to remove dot ('.') chars from the given
                     testcase prefix.
        type: bool
        default: true
    testsuite_prefix:
        description:
            - A comma separated value of prefixes to add to testsuite elements.
              Please be aware of the following scenarios:
              1. In case the number of testsuites and prefixes are equal, a
              corresponding prefix will be added to each testsuite's name (index
              based).
              2. In case multiple testsuites exist and only one prefix is
              given, a counter number will be added at the end of each prefix.
              Attention: In any other case, a RunTime Error will be raised!
              Examples:
               1. Number of testsuites: 2, testsuite-prefixes=XXX,YYY
                 <testsuites>
                   <testsuite name='first_testsuite'><testsuite>
                   <testsuite name='second_testsuite'><testsuite>
                 </testsuites>
                 Outcome:
                 <testsuites>
                   <testsuite name='XXX-first_testsuite'><testsuite>
                   <testsuite name='YYY-second_testsuite'><testsuite>
                 </testsuites>
               2. Number of testsuites: 3, testsuite-prefixes=ZZZ
                 <testsuites>
                   <testsuite name='first_testsuite'><testsuite>
                   <testsuite name='second_testsuite'><testsuite>
                   <testsuite name='third_testsuite'><testsuite>
                 </testsuites>
                 Outcome:
                 <testsuites>
                   <testsuite name='ZZZ1-first_testsuite'><testsuite>
                   <testsuite name='ZZZ2-second_testsuite'><testsuite>
                   <testsuite name='ZZZ3-third_testsuite'><testsuite>
                 </testsuites>
        type: str
        required: false
    testsuite_prefix_sep:
        description: The separator that will be used for testsuite prefix.
        type: str
        default: '-'
    remove_skipped:
        description:
            - Indicates whether or not to remove skipped testcases from the
              result file.
        type: bool
        default: false
    lxml_huge_tree:
        description:
            - When 'True', sets the default lxml XMLParser with huge_tree=True.
              That disables security restrictions and supports very deep trees
              and very long text content (only affects libxml2 2.7+)
        type: bool
        default: false

requirements:
    - "lxml"
'''
# Usage examples for ansible-doc. Fixes the garbled sentence
# "In case more there one testsuite exist" from the previous revision.
EXAMPLES = r'''
# Create a new result file 'result.xml' without test IDs from src 'source.xml'
- name: Create a new result file without test IDs
  junitxml:
    src: source.xml
    dst: result.xml
    remove_testcase_id: true

# Remove test IDs from the source file (without creating a new file)
- name: Remove testcase test IDs from result file
  junitxml:
    src: source.xml
    remove_testcase_id: true

# Create a new result file including parent directories with 'TestCasePrefix'
# prefix to all testcases in the file
- name: Remove testcase test IDs from result file
  junitxml:
    src: source.xml
    dst: relative/path/new/dir/result.xml
    testcase_prefix: TestCasePrefix

# Create a new result file, with the testcase class name attribute prepended to
# each testcase name and add the 'TestSuitePrefix' to the name of all test
# suites in the file.
# In case more than one testsuite exists, a counter digit will be added to the
# end of each name: TestSuitePrefix1, TestSuitePrefix2 ... TestSuitePrefixN
- name: Remove testcase test IDs from result file
  junitxml:
    src: source.xml
    dst: /tmp/result.xml
    testcase_prefix: TestSuitePrefix
'''
# Return-value documentation. Fixes the word-order typos
# "Full path the to ..." from the previous revision.
RETURN = r'''
changed:
    description: Whether a change was made on the disk
    type: bool
    returned: always
    sample: true
src_file:
    description: Full path to the source file
    type: str
    returned: always
    sample: '/home/user/test_results/source.xml'
dst_file:
    description: Full path to the result file
    type: str
    returned: always
    sample: '/home/user/test_results/result.xml'
content_changed:
    description: Whether the result file is different from the source
    type: bool
    returned: always
    sample: false
skipped_removed:
    description: The number of skipped testcases that have been removed from
                 the result file
    type: int
    returned: When at least one skipped testcase was removed
    sample: 3
'''
class JUnintXML(object):
    """In-memory representation of a JUnit XML result file, with helpers to
    transform testcase/testsuite elements and write the result back to disk.

    Relies on the lxml etree implementation for ``Element.getparent()`` and
    ``huge_tree`` support (see the module's documented requirements).

    NOTE(review): the class name looks like a typo of "JUnitXML", but it is
    part of the public interface and therefore kept unchanged.
    """

    def __init__(self, src_file, dst_file=None):
        """Parses the source file into an XML tree.

        :param src_file: Path to the source JUnit XML result file.
        :param dst_file: Optional path to the destination file.
        """
        self.src_file = src_file
        self.dst_file = dst_file
        self.tree = etree.parse(src_file)
        # Normalizes the indentation of the parsed tree (see indent setter).
        self.indent = ''
        self.element_changed = False   # some element attribute was modified
        self.file_changed = False      # a file on disk was (or will be) changed
        self.write_needed = False      # a modification requires write()
        self.skipped_removed = 0       # number of skipped testcases removed

    @staticmethod
    def __get_full_path(path):
        # Expand '~' and make the path absolute.
        return os.path.abspath(os.path.expanduser(path))

    @property
    def src_file(self):
        """Absolute path to the source file."""
        return self.__class__.__get_full_path(self.__src_file)

    @src_file.setter
    def src_file(self, src_file):
        self.__src_file = src_file

    @property
    def dst_file(self):
        """Absolute path to the destination file, or None if unset."""
        if self.__dst_file is None:
            return None
        return self.__class__.__get_full_path(self.__dst_file)

    @dst_file.setter
    def dst_file(self, dst_file):
        self.__dst_file = dst_file

    def limit_testcase_name_len(self, max_len, postfix=''):
        """Truncates the testcase name if longer than the given maximum length

        :param max_len: The maximum length of the testcase name attribute
            (including the prefix if given)
        :param postfix: A postfix to add to the testcase name in case it is
            longer than the given maximum length
        """
        def _cut_testcase_name_len(elem, _max_len, _postfix):
            name = elem.get('name')
            if len(name) <= _max_len:
                return False
            # Reserve room for the postfix within the length budget.
            new_name = name[:_max_len - len(_postfix)] + _postfix
            elem.set('name', new_name)
            return True

        self.__process(
            action_func=_cut_testcase_name_len,
            _max_len=max_len,
            _postfix=postfix)

    def prepend_classname_to_name(self):
        """Prepends the classname to the name attribute for each testcase"""
        def _prepend_classname_to_name(elem):
            classname = elem.get('classname')
            if classname:
                elem.set('name', f"{classname}.{elem.get('name')}")
                return True
            return False

        self.__process(action_func=_prepend_classname_to_name)

    def remove_id_from_testcase_name(self):
        """Removes the ID from testcases 'name' attribute"""
        def _remove_id_from_name(elem):
            name = elem.get('name')
            # IDs appear as a bracketed suffix, e.g. "test_foo[1234]".
            new_name = re.sub(r'\[.*\]', '', name)
            if new_name != name:
                elem.set('name', new_name)
                return True
            return False

        self.__process(action_func=_remove_id_from_name)

    def remove_skipped_testcases(self):
        """Removes all skipped tests (testcases with 'skipped' elements)"""
        def _remove_skipped_testcases(elem):
            for child in list(elem):
                if child.tag.lower() == 'skipped':
                    # getparent() is lxml-specific.
                    parent = elem.getparent()
                    # Update parent with correct total & skipped tests number
                    if parent.tag.lower() == 'testsuite':
                        for atr in ('tests', 'skipped'):
                            if atr in parent.keys():
                                parent.set(atr, str(int(parent.get(atr)) - 1))
                    parent.remove(elem)
                    self.skipped_removed += 1
                    break
            else:
                return False
            return True

        self.__process(action_func=_remove_skipped_testcases)

    def add_prefix_to_testcase(self, tc_prefix, tc_prefix_sep):
        """Adds a prefix to each testcase 'name' attribute

        :param tc_prefix: A prefix to add to the name attribute of each
            testcase element
        :param tc_prefix_sep: A separator between the prefix and the testcase
            name
        """
        def _add_prefix(elem, prefix, prefix_sep):
            name = elem.get('name')
            if prefix:
                new_name = prefix + prefix_sep + name
                elem.set('name', new_name)
                return True
            return False

        self.__process(
            action_func=_add_prefix,
            prefix=tc_prefix,
            prefix_sep=tc_prefix_sep)

    def __process(self, action_func, **kwargs):
        # Applies action_func to every testcase element and records whether
        # anything changed so that write() is triggered later.
        changed = self.__class__.__process_testcases(
            self.tree.getroot(),
            func=action_func,
            **kwargs)
        if changed:
            self.element_changed = True
            self.write_needed = True

    @staticmethod
    def __process_testcases(elem, func, **kwargs):
        # Depth-first traversal; func must return True if it modified elem.
        element_changed = False
        if elem.tag == 'testcase':
            element_changed = func(elem, **kwargs) or element_changed
        for child in list(elem):
            element_changed = JUnintXML.__process_testcases(
                child, func, **kwargs) or element_changed
        return element_changed

    def add_testsuite_prefixes(self, prefixes, prefixes_sep):
        """Adds prefixes to the name of testsuite elements

        :param prefixes: A comma separated string of prefixes
        :param prefixes_sep: A separator between the prefix and the testsuite
            name
        """
        ts_list = []
        prefix_list = prefixes.split(',')

        def get_all_testsuites(elem):
            # Depth-first collection of every <testsuite> element.
            if elem.tag == 'testsuite':
                ts_list.append(elem)
            for child in list(elem):
                get_all_testsuites(child)

        get_all_testsuites(self.tree.getroot())
        len_ts = len(ts_list)
        len_prefix = len(prefix_list)
        if not len_ts:
            return
        cnt = 1
        for idx, ts in enumerate(ts_list):
            name = ts.get('name')
            if len_ts == 1 and len_prefix == 1:
                # One suite, one prefix: use the prefix as-is.
                prefix = prefix_list[0]
            elif len_ts > 1 and len_prefix == 1:
                # Several suites, one prefix: append a running counter.
                prefix = prefix_list[0] + str(cnt)
                cnt += 1
            elif len_ts == len_prefix:
                # One prefix per suite, matched by position.
                prefix = prefix_list[idx]
            else:
                raise RuntimeError(
                    f"Mismatch number of Test Suites '{len_ts}' and prefixes "
                    f"'{len_prefix}' in '{self.src_file}'")
            new_name = f"{prefix}{prefixes_sep}{name}"
            ts.set('name', new_name)
            self.element_changed = True
            # BUG FIX: this method used to set only element_changed, so a run
            # that *only* added testsuite prefixes never triggered write()
            # (main() gates the write on write_needed).
            self.write_needed = True

    def write(self, dst_file=None):
        """Writes changes to a file

        :param dst_file: A path to the output file (src_file will be updated
            if not given)
        """
        self.dst_file = dst_file if dst_file else self.src_file
        dirname = os.path.dirname(self.dst_file)
        if not os.path.exists(dirname):
            os.makedirs(dirname, exist_ok=True)
            self.file_changed = True
        self.tree.write(self.dst_file)
        if self.src_file != self.dst_file:
            self.file_changed = True
        elif self.element_changed:
            self.file_changed = True

    @property
    def changed(self):
        """Whether anything changed on disk (Ansible 'changed' flag)."""
        return self.file_changed

    @property
    def indent(self):
        return self.__space

    @indent.setter
    def indent(self, space):
        # NOTE(review): changing the indent alone does not set write_needed,
        # so an indent-only run is never written back -- confirm intended.
        self.__space = space
        etree.indent(self.tree, space=space)
def main():
    """Entry point of the Ansible module.

    Parses the module arguments, applies the requested transformations to
    the JUnit XML result file and exits with the result facts (or fails
    with the error message).
    """
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True),
            dst=dict(required=False),
            indent=dict(required=False),
            prepend_classname=dict(default=False, type='bool', required=False),
            remove_testcase_id=dict(
                default=False, type='bool', required=False),
            testcase_name_max_len=dict(default=0, required=False, type='int'),
            testcase_name_max_len_postfix=dict(default='...', required=False),
            testcase_prefix=dict(required=False),
            testcase_prefix_sep=dict(default='-', required=False),
            testcase_prefix_no_dots=dict(
                default=True, type='bool', required=False),
            testsuite_prefixes=dict(required=False),
            testsuite_prefixes_sep=dict(default='-', required=False),
            remove_skipped=dict(default=False, required=False, type='bool'),
            lxml_huge_tree=dict(default=False, type='bool', required=False),
        )
    )
    try:
        # Allow very deep/large XML documents when requested (lxml only;
        # disables libxml2 security limits).
        if module.params['lxml_huge_tree']:
            set_default_parser(XMLParser(huge_tree=True))
        juxml = JUnintXML(src_file=module.params['src'])
        # Transformation order matters: drop skipped tests first, then clean
        # and extend testcase names, then testsuite names, and finally
        # enforce the name-length limit on the resulting names.
        if module.params['remove_skipped']:
            juxml.remove_skipped_testcases()
        if module.params['remove_testcase_id']:
            juxml.remove_id_from_testcase_name()
        if module.params['prepend_classname']:
            juxml.prepend_classname_to_name()
        if module.params['testcase_prefix']:
            tc_prefix = module.params['testcase_prefix']
            if module.params['testcase_prefix_no_dots']:
                tc_prefix = tc_prefix.replace('.', '')
            juxml.add_prefix_to_testcase(
                tc_prefix=tc_prefix,
                tc_prefix_sep=module.params['testcase_prefix_sep'])
        if module.params['testsuite_prefixes']:
            juxml.add_testsuite_prefixes(
                prefixes=module.params['testsuite_prefixes'],
                prefixes_sep=module.params['testsuite_prefixes_sep'])
        if module.params['testcase_name_max_len']:
            juxml.limit_testcase_name_len(
                max_len=module.params['testcase_name_max_len'],
                postfix=module.params['testcase_name_max_len_postfix']
            )
        if module.params['indent'] is not None:
            juxml.indent = module.params['indent']
        if juxml.write_needed:
            juxml.write(dst_file=module.params['dst'])
        return_dict = dict(
            changed=juxml.changed,
            src_file=juxml.src_file,
            dst_file=juxml.dst_file or juxml.src_file,
            content_changed=juxml.element_changed and juxml.file_changed
        )
        # Returns the number of removed skipped testcases only if some were
        # really removed
        if juxml.skipped_removed:
            return_dict['skipped_removed'] = juxml.skipped_removed
        module.exit_json(**return_dict)
    except Exception as ex:
        # BUG FIX: fail_json(msg=...) expects a serializable string; passing
        # the exception object itself can break serialization of the result.
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| {
"content_hash": "a8c51f400b3fdbb08965d8ebc15010d8",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 79,
"avg_line_length": 32.5462962962963,
"alnum_prop": 0.5820199146514936,
"repo_name": "redhat-openstack/infrared",
"id": "63ca08a4b537f8ace5047ed91adf23b70b61bd29",
"size": "18301",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "infrared/common/library/junitxml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "682993"
},
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Python",
"bytes": "485633"
},
{
"name": "Ruby",
"bytes": "1019"
},
{
"name": "Shell",
"bytes": "23282"
}
],
"symlink_target": ""
} |
import socket
import sys
## Function to process requests
def process(conn):
#conn.send("Welcome to banana's potassium calculater. Please send the number of bananas.\n")
# read userInput from client
userInput = conn.recv(BUFFER_SIZE)
response = ''
if not userInput:
print "Error reading message"
return
userInputs = userInput.split(' ')
if len(userInputs) != 3:
response = "Invalid Data"
else:
inputNum = userInputs[2]
if not inputNum.isdigit():
response = "Invalid Number"
else:
num = int(inputNum)
response = str(0.442 * num)
conn.send(response)
conn.close()
### Main code run when program is started
BUFFER_SIZE = 1024  # max bytes read per request
interface = ""  # empty string binds on all interfaces

# if input arguments are wrong, print out usage
if len(sys.argv) != 2:
    print >> sys.stderr, "usage: python {0} portnum\n".format(sys.argv[0])
    sys.exit(1)
portnum = int(sys.argv[1])

# create socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((interface, portnum))
s.listen(5)
# serve clients one at a time, forever
while True:
    # accept connection and print out info of client
    conn, addr = s.accept()
    print 'Accepted connection from client', addr
    process(conn)
# NOTE(review): unreachable -- the loop above never terminates.
s.close()
| {
"content_hash": "d4857218f66c9fc06b6028919918dc04",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 96,
"avg_line_length": 22.59259259259259,
"alnum_prop": 0.6516393442622951,
"repo_name": "hantasm/dist-sys-exercises",
"id": "b0bbe600e34f43730175fec2820c06b70238c951",
"size": "1487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lec-2/conversions/templates/python/convServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3822"
},
{
"name": "Java",
"bytes": "6021"
},
{
"name": "Python",
"bytes": "1696"
}
],
"symlink_target": ""
} |
from .WaveObject import WaveObject, Vertex, FaceVertex, Face, Normal, Text_Coord
from .ObjParser import ObjParser
from .Scene import Scene
from .ObjWriter import ObjWriter
| {
"content_hash": "e3680ecf4100a92a436fd3b57178df05",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 80,
"avg_line_length": 43,
"alnum_prop": 0.8197674418604651,
"repo_name": "tretum/Avorion-Obj-Converter",
"id": "e29d709b51b13cdaadc14788b692dc2173306238",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waveobjparser/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21734"
}
],
"symlink_target": ""
} |
import happybase

# Connect to the HBase Thrift server on this host.
connection = happybase.Connection('192.168.1.200')
# Show the tables that exist before any change.
print(connection.tables())
# Column family definition for the table; 'c' uses the HBase defaults.
families = {
    'c': dict(),  # use defaults
}
# Table creation is intentionally left commented out.
#connection.create_table('travelapi_sabre_airlines', families)
print(connection.tables()) | {
"content_hash": "0d63b6f8bb57c38726c681fba9ae1fe2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 21.363636363636363,
"alnum_prop": 0.7276595744680852,
"repo_name": "bundgus/python-playground",
"id": "e61f30cfc5b1c8cfec6dba01cdd3e48fbf8c90c1",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hbase-playground/happybase_create_table_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "897"
},
{
"name": "HTML",
"bytes": "22309040"
},
{
"name": "Jupyter Notebook",
"bytes": "666681"
},
{
"name": "Python",
"bytes": "1046557"
},
{
"name": "Thrift",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Deals with correlations."""
import copy
import numpy
from scipy.stats import pearsonr
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import permutation
from gewittergefahr.deep_learning import permutation_utils
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
def _linear_idx_to_matrix_channel_idxs(linear_index, num_predictors_by_matrix):
"""Converts linear predictor index to matrix channel indices.
T = number of input tensors to the model
:param linear_index: Linear predictor index.
:param num_predictors_by_matrix: length-T numpy array with number of
predictors (channels) in each input matrix.
:return: matrix_index: Matrix index.
:return: channel_index: Channel index.
"""
cumsum_predictors_by_matrix = numpy.cumsum(num_predictors_by_matrix)
matrix_index = numpy.where(linear_index < cumsum_predictors_by_matrix)[0][0]
if matrix_index == 0:
channel_index = linear_index
else:
channel_index = (
linear_index - cumsum_predictors_by_matrix[matrix_index - 1]
)
return matrix_index, channel_index
def _take_spatial_mean(data_matrix):
"""Takes spatial mean over data matrix.
E = number of examples
:param data_matrix: numpy array, where the first axis has length E and all
other axes represent spatial dimensions.
:return: mean_values: length-E numpy array of means.
"""
num_spatial_dim = len(data_matrix.shape) - 1
these_axes = numpy.linspace(
1, num_spatial_dim, num=num_spatial_dim, dtype=int
).tolist()
return numpy.mean(data_matrix, axis=tuple(these_axes))
def get_pearson_correlations(predictor_matrices, cnn_metadata_dict,
                             separate_radar_heights=False):
    """Computes Pearson correlation between each pair of predictors.

    P = total number of predictors (over all matrices)

    :param predictor_matrices: See doc for
        `permutation.create_nice_predictor_names`.
    :param cnn_metadata_dict: Same.
    :param separate_radar_heights: Same.
    :return: correlation_matrix: P-by-P numpy array of Pearson correlations.
    :return: predictor_names: length-P list of predictor names.
    """
    error_checking.assert_is_boolean(separate_radar_heights)

    # Heights can be separated only when the first matrix is 5-D
    # (presumably example x grid dims x height x field -- TODO confirm).
    first_num_dimensions = len(predictor_matrices[0].shape)
    separate_radar_heights = (
        separate_radar_heights and first_num_dimensions == 5
    )

    predictor_names_by_matrix = permutation.create_nice_predictor_names(
        predictor_matrices=predictor_matrices,
        cnn_metadata_dict=cnn_metadata_dict,
        separate_radar_heights=separate_radar_heights)

    num_matrices = len(predictor_names_by_matrix)
    for i in range(num_matrices):
        print('Predictors in {0:d}th matrix:\n{1:s}\n'.format(
            i + 1, str(predictor_names_by_matrix[i])
        ))
    print(SEPARATOR_STRING)

    # Deep copy, so that flattening the height dimension below does not
    # mutate the caller's matrices.
    predictor_matrices_to_use = copy.deepcopy(predictor_matrices)
    if separate_radar_heights:
        predictor_matrices_to_use[0] = permutation_utils.flatten_last_two_dim(
            predictor_matrices_to_use[0]
        )[0]

    num_predictors_by_matrix = numpy.array(
        [len(n) for n in predictor_names_by_matrix], dtype=int
    )

    # Concatenate the per-matrix name lists into one flat length-P list.
    predictor_names = sum(predictor_names_by_matrix, [])
    num_predictors = len(predictor_names)
    correlation_matrix = numpy.full((num_predictors, num_predictors), numpy.nan)

    # Fill the upper triangle (plus diagonal) and mirror it, since Pearson
    # correlation is symmetric.
    for i in range(num_predictors):
        for j in range(i, num_predictors):
            if i == j:
                correlation_matrix[i, j] = 1.
                continue

            i_matrix, i_channel = _linear_idx_to_matrix_channel_idxs(
                linear_index=i,
                num_predictors_by_matrix=num_predictors_by_matrix)
            j_matrix, j_channel = _linear_idx_to_matrix_channel_idxs(
                linear_index=j,
                num_predictors_by_matrix=num_predictors_by_matrix)

            # Correlate the spatially averaged (per-example) values of the
            # two channels.
            these_first_values = _take_spatial_mean(
                predictor_matrices_to_use[i_matrix][..., i_channel]
            )
            these_second_values = _take_spatial_mean(
                predictor_matrices_to_use[j_matrix][..., j_channel]
            )

            correlation_matrix[i, j] = pearsonr(
                these_first_values, these_second_values
            )[0]
            correlation_matrix[j, i] = correlation_matrix[i, j]

    return correlation_matrix, predictor_names
| {
"content_hash": "862cab359a40e88e1b01a5df0a9ab497",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 80,
"avg_line_length": 34.36153846153846,
"alnum_prop": 0.6516677859861204,
"repo_name": "thunderhoser/GewitterGefahr",
"id": "fa3e3cf9648d94b8a20ed16c29fb430a1f542c03",
"size": "4467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gewittergefahr/deep_learning/correlation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "31275"
},
{
"name": "Python",
"bytes": "5661041"
}
],
"symlink_target": ""
} |
'''
Created on 25-08-2011
'''
class DataValidator:
    """Class used to validate data in csv file. Uses schema to complete
    the task. Can check number of values in rows and their types. Also checks
    if each obligatory field has not empty value. Enables rule mechanism that
    allows correctly validating data with some exceptions in it, e.g. field
    nr 2 is obligatory, but should be empty if field 3 is 'abc'. If any error
    is found, logs it.

    Used schema must have at least following form:
    {
        "fields": [
            {
                "label": string,
                "type": string,
                "obligatory": bool,
            },
            ...
        ]
    }
    Other keys are ignored.
    """

    def __init__(self, csv_data, schema_descr):
        """Initiates object.

        Arguments:
        csv_data -- CsvData object (must expose get_header() and
            get_next_row(row_type='list'))
        schema_descr -- description of schema; must have a 'fields' key with
            the objects describing fields in data
        """
        self.data = csv_data
        self.fields_descr = schema_descr['fields']
        self.row_len = len(schema_descr['fields'])
        self.header_errors = []
        self.data_errors = []
        self.empty_fields = []
        self.rules = []
        self.add_rules()
        # TODO: change it so that it accepts rules saved in a file

    def add_rules(self):
        """Adds rules. Now it's hardcoded method and should be changed so that
        rules can be read from a file.
        """
        # example of a hardcoded rule
        """caly_kraj_cond = {
            6: ['Cały kraj', 'Dolnośląskie', 'Kujawsko-pomorskie', 'Lubelskie',
                'Lubuskie', 'Łódzkie', 'Małopolskie', 'Mazowieckie', 'Opolskie',
                'Podkarpackie', 'Podlaskie', 'Pomorskie', 'Śląskie',
                'Warmińsko-mazurskie', 'Wielkopolskie', 'Zachodniopomorskie'
            ]
        }
        caly_kraj_exp_vals = {
            7: [''],
            8: ['']
        }
        caly_kraj_rule = Rule(caly_kraj_cond, caly_kraj_exp_vals)
        self.rules.append(caly_kraj_rule)
        """

    def check_all(self):
        """Clears lists of errors and checks if header in the file is correct.
        If there are no errors in header, then data is checked.
        """
        self.header_errors = []
        self.data_errors = []
        self.empty_fields = []
        self.check_header()
        if self.is_header_correct():
            self.check_data()

    def is_header_correct(self):
        """Returns True if header is correct, False if not."""
        return self.header_errors == []

    def is_data_correct(self):
        """Returns True is data is correct, False if not."""
        return self.data_errors == []

    def is_all_correct(self):
        """Returns True if both header and data are correct, False if not."""
        return self.is_header_correct() and self.is_data_correct()

    def get_errors_log(self):
        """Returns string representing list of errors in header and data.
        Errors are separated by new line.
        """
        return '\n'.join(self.header_errors + self.data_errors)

    def check_header(self):
        """Catches errors in header and saves them. Gets expected names of
        fields in header from schema. Checks if:
        - all fields from schema are in the header
        - there is no unexpected fields(that were not described in schema)
        - order of fields in header is the same as order in schema
        """
        header = self.data.get_header()
        fields_names = [field_descr['label'] for field_descr in self.fields_descr]
        # fields required by the schema but absent from the header
        for name in fields_names:
            if name not in header:
                self.header_errors.append('HEADER: missing field %s' % name)
        # header fields that the schema does not describe
        header_copy = header[:]
        for name in fields_names:
            if name in header_copy:
                header_copy.remove(name)
        if header_copy != []:
            for name in header_copy:
                self.header_errors.append('HEADER: unknown field %s' % name)
        # field order is checked only when the sets of fields match
        if self.is_header_correct():
            for i, name in enumerate(header):
                if name != fields_names[i]:
                    # BUG FIX: the arguments used to be (name, i, ...), which
                    # made '%d' % name raise TypeError at runtime.
                    self.header_errors.append(
                        'HEADER: field nr %d is %s, %s expected' %
                        (i, name, fields_names[i]))
        else:
            self.header_errors.append('HEADER: will not check fields order due to previous errors')

    def check_data(self):
        """Checks data row by row. Checks if:
        - each row has expected number of fields
        - if fields are consistent with schema or rules
        """
        row = self.data.get_next_row(row_type='list')
        i = 1
        while row is not None:
            if len(row) != self.row_len:
                self.data_errors.append('ROW nr %d: unexpected length of the row: %d' % (i, len(row)))
            for field_nr, field in enumerate(row):
                expected = self.get_expected_values(row, field_nr)
                self.check_field(field, i, field_nr, expected)
            row = self.data.get_next_row(row_type='list')
            i += 1

    def check_field(self, value, row_nr, field_nr, exp_values=[]):
        """Tries to validate field in a row. Checks if:
        - value is in the list of expected values(if it is not empty)
        - field is expected(its number is consistent with length of fields
          in schema), it is ok as long as rows have correct length
        - field has nonempty value when it is obligatory
        - float and int values can be correctly casted

        Arguments:
        value -- value in the field
        row_nr -- number of the row
        field_nr -- number of the field in the row
        exp_values -- list of expected values, implicitly it is empty(if there
            are no rules); never mutated by this method
        """
        if field_nr >= self.row_len:
            # BUG FIX: the old error path indexed self.fields_descr with the
            # out-of-range field_nr again, raising IndexError instead of
            # logging the problem.
            self.data_errors.append('ROW nr %d, field nr %d: unexpected field' %
                                    (row_nr, field_nr))
            return
        label = self.fields_descr[field_nr]['label']
        if exp_values != []:
            # A rule overrides the schema: membership in exp_values is the
            # only check performed for this field.
            if value not in exp_values:
                # BUG FIX: the argument order did not match the message
                # placeholders (label/value/exp_values were shuffled).
                self.data_errors.append(
                    'ROW nr %d, field nr %d(%s): value is %s, expected value from list: %s' %
                    (row_nr, field_nr, label, value, exp_values))
            return
        expected_type = self.fields_descr[field_nr]['type']
        obligatory = self.fields_descr[field_nr]['obligatory']
        if value == '':
            if obligatory:
                self.data_errors.append(
                    'ROW nr %d, field nr %d(%s): missing value(empty field)' %
                    (row_nr, field_nr, label))
                if field_nr not in self.empty_fields:
                    self.empty_fields.append(field_nr)
            return
        if expected_type == 'string':
            return
        if expected_type == 'float':
            try:
                float(value.replace(',', ''))  # removing commas from number(if any exist)
            except ValueError:
                self.data_errors.append(
                    'ROW nr %d, field nr %d(%s): value type is string, float expected' %
                    (row_nr, field_nr, label))
        elif expected_type == 'int':
            try:
                int(value.replace(',', ''))
            except ValueError:
                # describe what the value actually is (float or string)
                try:
                    float(value.replace(',', ''))
                    value_type = 'float'
                except ValueError:
                    value_type = 'string'
                # BUG FIX: label and value_type were swapped in the args.
                self.data_errors.append(
                    'ROW nr %d, field nr %d(%s): value type is %s, int expected' %
                    (row_nr, field_nr, label, value_type))

    def get_empty_fields(self):
        """Returns list of fields' numbers that are obligatory, but empty
        in any row.
        """
        return self.empty_fields

    def get_expected_values(self, row, field_nr):
        """Uses list of rules to check, if the row accepts any rules.
        Returns list of expected values for field nr field_nr in the row.

        Arguments:
        row -- row of data
        field_nr -- number of field that is checked
        """
        expected_values = []
        for rule in self.rules:
            if rule.conditions_met(row):
                expected_values.extend(rule.get_expected_values(field_nr))
        return expected_values
class Rule:
    """Class describing rules(exceptions to schema) that might appear in data.
    Each rule has conditions and values. Their form is the same and following:
    {
        (int)field_nr: [ list of values ],
        ...
    }
    Rule is accepted when for each field_nr, value number field_nr in row is
    in that list. Then expected values of fields of the row are in the values
    dict[field_nr], e.g.

    conds = {
        1: ['abc', 'def'],
        3: ['qwe']
    }
    values = {
        2: ['', 'zxc'],
        4: ['']
    }

    If row[1] is 'abc' or 'def' and row[3] == 'qwe',
    then row[2] should be in '' or 'zxc' and row[4] == ''.
    """

    def __init__(self, conditions, values):
        """Initiates rule with conditions and expected values.

        Arguments:
        conditions -- conditions needed to accept this Rule
        values -- expected values of fields in row
        """
        self.conditions = conditions
        self.values = values

    def conditions_met(self, row):
        """Returns True if this Rule can be accepted by the given row,
        False if not.

        Arguments:
        row -- row to check
        """
        # PORTABILITY FIX: items() instead of the Python 2-only iteritems();
        # behaviour is identical on Python 2 and works on Python 3 as well.
        for field, field_values in self.conditions.items():
            row_value = row[field]
            if row_value not in field_values:
                return False
        return True

    def get_expected_values(self, field_nr):
        """Returns list of expected values for the field of number field_nr
        (empty list if the rule says nothing about that field).
        """
        try:
            return self.values[field_nr]
        except KeyError:
            return []
| {
"content_hash": "fe2020fd4358b769b088363a1f96329d",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 116,
"avg_line_length": 38.51086956521739,
"alnum_prop": 0.5325994919559696,
"repo_name": "CCLab/Raw-Salad",
"id": "c136f00717596486b8bf546a0cee03c3f2c9ddca",
"size": "10663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/autoupload/data_validator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "496399"
},
{
"name": "Python",
"bytes": "477320"
},
{
"name": "Shell",
"bytes": "734"
}
],
"symlink_target": ""
} |
"""
jsonapi.flask
=============
The *py-jsonapi* extension for flask. Binding the API to a flask application
is dead simple:
.. code-block:: python3
import flask
import jsonapi
import jsonapi.flask
app = flask.Flask(__name__)
api = jsonapi.flask.FlaskAPI("/api", db=..., flask_app=app)
# You can also initialize the API with the flask application using the
# *init_app()* method:
api.init_app(app)
You can add the models to the API as usual. They will be available under
``/api``.
current_api
-----------
You can access the current API via the *extensions* dictionary of the flask
application:
.. code-block:: python3
app.extensions["jsonapi"]
or you use the global variable ``current_api``:
.. code-block:: python3
from jsonapi.flask import current_api
The API instance is also added to the jinja environment:
.. code-block:: html
<p>
You can download your profile
<a href="{{ jsonapi.reverse_url('User', 'resource', id=current_user.id) }}">
here
</a>
</p>
API
---
.. autoclass:: FlaskAPI
.. autodata:: current_api
"""
# local
from .api import FlaskAPI, current_api
| {
"content_hash": "120ea4a8798b23c8c4308feefac82b1e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 84,
"avg_line_length": 19.83050847457627,
"alnum_prop": 0.647008547008547,
"repo_name": "benediktschmitt/py-jsonapi",
"id": "0a9bf4cd54329cdb9b8df51094473c183078ee28",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonapi/flask/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266616"
}
],
"symlink_target": ""
} |
from org.sikuli.basics import Debug
from org.sikuli.script import Region as JRegion
from org.sikuli.script.Constants import *
import sys
import inspect
DEBUG=False
class Region(JRegion):
    """Jython-side extension of the Java Region class.

    Adds 'with'-statement support (the region's bound methods temporarily
    replace the matching global Sikuli functions in __main__) plus a few
    Python-friendly overrides of Java methods.
    """
    # support for with:
    # override all global sikuli functions by this region's methods.
    def __enter__(self):
        # Shadow every matching global in __main__ with this region's bound
        # method; originals are saved in _global_funcs for __exit__.
        exclude_list = [ 'ROI' ]
        if DEBUG: print "with: entering *****", self
        self._global_funcs = {}
        dict = sys.modules['__main__'].__dict__
        for name in dir(self):
            if name in exclude_list: continue
            try:
                if not inspect.ismethod(getattr(self,name)):
                    continue
            except:
                continue
            if dict.has_key(name):
                # remember the original global, then replace it with the
                # bound method of this region
                self._global_funcs[name] = dict[name]
                if DEBUG and name == 'checkWith': print "with: save %s ( %s )"%(name, str(dict[name])[1:])
                dict[name] = eval("self."+name)
                if DEBUG and name == 'checkWith': print "with: is now: %s"%(str(dict[name])[1:])
        return self
    def __exit__(self, type, value, traceback):
        # Restore the original global functions saved by __enter__.
        if DEBUG: print "with: exiting ****", self
        dict = sys.modules['__main__'].__dict__
        for name in self._global_funcs.keys():
            dict[name] = self._global_funcs[name]
            if DEBUG and name == 'checkWith':
                print "with restore: %s"%(str(dict[name])[1:])
        self._global_funcs = None
    #######################################################################
    #---- SIKULI PUBLIC API
    #######################################################################
    # Python wait() needs to be here because Java Object has a final method: wait(long timeout).
    # If we want to let Sikuli users use wait(int/long timeout), we need this Python method.
    def wait(self, target, timeout=None):
        # Integer targets mean "wait this many seconds"; convert to float so
        # they are not swallowed by Java's final Object.wait(long).
        if isinstance(target, int) or isinstance(target, long):
            target = float(target)
        if timeout == None:
            return JRegion.wait(self, target)
        else:
            return JRegion.wait(self, target, timeout)
    # the new Region.text() feature (Tesseract 3) returns utf8
    def text(self):
        return JRegion.text(self).encode("utf8")
    # still needed, to be backwards compatible
    def observe(self, waitTime = FOREVER, background = False):
        return self.observeJ(waitTime, background)
| {
"content_hash": "82d766c0e98814edeab487c1bccc8c12",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 106,
"avg_line_length": 38.967213114754095,
"alnum_prop": 0.5523769457299117,
"repo_name": "henriqueguchi/SikuliServer",
"id": "4d66efa8f768ea5e25d54c6fc7eb5d6f561a8650",
"size": "2482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "new/Lib/sikuli/Region.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1672"
},
{
"name": "C",
"bytes": "3771"
},
{
"name": "CSS",
"bytes": "21689"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "HTML",
"bytes": "73016"
},
{
"name": "Java",
"bytes": "1185124"
},
{
"name": "JavaScript",
"bytes": "45235"
},
{
"name": "Python",
"bytes": "2372565"
},
{
"name": "Ruby",
"bytes": "8679"
},
{
"name": "Shell",
"bytes": "1990"
}
],
"symlink_target": ""
} |
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# Allow module docs to build without having sqlalchemy-migrate installed:
sys.path.append(os.path.dirname(os.path.abspath('.')))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinxcontrib.issuetracker']
# link to sqlalchemy docs
intersphinx_mapping = {
'sqlalchemy': ('http://www.sqlalchemy.org/docs/', None),
'python': ('http://docs.python.org/2.7', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SQLAlchemy Migrate'
copyright = u'2011, Evan Rosson, Jan Dittberner, Domen Kožar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.2'
# The full version, including alpha/beta/rc tags.
release = '0.7.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for sphinxcontrib.issuetracker
# --------------------------------------
issuetracker = 'google code'
issuetracker_project = 'sqlalchemy-migrate'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'SQLAlchemyMigratedoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# NOTE: the 'ur' string prefix was removed in Python 3 (see PEP 414); the
# literals below contain no escapes, so plain u'...' is value-identical
# on Python 2 and keeps this file importable on Python 3.
latex_documents = [
    ('index', 'SQLAlchemyMigrate.tex', u'SQLAlchemy Migrate Documentation',
     u'Evan Rosson, Jan Dittberner, Domen Kožar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| {
"content_hash": "89639f663c76e21302f902782813de20",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 91,
"avg_line_length": 32.4331550802139,
"alnum_prop": 0.7167353668590272,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "ffc7c1eb4db832500f33729e7bf64e5244fe06ef",
"size": "6659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/sqlalchemy-migrate/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
} |
"""
The most basic (working) CherryPy 3.1 Windows service possible.
Requires Mark Hammond's pywin32 package.
"""
import os
import cherrypy
import win32service
import win32serviceutil
from rfid import RFIDClient, ten_digit_to_comma_format
ip_address = "192.168.1.20"
controller_serial = 11111111
client = RFIDClient(ip_address, controller_serial)
class RootServer:
    """HTTP endpoint that adds/removes badges via the shared RFID ``client``."""

    @cherrypy.expose
    def index(self, apiKey=None, action=None, badge=None):
        """Handle a request to the access-control API.

        Parameters come in as query-string arguments:
        apiKey -- must match the configured secret, otherwise an empty
                  response is returned (nothing is revealed to probers)
        action -- "add" or "remove"
        badge  -- ten-digit badge number, converted to the controller's
                  comma format before use

        Returns a short human-readable status string.
        """
        # NOTE(review): hard-coded secret compared with ``==`` -- consider
        # moving it to configuration and using hmac.compare_digest.
        if apiKey != "secret":
            return ""  # no/bad API key: return nothing, as before
        if not badge:
            return "no badge number entered"
        badge = ten_digit_to_comma_format(int(badge))
        if action == "remove":
            try:
                client.remove_user(badge)
                return "User Removed Successfully"
            # was a bare ``except:`` -- that also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort behaviour
            # while letting process-control exceptions propagate
            except Exception:
                return "Failed To Remove User"
        elif action == "add":
            try:
                client.add_user(badge, [1, 2])
                return "User Added Successfully"
            except Exception:
                return "Failed To Add User"
        else:
            return "must specify an action"
class MyService(win32serviceutil.ServiceFramework):
    """Windows NT service wrapper that hosts the CherryPy application."""

    _svc_name_ = "CherryPyService"
    _svc_display_name_ = "CherryPy Service"

    def SvcDoRun(self):
        # Mount the app, then bring up an HTTPS listener on every interface.
        cherrypy.tree.mount(RootServer(), "/accessControlApi")
        # In practice you will want to set log.error_file below or in a
        # config file. If you use a config file, be sure to use an absolute
        # path to it, as you can't be assured what path your service will
        # run in.
        server_settings = {
            "server.socket_host": "0.0.0.0",
            "server.socket_port": 443,
            "server.ssl_module": "pyopenssl",
            "server.ssl_certificate": "server.crt",
            "server.ssl_private_key": "server.key",
            "log.access_file": os.path.join("access.log"),
        }
        cherrypy.config.update({"global": server_settings})
        cherrypy.engine.start()
        cherrypy.engine.block()

    def SvcStop(self):
        # Tell the SCM we are shutting down, stop CherryPy, then confirm.
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        cherrypy.engine.exit()
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
# Very important for use with py2exe: HandleCommandLine must run so the
# Service Controller learns about state changes -- otherwise it never
# knows that the service has stopped.
if __name__ == "__main__":
    win32serviceutil.HandleCommandLine(MyService)
| {
"content_hash": "d5440a9c9cf1203e957da31e889327e4",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 75,
"avg_line_length": 32.03529411764706,
"alnum_prop": 0.5534337128167462,
"repo_name": "pawl/Chinese-RFID-Access-Control-Library",
"id": "ebcb99eb6d6e9d7bb78ed1a6b1eacd46140893bc",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/windows_webservice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9322"
}
],
"symlink_target": ""
} |
from samplics.datasets.datasets import (
load_auto,
load_birth,
load_county_crop,
load_county_crop_means,
load_expenditure_milk,
load_nhanes2,
load_nhanes2brr,
load_nhanes2jk,
load_nmhis,
load_psu_frame,
load_psu_sample,
load_ssu_sample,
)
from samplics.categorical import CrossTabulation, Tabulation, Ttest
from samplics.estimation import ReplicateEstimator, TaylorEstimator
from samplics.sae import EblupAreaModel, EblupUnitModel, EbUnitModel, EllUnitModel
from samplics.sampling import OneMeanSampleSize, SampleSelection, SampleSize, allocate
from samplics.utils.basic_functions import transform
from samplics.utils.formats import array_to_dict
from samplics.weighting import ReplicateWeight, SampleWeight
# Public API of the samplics package: the names exported by
# ``from samplics import *`` and considered stable for callers.
__all__ = [
    "allocate",
    "array_to_dict",
    "CrossTabulation",
    "Tabulation",
    "Ttest",
    "EblupAreaModel",
    "EblupUnitModel",
    "EbUnitModel",
    "EllUnitModel",
    "load_auto",
    "load_birth",
    "load_county_crop",
    "load_county_crop_means",
    "load_expenditure_milk",
    "load_nhanes2",
    "load_nhanes2brr",
    "load_nhanes2jk",
    "load_nmhis",
    "load_psu_frame",
    "load_psu_sample",
    "load_ssu_sample",
    "OneMeanSampleSize",
    "SampleSelection",
    "SampleSize",
    "SampleWeight",
    "ReplicateWeight",
    "ReplicateEstimator",
    "TaylorEstimator",
    "transform",
]
# Package version string.
__version__ = "0.3.8"
| {
"content_hash": "6e2855a4a18990b8b9472d810e92034c",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 25.25,
"alnum_prop": 0.6895332390381895,
"repo_name": "survey-methods/samplics",
"id": "97d0d093a2a659c8196d9214ec0624eb4ae4a52a",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/samplics/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389217"
},
{
"name": "TeX",
"bytes": "2327"
}
],
"symlink_target": ""
} |
from timeseries import TimeSeries
import numpy as np
from scipy.stats import norm
from tsdb import *
import time
def identity(x):
    """Return *x* unchanged (no-op 'convert' for schema fields)."""
    # was ``identity = lambda x: x`` -- PEP 8 (E731) prefers a named def
    return x


# Database schema: field name -> {'convert': cast applied on insert,
# 'index': None for unindexed fields, 1 for indexed fields}.
schema = {
    'pk': {'convert': identity, 'index': None},
    'ts': {'convert': identity, 'index': None},
    'order': {'convert': int, 'index': 1},
    'blarg': {'convert': int, 'index': 1},
    'useless': {'convert': identity, 'index': None},
    'mean': {'convert': float, 'index': 1},
    'std': {'convert': float, 'index': 1},
    'vp': {'convert': bool, 'index': 1},
    'deleted': {'convert': bool, 'index': 1}
}
def tsmaker(m, s, j):
    """Randomly generate a time series for testing.

    Parameters
    ----------
    m : float
        Mean of the Gaussian bump shaping the series values.
    s : float
        Standard deviation of the Gaussian bump.
    j : float
        Magnitude of the random "jitter" added to each sample.

    Returns
    -------
    tuple
        (metadata dict, TimeSeries) pair.
    """
    # metadata: random 'order' in [-5, 5], random 'blarg' in {1, 2};
    # the vantage-point flag always starts out False
    # (dict literals evaluate entries in order, so the RNG call sequence
    # matches the original implementation)
    meta = {
        'order': int(np.random.choice(
            [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])),
        'blarg': int(np.random.choice([1, 2])),
        'vp': False,
    }
    # series data: Gaussian pdf sampled on [0, 1) plus jitter
    t = np.arange(0.0, 1.0, 0.01)
    v = norm.pdf(t, m, s) + j * np.random.randn(100)
    return meta, TimeSeries(t, v)
def test_server():
    """End-to-end exercise of TSDBServer/TSDBProtocol over an in-memory DictDB.

    For every protocol operation the test packages the op dict, checks it
    matches the corresponding TSDBOp_* message, runs it through the protocol,
    and verifies the returned status/payload. Covers insert/delete, metadata
    upsert, selects, triggers, augmented selects, vantage-point
    representation and similarity search, and iSAX search.
    """
    ########################################
    #
    # set up
    #
    ########################################
    # initialize database
    db = DictDB(schema, 'pk')
    # initialize server
    server = TSDBServer(db)
    assert server.db == db
    assert server.port == 9999
    # initialize protocol
    protocol = TSDBProtocol(server)
    assert protocol.server == server
    # parameters for testing
    num_ts = 25
    num_vps = 5
    ########################################
    #
    # create dummy data for testing
    #
    ########################################
    # a manageable number of test time series
    mus = np.random.uniform(low=0.0, high=1.0, size=num_ts)
    sigs = np.random.uniform(low=0.05, high=0.4, size=num_ts)
    jits = np.random.uniform(low=0.05, high=0.2, size=num_ts)
    # initialize dictionaries for time series and their metadata
    tsdict = {}
    metadict = {}
    # fill dictionaries with randomly generated entries for database
    for i, m, s, j in zip(range(num_ts), mus, sigs, jits):
        meta, tsrs = tsmaker(m, s, j)  # generate data
        pk = "ts-{}".format(i)  # generate primary key
        tsdict[pk] = tsrs  # store time series data
        metadict[pk] = meta  # store metadata
    # for testing later on
    ts_keys = sorted(tsdict.keys())
    ########################################
    #
    # for all tests below:
    # - package the operation
    # - test that this is packaged as expected
    # - run the operation
    # - unpack the results of running the operation
    # - test that the return values are as expected
    #
    ########################################
    ########################################
    #
    # test time series insert
    #
    ########################################
    for k in tsdict:
        # package the operation
        op = {'op': 'insert_ts', 'pk': k, 'ts': tsdict[k]}
        # test that this is packaged as expected
        assert op == TSDBOp_InsertTS(k, tsdict[k])
        # run operation
        result = protocol._insert_ts(op)
        # unpack results
        status, payload = result['status'], result['payload']
        # test that return values are as expected
        assert status == TSDBStatus.OK
        assert payload is None
    idx = np.random.choice(list(tsdict.keys()))
    # try to insert a duplicate primary key
    op = {'op': 'insert_ts', 'pk': idx, 'ts': tsdict[idx]}
    # test that this is packaged as expected
    assert op == TSDBOp_InsertTS(idx, tsdict[idx])
    # run operation
    result = protocol._insert_ts(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_KEY
    assert payload is None
    ########################################
    #
    # test time series deletion
    #
    ########################################
    idx = np.random.choice(list(tsdict.keys()))
    # delete a valid time series
    # package the operation
    op = {'op': 'delete_ts', 'pk': idx}
    # test that this is packaged as expected
    assert op == TSDBOp_DeleteTS(idx)
    # run operation
    result = protocol._delete_ts(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # check that it isn't present any more
    # package the operation
    op = {'op': 'select', 'md': {'pk': idx}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'pk': idx}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 0
    # add it back in
    # package the operation
    op = {'op': 'insert_ts', 'pk': idx, 'ts': tsdict[idx]}
    # test that this is packaged as expected
    assert op == TSDBOp_InsertTS(idx, tsdict[idx])
    # run operation
    result = protocol._insert_ts(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # check that it's present now
    # package the operation
    op = {'op': 'select', 'md': {'pk': idx}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'pk': idx}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 1
    # delete an invalid time series
    # package the operation
    op = {'op': 'delete_ts', 'pk': 'mistake'}
    # test that this is packaged as expected
    assert op == TSDBOp_DeleteTS('mistake')
    # run operation
    result = protocol._delete_ts(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_KEY
    assert payload is None
    ########################################
    #
    # test metadata upsert
    #
    ########################################
    for k in metadict:
        # package the operation
        op = {'op': 'upsert_meta', 'pk': k, 'md': metadict[k]}
        # test that this is packaged as expected
        assert op == TSDBOp_UpsertMeta(k, metadict[k])
        # run operation
        result = protocol._upsert_meta(op)
        # unpack results
        status, payload = result['status'], result['payload']
        # test that return values are as expected
        assert status == TSDBStatus.OK
        assert payload is None
    ########################################
    #
    # test select operations
    #
    ########################################
    # select all database entries; no metadata fields
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': None, 'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert list(payload[list(payload.keys())[0]].keys()) == []
        assert sorted(payload.keys()) == ts_keys
    # select all database entries; no metadata fields; sort by primary key
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': None,
          'additional': {'sort_by': '+pk'}}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, None, {'sort_by': '+pk'})
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert list(payload[list(payload.keys())[0]].keys()) == []
        assert list(payload.keys()) == ts_keys
    # select all database entries; all metadata fields
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': [], 'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, [], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert (sorted(list(payload[list(payload.keys())[0]].keys())) ==
                ['blarg', 'order', 'pk', 'vp'])
        assert sorted(payload.keys()) == ts_keys
    # select all database entries; all invalid metadata fields
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': ['wrong', 'oops'],
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, ['wrong', 'oops'], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert sorted(list(payload[list(payload.keys())[0]].keys())) == []
        assert sorted(payload.keys()) == ts_keys
    # select all database entries; some invalid metadata fields
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': ['not_there', 'blarg'],
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, ['not_there', 'blarg'], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert list(payload[list(payload.keys())[0]].keys()) == ['blarg']
        assert sorted(payload.keys()) == ts_keys
    # select all database entries; specific metadata fields
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': ['blarg', 'order'],
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, ['blarg', 'order'], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert (sorted(list(payload[list(payload.keys())[0]].keys())) ==
                ['blarg', 'order'])
        assert sorted(payload.keys()) == ts_keys
    # not present based on how time series were generated
    # package the operation
    op = {'op': 'select', 'md': {'order': 10}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'order': 10}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 0
    # not present based on how time series were generated
    # package the operation
    op = {'op': 'select', 'md': {'blarg': 0}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'blarg': 0}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 0
    # multiple select criteria
    # not present based on how time series were generated
    # package the operation
    op = {'op': 'select', 'md': {'order': 10, 'blarg': 0}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'order': 10, 'blarg': 0}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 0
    # operator select criteria
    # not present based on how time series were generated
    # package the operation
    op = {'op': 'select', 'md': {'order': {'>=': 10}}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'order': {'>=': 10}}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 0
    # operator select criteria
    # present based on how time series were generated
    # package the operation
    op = {'op': 'select', 'md': {'order': {'<': 10}}, 'fields': None,
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'order': {'<': 10}}, None, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) > 0
    ########################################
    #
    # test trigger operations
    #
    ########################################
    # add dummy trigger
    # package the operation
    op = {'op': 'add_trigger', 'proc': 'junk', 'onwhat': 'insert_ts',
          'target': None, 'arg': 'db:one:ts'}
    # test that this is packaged as expected
    assert op == TSDBOp_AddTrigger('junk', 'insert_ts', None, 'db:one:ts')
    # run operation
    result = protocol._add_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # add stats trigger
    # package the operation
    op = {'op': 'add_trigger', 'proc': 'stats', 'onwhat': 'insert_ts',
          'target': ['mean', 'std'], 'arg': None}
    # test that this is packaged as expected
    assert op == TSDBOp_AddTrigger('stats', 'insert_ts', ['mean', 'std'], None)
    # run operation
    result = protocol._add_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # try to add a trigger on an invalid event
    # package the operation
    op = {'op': 'add_trigger', 'proc': 'junk', 'onwhat': 'stuff_happening',
          'target': None, 'arg': 'db:one:ts'}
    # test that this is packaged as expected
    assert op == TSDBOp_AddTrigger(
        'junk', 'stuff_happening', None, 'db:one:ts')
    # run operation
    result = protocol._add_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_OPERATION
    assert payload is None
    # try to add a trigger to an invalid field
    # package the operation
    op = {'op': 'add_trigger', 'proc': 'stats', 'onwhat': 'insert_ts',
          'target': ['mean', 'wrong_one'], 'arg': None}
    # test that this is packaged as expected
    assert op == TSDBOp_AddTrigger(
        'stats', 'insert_ts', ['mean', 'wrong_one'], None)
    # run operation
    result = protocol._add_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_OPERATION
    assert payload is None
    # try to remove a trigger that doesn't exist
    # package the operation
    op = {'op': 'remove_trigger', 'proc': 'not_here', 'onwhat': 'insert_ts',
          'target': None}
    # test that this is packaged as expected
    assert op == TSDBOp_RemoveTrigger('not_here', 'insert_ts', None)
    # run operation
    result = protocol._remove_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_OPERATION
    assert payload is None
    # try to remove a trigger on an invalid event
    # package the operation
    op = {'op': 'remove_trigger', 'proc': 'stats', 'onwhat': 'stuff_happening',
          'target': None}
    # test that this is packaged as expected
    assert op == TSDBOp_RemoveTrigger('stats', 'stuff_happening', None)
    # run operation
    result = protocol._remove_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_OPERATION
    assert payload is None
    # try to remove a trigger associated with a particular target
    # (used to delete vantage point representation)
    # package the operation
    op = {'op': 'remove_trigger', 'proc': 'stats', 'onwhat': 'insert_ts',
          'target': ['mean', 'std']}
    # test that this is packaged as expected
    assert op == TSDBOp_RemoveTrigger('stats', 'insert_ts', ['mean', 'std'])
    # run operation
    result = protocol._remove_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # add trigger back in
    # package the operation
    op = {'op': 'add_trigger', 'proc': 'stats', 'onwhat': 'insert_ts',
          'target': ['mean', 'std'], 'arg': None}
    # test that this is packaged as expected
    assert op == TSDBOp_AddTrigger(
        'stats', 'insert_ts', ['mean', 'std'], None)
    # run operation
    result = protocol._add_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # check all triggers
    triggers = [k for k, v in server.db.triggers.items() if len(v) > 0]
    assert triggers == ['insert_ts']
    assert (sorted([t[0] for t in server.db.triggers['insert_ts']]) ==
            ['junk', 'stats'])
    ########################################
    #
    # test augmented select operations
    #
    ########################################
    # remove trigger
    op = {'op': 'remove_trigger', 'proc': 'stats', 'onwhat': 'insert_ts',
          'target': None}
    # test that this is packaged as expected
    assert op == TSDBOp_RemoveTrigger('stats', 'insert_ts', None)
    # run operation
    result = protocol._remove_trigger(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    # add a new time series
    # package the operation
    op = {'op': 'insert_ts', 'pk': 'test', 'ts': tsdict['ts-1']}
    # test that this is packaged as expected
    assert op == TSDBOp_InsertTS('test', tsdict['ts-1'])
    # run operation
    result = protocol._insert_ts(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload is None
    ########################################
    #
    # test augmented select
    #
    ########################################
    # package the operation
    op = {'op': 'augmented_select', 'proc': 'stats', 'target': ['mean', 'std'],
          'arg': None, 'md': {'pk': 'test'}, 'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_AugmentedSelect(
        'stats', ['mean', 'std'], None, {'pk': 'test'}, None)
    # run operation
    result = protocol._augmented_select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 1
    payload_fields = list(payload[list(payload.keys())[0]].keys())
    assert 'mean' in payload_fields
    assert 'std' in payload_fields
    assert (np.round(payload['test']['mean'], 4) ==
            np.round(tsdict['ts-1'].mean(), 4))
    assert (np.round(payload['test']['std'], 4) ==
            np.round(tsdict['ts-1'].std(), 4))
    ########################################
    #
    # test vantage point representation
    #
    ########################################
    # pick a new time series to add as a vantage point
    # randomly choose time series as vantage points
    vpkeys = list(np.random.choice(ts_keys, size=num_vps, replace=False))
    distkeys = sorted(['d_vp_' + i for i in vpkeys])
    # add the time series as vantage points
    for i in range(num_vps):
        # package the operation
        op = {'op': 'insert_vp', 'pk': vpkeys[i]}
        # test that this is packaged as expected
        assert op == TSDBOp_InsertVP(vpkeys[i])
        # run operation
        result = protocol._insert_vp(op)
        # unpack results
        status, payload = result['status'], result['payload']
        # test that return values are as expected
        assert status == TSDBStatus.OK
        assert payload is None
    # check that the distance fields are now in the database
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': distkeys, 'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, distkeys, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert (sorted(list(payload[list(payload.keys())[0]].keys())) ==
                distkeys)
    # try to add a time series that doesn't exist as a vantage point
    # package the operation
    op = {'op': 'insert_vp', 'pk': 'mistake'}
    # test that this is packaged as expected
    assert op == TSDBOp_InsertVP('mistake')
    # run operation
    result = protocol._insert_vp(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_KEY
    assert payload is None
    # remove them all
    for i in range(num_vps):
        # package the operation
        op = {'op': 'delete_vp', 'pk': vpkeys[i]}
        # test that this is packaged as expected
        assert op == TSDBOp_DeleteVP(vpkeys[i])
        # run operation
        result = protocol._delete_vp(op)
        # unpack results
        status, payload = result['status'], result['payload']
        # test that return values are as expected
        assert status == TSDBStatus.OK
        assert payload is None
    # check that the distance fields are now not in the database
    # package the operation
    op = {'op': 'select', 'md': {}, 'fields': distkeys, 'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({}, distkeys, None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    if len(payload) > 0:
        assert (list(payload[list(payload.keys())[0]].keys()) == [])
    # try to delete a vantage point that doesn't exist
    # package the operation
    op = {'op': 'delete_vp', 'pk': 'mistake'}
    # test that this is packaged as expected
    assert op == TSDBOp_DeleteVP('mistake')
    # run operation
    result = protocol._delete_vp(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.INVALID_KEY
    assert payload is None
    # add them back in
    for i in range(num_vps):
        # package the operation
        op = {'op': 'insert_vp', 'pk': vpkeys[i]}
        # test that this is packaged as expected
        assert op == TSDBOp_InsertVP(vpkeys[i])
        # run operation
        result = protocol._insert_vp(op)
        # unpack results
        status, payload = result['status'], result['payload']
        # test that return values are as expected
        assert status == TSDBStatus.OK
        assert payload is None
    ########################################
    #
    # test vantage point similarity search
    #
    ########################################
    # first create a query time series
    _, query = tsmaker(np.random.uniform(low=0.0, high=1.0),
                       np.random.uniform(low=0.05, high=0.4),
                       np.random.uniform(low=0.05, high=0.2))
    # single closest time series
    # package the operation
    op = {'op': 'vp_similarity_search', 'query': query, 'top': 1}
    # test that this is packaged as expected
    assert op == TSDBOp_VPSimilaritySearch(query, 1)
    # run operation
    result = protocol._vp_similarity_search(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) == 1
    # 5 closest time series
    # package the operation
    op = {'op': 'vp_similarity_search', 'query': query, 'top': 5}
    # test that this is packaged as expected
    assert op == TSDBOp_VPSimilaritySearch(query, 5)
    # run operation
    result = protocol._vp_similarity_search(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert len(payload) <= 5
    # run similarity search on an existing time series - should return itself
    # pick a random time series
    idx = np.random.choice(list(tsdict.keys()))
    # package the operation
    op = {'op': 'vp_similarity_search', 'query': tsdict[idx], 'top': 1}
    # test that this is packaged as expected
    assert op == TSDBOp_VPSimilaritySearch(tsdict[idx], 1)
    # run operation
    result = protocol._vp_similarity_search(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    # recover the time series for comparison
    closest_ts = list(payload)[0]
    # package the operation
    op = {'op': 'select', 'md': {'pk': closest_ts}, 'fields': ['ts'],
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'pk': closest_ts}, ['ts'], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload[closest_ts]['ts'] == tsdict[idx]
    ########################################
    #
    # test isax functions
    #
    ########################################
    # run similarity search on an existing time series - should return itself
    # pick a random time series
    idx = np.random.choice(list(tsdict.keys()))
    # package the operation
    op = {'op': 'isax_similarity_search', 'query': tsdict[idx]}
    # test that this is packaged as expected
    assert op == TSDBOp_iSAXSimilaritySearch(tsdict[idx])
    # run operation
    result = protocol._isax_similarity_search(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    # recover the time series for comparison
    closest_ts = list(payload)[0]
    # package the operation
    op = {'op': 'select', 'md': {'pk': closest_ts}, 'fields': ['ts'],
          'additional': None}
    # test that this is packaged as expected
    assert op == TSDBOp_Select({'pk': closest_ts}, ['ts'], None)
    # run operation
    result = protocol._select(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert status == TSDBStatus.OK
    assert payload[closest_ts]['ts'] == tsdict[idx]
    # visualize tree representation
    # package the operation
    op = {'op': 'isax_tree'}
    # test that this is packaged as expected
    assert op == TSDBOp_iSAXTree()
    # run operation
    result = protocol._isax_tree(op)
    # unpack results
    status, payload = result['status'], result['payload']
    # test that return values are as expected
    assert isinstance(payload, str)
    ########################################
    #
    # tear down
    #
    ########################################
    db = None
    server = None
    protocol = None
| {
"content_hash": "75827c3f05f7433244f0bb7cd55f74b7",
"timestamp": "",
"source": "github",
"line_count": 876,
"max_line_length": 79,
"avg_line_length": 34.04680365296804,
"alnum_prop": 0.5909807208717519,
"repo_name": "Mynti207/cs207project",
"id": "22d3323eb2bb6351a88656b13a8d99c4a0b5d5f2",
"size": "29825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tsdb_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39173"
}
],
"symlink_target": ""
} |
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
class CompoundFileError(IOError):
    """
    Root of the exception hierarchy raised while reading compound documents.
    """


class CompoundFileHeaderError(CompoundFileError):
    """
    Raised for problems detected in the document header.
    """


class CompoundFileMasterFatError(CompoundFileError):
    """
    Raised for problems detected in the master FAT.
    """


class CompoundFileNormalFatError(CompoundFileError):
    """
    Raised for problems detected in the normal FAT.
    """


class CompoundFileMiniFatError(CompoundFileError):
    """
    Raised for problems detected in the mini FAT.
    """


class CompoundFileDirEntryError(CompoundFileError):
    """
    Raised for problems detected in directory entries.
    """


class CompoundFileInvalidMagicError(CompoundFileHeaderError):
    """
    Raised when the document's magic number is invalid.
    """


class CompoundFileInvalidBomError(CompoundFileHeaderError):
    """
    Raised when the document's byte order is anything but little-endian.
    """


class CompoundFileLargeNormalFatError(CompoundFileNormalFatError):
    """
    Raised when the document's FAT is excessively large.
    """


class CompoundFileNormalLoopError(CompoundFileNormalFatError):
    """
    Raised when a FAT chain contains a cycle.
    """


class CompoundFileLargeMiniFatError(CompoundFileMiniFatError):
    """
    Raised when the document's mini FAT is excessively large.
    """


class CompoundFileNoMiniFatError(CompoundFileMiniFatError):
    """
    Raised when opening a file that should live in the mini FAT but the
    document has no mini FAT at all.
    """


class CompoundFileMasterLoopError(CompoundFileMasterFatError):
    """
    Raised when the master FAT contains a loop.
    """


class CompoundFileDirLoopError(CompoundFileDirEntryError):
    """
    Raised when the directory hierarchy contains a loop.
    """


class CompoundFileNotFoundError(CompoundFileError):
    """
    Raised when a named stream or storage cannot be found.
    """


class CompoundFileNotStreamError(CompoundFileError):
    """
    Raised when attempting to open a storage as if it were a stream.
    """
class CompoundFileWarning(Warning):
    """
    Root of the warning hierarchy issued while reading compound documents.
    """


class CompoundFileHeaderWarning(CompoundFileWarning):
    """
    Issued for suspicious header attributes.
    """


class CompoundFileMasterFatWarning(CompoundFileWarning):
    """
    Issued for suspicious master FAT content.
    """


class CompoundFileNormalFatWarning(CompoundFileWarning):
    """
    Issued for suspicious normal FAT content.
    """


class CompoundFileMiniFatWarning(CompoundFileWarning):
    """
    Issued for suspicious mini FAT content.
    """


class CompoundFileDirEntryWarning(CompoundFileWarning):
    """
    Issued for suspicious directory entry content.
    """


class CompoundFileSectorSizeWarning(CompoundFileHeaderWarning):
    """
    Issued when a document declares a strange sector size.
    """


class CompoundFileVersionWarning(CompoundFileHeaderWarning):
    """
    Issued when a document was written by an unknown library version.
    """


class CompoundFileMasterSectorWarning(CompoundFileNormalFatWarning):
    """
    Issued for mis-marked master FAT sectors.
    """


class CompoundFileNormalSectorWarning(CompoundFileNormalFatWarning):
    """
    Issued for mis-marked normal FAT sectors.
    """


class CompoundFileDirNameWarning(CompoundFileDirEntryWarning):
    """
    Issued for invalid directory entry names.
    """


class CompoundFileDirTypeWarning(CompoundFileDirEntryWarning):
    """
    Issued for invalid directory entry types.
    """


class CompoundFileDirIndexWarning(CompoundFileDirEntryWarning):
    """
    Issued for suspicious directory sibling or child indexes.
    """


class CompoundFileDirTimeWarning(CompoundFileDirEntryWarning):
    """
    Issued for suspicious directory entry timestamps.
    """


class CompoundFileDirSectorWarning(CompoundFileDirEntryWarning):
    """
    Issued for suspicious directory start sectors.
    """


class CompoundFileDirSizeWarning(CompoundFileDirEntryWarning):
    """
    Issued for suspicious directory size entries.
    """


class CompoundFileTruncatedWarning(CompoundFileWarning):
    """
    Issued when the compound file appears to be truncated.
    """


class CompoundFileEmulationWarning(CompoundFileWarning):
    """
    Issued when an emulated memory-map is being used.
    """
| {
"content_hash": "dbf7910cef386de95eec568f4096cd4a",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 79,
"avg_line_length": 26.176795580110497,
"alnum_prop": 0.7283663993246096,
"repo_name": "waveform80/compoundfiles",
"id": "aa6192b705cc0d348e41a48b729a8559a19bc109",
"size": "5985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compoundfiles/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6805"
},
{
"name": "Python",
"bytes": "103009"
}
],
"symlink_target": ""
} |
import os
import re
import json
import base64
import logging
import datetime
import time
import copy
import decimal
import cgi
import numpy
import pymongo
from lib import config, util, util_litecoin
D = decimal.Decimal
def get_market_price(price_data, vol_data):
    """Derive a single market price as the volume-weighted average of trades.

    price_data -- sequence of per-trade unit prices
    vol_data -- sequence of per-trade volumes, used as weights; must be the
        same length as price_data

    Raises ValueError on invalid input. (These checks were previously plain
    ``assert`` statements, which are silently stripped under ``python -O``.)
    """
    if len(price_data) != len(vol_data):
        raise ValueError("price_data and vol_data must be the same length")
    if len(price_data) > config.MARKET_PRICE_DERIVE_NUM_POINTS:
        raise ValueError("too many data points (max %s)"
            % config.MARKET_PRICE_DERIVE_NUM_POINTS)
    #weight by volume so that larger trades pull the derived price harder
    market_price = numpy.average(price_data, weights=vol_data)
    return market_price
def get_market_price_summary(asset1, asset2, with_last_trades=0, start_dt=None, end_dt=None):
    """Gets a synthesized trading "market price" for a specified asset pair (if available), as well as additional info.

    asset1, asset2 -- the two assets of the pair; they are normalized into
        canonical (base, quote) order via util.assets_to_asset_pair, so
        argument order does not matter
    with_last_trades -- if non-zero, include recent trade rows in the result;
        must be an int in [0, 30]
    start_dt, end_dt -- window of trades considered (defaults: the 10 days
        up to now)

    Returns a dict with 'market_price', 'base_asset', 'quote_asset' and
    'last_trades', or None when there is no trade data in the window.
    NOTE(review): an earlier version of this docstring claimed False was
    returned on no data; the code below explicitly returns None.
    """
    mongo_db = config.mongo_db
    if not end_dt:
        end_dt = datetime.datetime.utcnow()
    if not start_dt:
        start_dt = end_dt - datetime.timedelta(days=10) #default to 10 days in the past
    #look for the last max 6 trades within the past 10 day window
    base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
    base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
    quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
    if not isinstance(with_last_trades, int) or with_last_trades < 0 or with_last_trades > 30:
        raise Exception("Invalid with_last_trades")
    if not base_asset_info or not quote_asset_info:
        raise Exception("Invalid asset(s)")
    #fetch the most recent trades for the pair (newest first, capped)
    last_trades = mongo_db.trades.find({
        "base_asset": base_asset,
        "quote_asset": quote_asset,
        'block_time': { "$gte": start_dt, "$lte": end_dt }
    },
    {'_id': 0, 'block_index': 1, 'block_time': 1, 'unit_price': 1, 'base_quantity_normalized': 1, 'quote_quantity_normalized': 1}
    ).sort("block_time", pymongo.DESCENDING).limit(max(config.MARKET_PRICE_DERIVE_NUM_POINTS, with_last_trades))
    if not last_trades.count():
        return None #no suitable trade data to form a market price (return None, NOT False here)
    last_trades = list(last_trades)
    last_trades.reverse() #cursor was newest-first; list is now ordered oldest to newest
    #volume-weighted average over (at most) the newest MARKET_PRICE_DERIVE_NUM_POINTS trades
    market_price = get_market_price(
        [last_trades[i]['unit_price'] for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))],
        [(last_trades[i]['base_quantity_normalized'] + last_trades[i]['quote_quantity_normalized']) for i in xrange(min(len(last_trades), config.MARKET_PRICE_DERIVE_NUM_POINTS))])
    result = {
        'market_price': float(D(market_price)),
        'base_asset': base_asset,
        'quote_asset': quote_asset,
    }
    if with_last_trades:
        #[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
        result['last_trades'] = [[
            t['block_time'],
            t['unit_price'],
            t['base_quantity_normalized'],
            t['quote_quantity_normalized'],
            t['block_index']
        ] for t in last_trades]
    else:
        result['last_trades'] = []
    return result
def calc_inverse(quantity):
    """Return the reciprocal (1 / quantity) as a float, via Decimal math."""
    reciprocal = decimal.Decimal(1) / decimal.Decimal(quantity)
    return float(reciprocal)
def calc_price_change(open_price, close_price):
    """Return the percent change from open_price to close_price.

    e.g. 100 -> 150 yields 50.0 (i.e. +50%). The parameters were renamed from
    ``open``/``close``, which shadowed the Python builtins; every call site in
    this module passes positionally, so the rename is call-compatible.
    """
    delta = decimal.Decimal(close_price) - decimal.Decimal(open_price)
    return float(decimal.Decimal(100) * delta / decimal.Decimal(open_price))
def get_price_primatives(start_dt=None, end_dt=None):
    """Fetch the XPT/LTC market summary plus both derived unit prices.

    Returns (summary, xpt_ltc_price, ltc_xpt_price); the two prices are None
    when no XPT/LTC trade data is available for the window.
    """
    summary = get_market_price_summary(config.XPT, config.LTC,
        start_dt=start_dt, end_dt=end_dt)
    if summary:
        xpt_ltc = summary['market_price']   #price expressed as XPT/LTC
        ltc_xpt = calc_inverse(xpt_ltc)     #inverted: LTC/XPT
    else:
        xpt_ltc = None
        ltc_xpt = None
    return summary, xpt_ltc, ltc_xpt
def get_asset_info(asset, at_dt=None):
    """Return the tracked-asset info record for ``asset``.

    asset -- asset name; config.XPT and config.LTC are special-cased and get
        synthesized supply figures instead of plain tracked-asset data
    at_dt -- if given (and the asset is not XPT/LTC), return the asset state
        as of that datetime; returns None if the asset did not exist yet then

    Raises Exception when there is no tracked_assets record for ``asset``.
    """
    mongo_db = config.mongo_db
    asset_info = mongo_db.tracked_assets.find_one({'asset': asset})
    #BUGFIX: validate immediately -- previously this check only ran at the
    # very end of the function, AFTER asset_info had been dereferenced, so a
    # missing record surfaced as a confusing TypeError instead
    if not asset_info:
        raise Exception("Invalid asset: %s" % asset)
    if asset not in (config.XPT, config.LTC) and at_dt and asset_info['_at_block_time'] > at_dt:
        #get the asset info at or before the given at_dt datetime
        for e in reversed(asset_info['_history']): #newest to oldest
            if e['_at_block_time'] <= at_dt:
                asset_info = e
                break
        else: #asset was created AFTER at_dt
            asset_info = None
        if asset_info is None: return None
        assert asset_info['_at_block_time'] <= at_dt
    #modify some of the properties of the returned asset_info for LTC and XPT
    if asset == config.LTC:
        if at_dt:
            start_block_index, end_block_index = util.get_block_indexes_for_dates(end_dt=at_dt)
            asset_info['total_issued'] = util_litecoin.get_ltc_supply(normalize=False, at_block_index=end_block_index)
            asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
        else:
            asset_info['total_issued'] = util_litecoin.get_ltc_supply(normalize=False)
            asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
    elif asset == config.XPT:
        #BUG: this does not take end_dt (if specified) into account. however, the deviation won't be too big
        # as XPT doesn't deflate quickly at all, and shouldn't matter that much since there weren't any/much trades
        # before the end of the burn period (which is what is involved with how we use at_dt with currently)
        asset_info['total_issued'] = util.call_jsonrpc_api("get_xpt_supply", abort_on_error=True)['result']
        asset_info['total_issued_normalized'] = util_litecoin.normalize_quantity(asset_info['total_issued'])
    return asset_info
def get_xpt_ltc_price_info(asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price, with_last_trades=0, start_dt=None, end_dt=None):
    """Gather price data for ``asset`` expressed against both XPT and LTC.

    asset -- the asset of interest; may itself be config.XPT or config.LTC
    mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price -- the XPT/LTC market summary
        and the two derived unit prices, as returned by get_price_primatives()
    with_last_trades, start_dt, end_dt -- passed through to
        get_market_price_summary()

    Returns a 6-tuple (price_summary_in_xpt, price_summary_in_ltc,
    price_in_xpt, price_in_ltc, aggregated_price_in_xpt,
    aggregated_price_in_ltc); any element may be None when the relevant
    market has no trade data.
    """
    if asset not in [config.LTC, config.XPT]:
        #get price data for both the asset with XPT, as well as LTC
        price_summary_in_xpt = get_market_price_summary(asset, config.XPT,
            with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
        price_summary_in_ltc = get_market_price_summary(asset, config.LTC,
            with_last_trades=with_last_trades, start_dt=start_dt, end_dt=end_dt)
        #aggregated (averaged) price (expressed as XPT) for the asset on both the XPT and LTC markets
        if price_summary_in_xpt: #i.e. there IS trade data on the XPT market
            price_in_xpt = price_summary_in_xpt['market_price']
            if xpt_ltc_price:
                aggregated_price_in_xpt = float(((D(price_summary_in_xpt['market_price']) + D(xpt_ltc_price)) / D(2)))
            else: aggregated_price_in_xpt = None
        else:
            price_in_xpt = None
            aggregated_price_in_xpt = None
        if price_summary_in_ltc: #i.e. there IS trade data on the LTC market
            price_in_ltc = price_summary_in_ltc['market_price']
            if ltc_xpt_price:
                aggregated_price_in_ltc = float(((D(price_summary_in_ltc['market_price']) + D(ltc_xpt_price)) / D(2)))
            else: aggregated_price_in_ltc = None
        else:
            aggregated_price_in_ltc = None
            price_in_ltc = None
    else:
        #here we take the normal XPT/LTC pair, and invert it to LTC/XPT, to get XPT's data in terms of a LTC base
        # (this is the only area we do this, as LTC/XPT is NOT standard pair ordering)
        price_summary_in_xpt = mps_xpt_ltc #might be None
        price_summary_in_ltc = copy.deepcopy(mps_xpt_ltc) if mps_xpt_ltc else None #must invert this -- might be None
        if price_summary_in_ltc:
            price_summary_in_ltc['market_price'] = calc_inverse(price_summary_in_ltc['market_price'])
            price_summary_in_ltc['base_asset'] = config.LTC
            price_summary_in_ltc['quote_asset'] = config.XPT
            for i in xrange(len(price_summary_in_ltc['last_trades'])):
                #[0]=block_time, [1]=unit_price, [2]=base_quantity_normalized, [3]=quote_quantity_normalized, [4]=block_index
                price_summary_in_ltc['last_trades'][i][1] = calc_inverse(price_summary_in_ltc['last_trades'][i][1])
                price_summary_in_ltc['last_trades'][i][2], price_summary_in_ltc['last_trades'][i][3] = \
                    price_summary_in_ltc['last_trades'][i][3], price_summary_in_ltc['last_trades'][i][2] #swap
        if asset == config.XPT:
            price_in_xpt = 1.0
            price_in_ltc = price_summary_in_ltc['market_price'] if price_summary_in_ltc else None
            aggregated_price_in_xpt = 1.0
            aggregated_price_in_ltc = ltc_xpt_price #might be None
        else:
            assert asset == config.LTC
            price_in_xpt = price_summary_in_xpt['market_price'] if price_summary_in_xpt else None
            price_in_ltc = 1.0
            aggregated_price_in_xpt = xpt_ltc_price #might be None
            aggregated_price_in_ltc = 1.0
    return (price_summary_in_xpt, price_summary_in_ltc, price_in_xpt, price_in_ltc, aggregated_price_in_xpt, aggregated_price_in_ltc)
def calc_market_cap(asset_info, price_in_xpt, price_in_ltc):
    """Derive the asset's market cap in XPT and in LTC terms.

    Prices here are expressed as "units of asset per 1 XPT/LTC" (see the
    keys built in compile_summary_market_info), hence the division of total
    supply by the price. Each result is None when the matching price is
    falsy/unavailable.
    """
    supply = asset_info['total_issued_normalized']
    if price_in_xpt:
        cap_in_xpt = float(decimal.Decimal(supply) / decimal.Decimal(price_in_xpt))
    else:
        cap_in_xpt = None
    if price_in_ltc:
        cap_in_ltc = float(decimal.Decimal(supply) / decimal.Decimal(price_in_ltc))
    else:
        cap_in_ltc = None
    return cap_in_xpt, cap_in_ltc
def compile_summary_market_info(asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price):
    """Returns information related to capitalization, volume, etc for the supplied asset
    NOTE: in_ltc == base asset is LTC, in_xpt == base asset is XPT
    @param asset: a single asset name (an earlier version of this docstring
        incorrectly described this as a list of assets)
    @param mps_xpt_ltc: XPT/LTC market price summary from get_price_primatives()
    @param xpt_ltc_price: current XPT/LTC unit price (may be None)
    @param ltc_xpt_price: current LTC/XPT unit price (may be None)
    """
    asset_info = get_asset_info(asset)
    #derive the asset's price in both XPT and LTC terms, plus aggregates
    (price_summary_in_xpt, price_summary_in_ltc, price_in_xpt, price_in_ltc, aggregated_price_in_xpt, aggregated_price_in_ltc
    ) = get_xpt_ltc_price_info(asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price, with_last_trades=30)
    market_cap_in_xpt, market_cap_in_ltc = calc_market_cap(asset_info, price_in_xpt, price_in_ltc)
    return {
        'price_in_{}'.format(config.XPT.lower()): price_in_xpt, #current price of asset vs XPT (e.g. how many units of asset for 1 unit XPT)
        'price_in_{}'.format(config.LTC.lower()): price_in_ltc, #current price of asset vs LTC (e.g. how many units of asset for 1 unit LTC)
        'price_as_{}'.format(config.XPT.lower()): calc_inverse(price_in_xpt) if price_in_xpt else None, #current price of asset AS XPT
        'price_as_{}'.format(config.LTC.lower()): calc_inverse(price_in_ltc) if price_in_ltc else None, #current price of asset AS LTC
        'aggregated_price_in_{}'.format(config.XPT.lower()): aggregated_price_in_xpt,
        'aggregated_price_in_{}'.format(config.LTC.lower()): aggregated_price_in_ltc,
        'aggregated_price_as_{}'.format(config.XPT.lower()): calc_inverse(aggregated_price_in_xpt) if aggregated_price_in_xpt else None,
        'aggregated_price_as_{}'.format(config.LTC.lower()): calc_inverse(aggregated_price_in_ltc) if aggregated_price_in_ltc else None,
        'total_supply': asset_info['total_issued_normalized'],
        'market_cap_in_{}'.format(config.XPT.lower()): market_cap_in_xpt,
        'market_cap_in_{}'.format(config.LTC.lower()): market_cap_in_ltc,
    }
def compile_24h_market_info(asset):
    """Compile 24-hour trading statistics for ``asset``.

    Returns a dict containing:
    - '24h_summary': {'vol', 'count'} totalled across ALL of the asset's
      markets (where it appears as base OR quote), not just XPT/LTC pairings
    - '24h_ohlc_in_<xpt>'/'24h_ohlc_in_<ltc>': open/high/low/close, volume
      and trade count for the asset's XPT and LTC markets ({} if no trades)
    - '24h_vol_price_change_in_<xpt>'/'..._<ltc>': percent price change over
      the last 24h (None if no trades on that market)
    """
    start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    mongo_db = config.mongo_db
    #perform aggregation to get 24h statistics
    #TOTAL volume and count across all trades for the asset (on ALL markets, not just XPT and LTC pairings)
    _24h_vols = {'vol': 0, 'count': 0}
    _24h_vols_as_base = mongo_db.trades.aggregate([
        {"$match": {
            "base_asset": asset,
            "block_time": {"$gte": start_dt_1d } }},
        {"$project": {
            "base_quantity_normalized": 1 #to derive volume
        }},
        {"$group": {
            "_id": 1,
            "vol": {"$sum": "$base_quantity_normalized"},
            "count": {"$sum": 1},
        }}
    ])
    _24h_vols_as_base = {} if not _24h_vols_as_base['ok'] \
        or not len(_24h_vols_as_base['result']) else _24h_vols_as_base['result'][0]
    _24h_vols_as_quote = mongo_db.trades.aggregate([
        {"$match": {
            "quote_asset": asset,
            "block_time": {"$gte": start_dt_1d } }},
        {"$project": {
            "quote_quantity_normalized": 1 #to derive volume
        }},
        {"$group": {
            "_id": 1,
            #BUGFIX: this previously read "quote_quantity_normalized" without
            # the "$" prefix; a bare string is a literal to $sum (non-numeric,
            # summed as 0), so quote-side 24h volume was always 0
            "vol": {"$sum": "$quote_quantity_normalized"},
            "count": {"$sum": 1},
        }}
    ])
    _24h_vols_as_quote = {} if not _24h_vols_as_quote['ok'] \
        or not len(_24h_vols_as_quote['result']) else _24h_vols_as_quote['result'][0]
    _24h_vols['vol'] = _24h_vols_as_base.get('vol', 0) + _24h_vols_as_quote.get('vol', 0)
    _24h_vols['count'] = _24h_vols_as_base.get('count', 0) + _24h_vols_as_quote.get('count', 0)
    #XPT market volume with stats
    #NOTE(review): $first/$last here depend on the pipeline's input order; no
    # explicit $sort precedes the $group, so open/close rely on natural
    # (insertion) order of the trades collection -- confirm this is intended
    if asset != config.XPT:
        _24h_ohlc_in_xpt = mongo_db.trades.aggregate([
            {"$match": {
                "base_asset": config.XPT,
                "quote_asset": asset,
                "block_time": {"$gte": start_dt_1d } }},
            {"$project": {
                "unit_price": 1,
                "base_quantity_normalized": 1 #to derive volume
            }},
            {"$group": {
                "_id": 1,
                "open": {"$first": "$unit_price"},
                "high": {"$max": "$unit_price"},
                "low": {"$min": "$unit_price"},
                "close": {"$last": "$unit_price"},
                "vol": {"$sum": "$base_quantity_normalized"},
                "count": {"$sum": 1},
            }}
        ])
        _24h_ohlc_in_xpt = {} if not _24h_ohlc_in_xpt['ok'] \
            or not len(_24h_ohlc_in_xpt['result']) else _24h_ohlc_in_xpt['result'][0]
        if _24h_ohlc_in_xpt: del _24h_ohlc_in_xpt['_id']
    else:
        _24h_ohlc_in_xpt = {}
    #LTC market volume with stats
    if asset != config.LTC:
        _24h_ohlc_in_ltc = mongo_db.trades.aggregate([
            {"$match": {
                "base_asset": config.LTC,
                "quote_asset": asset,
                "block_time": {"$gte": start_dt_1d } }},
            {"$project": {
                "unit_price": 1,
                "base_quantity_normalized": 1 #to derive volume
            }},
            {"$group": {
                "_id": 1,
                "open": {"$first": "$unit_price"},
                "high": {"$max": "$unit_price"},
                "low": {"$min": "$unit_price"},
                "close": {"$last": "$unit_price"},
                "vol": {"$sum": "$base_quantity_normalized"},
                "count": {"$sum": 1},
            }}
        ])
        _24h_ohlc_in_ltc = {} if not _24h_ohlc_in_ltc['ok'] \
            or not len(_24h_ohlc_in_ltc['result']) else _24h_ohlc_in_ltc['result'][0]
        if _24h_ohlc_in_ltc: del _24h_ohlc_in_ltc['_id']
    else:
        _24h_ohlc_in_ltc = {}
    return {
        '24h_summary': _24h_vols,
        #^ total quantity traded of that asset in all markets in last 24h
        '24h_ohlc_in_{}'.format(config.XPT.lower()): _24h_ohlc_in_xpt,
        #^ OHLC/volume of the asset against XPT in last 24h
        # (the XPT/LTC descriptions of these two comments were previously swapped)
        '24h_ohlc_in_{}'.format(config.LTC.lower()): _24h_ohlc_in_ltc,
        #^ OHLC/volume of the asset against LTC in last 24h
        '24h_vol_price_change_in_{}'.format(config.XPT.lower()): calc_price_change(_24h_ohlc_in_xpt['open'], _24h_ohlc_in_xpt['close'])
            if _24h_ohlc_in_xpt else None,
        #^ aggregated price change from 24h ago to now, expressed as a signed float (e.g. .54 is +54%, -1.12 is -112%)
        '24h_vol_price_change_in_{}'.format(config.LTC.lower()): calc_price_change(_24h_ohlc_in_ltc['open'], _24h_ohlc_in_ltc['close'])
            if _24h_ohlc_in_ltc else None,
    }
def compile_7d_market_info(asset):
    """Compile 7-day, hour-bucketed price/volume history for ``asset``
    against both XPT and LTC (for charting).

    Returns {'7d_history_in_<xpt>': [[epoch_ms, price], ...],
             '7d_history_in_<ltc>': [[epoch_ms, price], ...]}.
    """
    mongo_db = config.mongo_db
    start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
    #get XPT and LTC market summarized trades over a 7d period (quantize to hour long slots)
    _7d_history_in_xpt = None # xpt/asset market (or xpt/ltc for xpt or ltc)
    _7d_history_in_ltc = None # ltc/asset market (or ltc/xpt for xpt or ltc)
    if asset not in [config.LTC, config.XPT]:
        for a in [config.XPT, config.LTC]:
            _7d_history = mongo_db.trades.aggregate([
                {"$match": {
                    "base_asset": a,
                    "quote_asset": asset,
                    "block_time": {"$gte": start_dt_7d }
                }},
                {"$project": {
                    "year": {"$year": "$block_time"},
                    "month": {"$month": "$block_time"},
                    "day": {"$dayOfMonth": "$block_time"},
                    "hour": {"$hour": "$block_time"},
                    "unit_price": 1,
                    "base_quantity_normalized": 1 #to derive volume
                }},
                #NOTE(review): this $sort references block_time AFTER a
                # $project that does not include it, so the sort key is
                # missing on every document -- confirm the stage has any
                # effect as written
                {"$sort": {"block_time": pymongo.ASCENDING}},
                {"$group": {
                    "_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
                    "price": {"$avg": "$unit_price"},
                    "vol": {"$sum": "$base_quantity_normalized"},
                }},
            ])
            _7d_history = [] if not _7d_history['ok'] else _7d_history['result']
            if a == config.XPT: _7d_history_in_xpt = _7d_history
            else: _7d_history_in_ltc = _7d_history
    else: #get the XPT/LTC market and invert for LTC/XPT (_7d_history_in_ltc)
        _7d_history = mongo_db.trades.aggregate([
            {"$match": {
                "base_asset": config.XPT,
                "quote_asset": config.LTC,
                "block_time": {"$gte": start_dt_7d }
            }},
            {"$project": {
                "year": {"$year": "$block_time"},
                "month": {"$month": "$block_time"},
                "day": {"$dayOfMonth": "$block_time"},
                "hour": {"$hour": "$block_time"},
                "unit_price": 1,
                "base_quantity_normalized": 1 #to derive volume
            }},
            {"$sort": {"block_time": pymongo.ASCENDING}},
            {"$group": {
                "_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
                "price": {"$avg": "$unit_price"},
                "vol": {"$sum": "$base_quantity_normalized"},
            }},
        ])
        _7d_history = [] if not _7d_history['ok'] else _7d_history['result']
        _7d_history_in_xpt = _7d_history
        _7d_history_in_ltc = copy.deepcopy(_7d_history_in_xpt)
        for i in xrange(len(_7d_history_in_ltc)):
            _7d_history_in_ltc[i]['price'] = calc_inverse(_7d_history_in_ltc[i]['price'])
            #NOTE(review): taking the reciprocal of a *volume* looks suspect;
            # converting an XPT-denominated volume to LTC normally means
            # multiplying by the bucket's price, not inverting the volume
            # itself -- confirm intent (the 'vol' field is not used in the
            # returned series below, so the impact may be limited)
            _7d_history_in_ltc[i]['vol'] = calc_inverse(_7d_history_in_ltc[i]['vol'])
    for l in [_7d_history_in_xpt, _7d_history_in_ltc]:
        for e in l: #convert our _id field out to be an epoch ts (in ms), and delete _id
            e['when'] = time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000
            del e['_id']
    return {
        '7d_history_in_{}'.format(config.XPT.lower()): [[e['when'], e['price']] for e in _7d_history_in_xpt],
        '7d_history_in_{}'.format(config.LTC.lower()): [[e['when'], e['price']] for e in _7d_history_in_ltc],
    }
def compile_asset_pair_market_info():
    """Compiles the pair-level statistics that show on the View Prices page of paytokenswallet, for instance

    For every asset pair with open orders or with trades in the last 24h:
    compute order-book stats (open order count, lowest ask, highest bid),
    24h volume (expressed in XPT and LTC where a price can be derived) and
    24h percent price change; upsert the results into the
    asset_pair_market_info collection, then purge pairs not refreshed on
    this pass.
    """
    #loop through all open orders, and compile a listing of pairs, with a count of open orders for each pair
    mongo_db = config.mongo_db
    end_dt = datetime.datetime.utcnow()
    start_dt = end_dt - datetime.timedelta(days=1)
    start_block_index, end_block_index = util.get_block_indexes_for_dates(start_dt=start_dt, end_dt=end_dt)
    open_orders = util.call_jsonrpc_api("get_orders",
        { 'filters': [
            {'field': 'give_remaining', 'op': '>', 'value': 0},
            {'field': 'get_remaining', 'op': '>', 'value': 0},
            {'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
            {'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
          ],
          'status': 'open',
          'show_expired': False,
        }, abort_on_error=True)['result']
    pair_data = {}
    asset_info = {}  #memoizes tracked_assets lookups for this pass
    def get_price(base_quantity_normalized, quote_quantity_normalized):
        #NOTE(review): the division happens in float BEFORE the Decimal
        # conversion, so Decimal adds no precision here -- confirm intent
        return float(D(quote_quantity_normalized / base_quantity_normalized ))
    #COMPOSE order depth, lowest ask, and highest bid column data
    for o in open_orders:
        (base_asset, quote_asset) = util.assets_to_asset_pair(o['give_asset'], o['get_asset'])
        pair = '%s/%s' % (base_asset, quote_asset)
        base_asset_info = asset_info.get(base_asset, mongo_db.tracked_assets.find_one({ 'asset': base_asset }))
        if base_asset not in asset_info: asset_info[base_asset] = base_asset_info
        quote_asset_info = asset_info.get(quote_asset, mongo_db.tracked_assets.find_one({ 'asset': quote_asset }))
        if quote_asset not in asset_info: asset_info[quote_asset] = quote_asset_info
        pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None,
            'completed_trades_count': 0, 'vol_base': 0, 'vol_quote': 0})
        #^ highest ask = open order selling base, highest bid = open order buying base
        #^ we also initialize completed_trades_count, vol_base, vol_quote because every pair inited here may
        # not have cooresponding data out of the trades_data_by_pair aggregation below
        pair_data[pair]['open_orders_count'] += 1
        base_quantity_normalized = util_litecoin.normalize_quantity(o['give_quantity'] if base_asset == o['give_asset'] else o['get_quantity'], base_asset_info['divisible'])
        quote_quantity_normalized = util_litecoin.normalize_quantity(o['give_quantity'] if quote_asset == o['give_asset'] else o['get_quantity'], quote_asset_info['divisible'])
        order_price = get_price(base_quantity_normalized, quote_quantity_normalized)
        if base_asset == o['give_asset']: #selling base
            if pair_data[pair]['lowest_ask'] is None or order_price < pair_data[pair]['lowest_ask']:
                pair_data[pair]['lowest_ask'] = order_price
        elif base_asset == o['get_asset']: #buying base
            if pair_data[pair]['highest_bid'] is None or order_price > pair_data[pair]['highest_bid']:
                pair_data[pair]['highest_bid'] = order_price
    #COMPOSE volume data (in XPT and LTC), and % change data
    #loop through all trade volume over the past 24h, and match that to the open orders
    trades_data_by_pair = mongo_db.trades.aggregate([
        {"$match": {
            "block_time": {"$gte": start_dt, "$lte": end_dt } }
        },
        {"$project": {
            "base_asset": 1,
            "quote_asset": 1,
            "base_quantity_normalized": 1, #to derive base volume
            "quote_quantity_normalized": 1 #to derive quote volume
        }},
        {"$group": {
            "_id": {"base_asset": "$base_asset", "quote_asset": "$quote_asset"},
            "vol_base": {"$sum": "$base_quantity_normalized"},
            "vol_quote": {"$sum": "$quote_quantity_normalized"},
            "count": {"$sum": 1},
        }}
    ])
    trades_data_by_pair = [] if not trades_data_by_pair['ok'] else trades_data_by_pair['result']
    for e in trades_data_by_pair:
        pair = '%s/%s' % (e['_id']['base_asset'], e['_id']['quote_asset'])
        pair_data.setdefault(pair, {'open_orders_count': 0, 'lowest_ask': None, 'highest_bid': None})
        #^ initialize an empty pair in the event there are no open orders for that pair, but there ARE completed trades for it
        pair_data[pair]['completed_trades_count'] = e['count']
        pair_data[pair]['vol_base'] = e['vol_base']
        pair_data[pair]['vol_quote'] = e['vol_quote']
    #compose price data, relative to LTC and XPT
    mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price = get_price_primatives()
    for pair, e in pair_data.iteritems():
        base_asset, quote_asset = pair.split('/')
        _24h_vol_in_ltc = None
        _24h_vol_in_xpt = None
        #derive asset price data, expressed in LTC and XPT, for the given volumes
        if base_asset == config.XPT:
            _24h_vol_in_xpt = e['vol_base']
            _24h_vol_in_ltc = util_litecoin.round_out(e['vol_base'] * xpt_ltc_price) if xpt_ltc_price else 0
        elif base_asset == config.LTC:
            _24h_vol_in_xpt = util_litecoin.round_out(e['vol_base'] * ltc_xpt_price) if ltc_xpt_price else 0
            _24h_vol_in_ltc = e['vol_base']
        else: #base is not XPT or LTC
            price_summary_in_xpt, price_summary_in_ltc, price_in_xpt, price_in_ltc, aggregated_price_in_xpt, aggregated_price_in_ltc = \
                get_xpt_ltc_price_info(base_asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
            if price_in_xpt:
                _24h_vol_in_xpt = util_litecoin.round_out(e['vol_base'] * price_in_xpt)
            if price_in_ltc:
                _24h_vol_in_ltc = util_litecoin.round_out(e['vol_base'] * price_in_ltc)
            if _24h_vol_in_xpt is None or _24h_vol_in_ltc is None:
                #the base asset didn't have price data against LTC or XPT, or both...try against the quote asset instead
                price_summary_in_xpt, price_summary_in_ltc, price_in_xpt, price_in_ltc, aggregated_price_in_xpt, aggregated_price_in_ltc = \
                    get_xpt_ltc_price_info(quote_asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price, with_last_trades=0, start_dt=start_dt, end_dt=end_dt)
                if _24h_vol_in_xpt is None and price_in_xpt:
                    _24h_vol_in_xpt = util_litecoin.round_out(e['vol_quote'] * price_in_xpt)
                if _24h_vol_in_ltc is None and price_in_ltc:
                    _24h_vol_in_ltc = util_litecoin.round_out(e['vol_quote'] * price_in_ltc)
        pair_data[pair]['24h_vol_in_{}'.format(config.XPT.lower())] = _24h_vol_in_xpt #might still be None
        pair_data[pair]['24h_vol_in_{}'.format(config.LTC.lower())] = _24h_vol_in_ltc #might still be None
        #get % change stats -- start by getting the first trade directly before the 24h period starts
        prev_trade = mongo_db.trades.find({
            "base_asset": base_asset,
            "quote_asset": quote_asset,
            "block_time": {'$lt': start_dt}}).sort('block_time', pymongo.DESCENDING).limit(1)
        latest_trade = mongo_db.trades.find({
            "base_asset": base_asset,
            "quote_asset": quote_asset}).sort('block_time', pymongo.DESCENDING).limit(1)
        if not prev_trade.count(): #no previous trade before this 24hr period
            pair_data[pair]['24h_pct_change'] = None
        else:
            prev_trade = prev_trade[0]
            latest_trade = latest_trade[0]
            prev_trade_price = get_price(prev_trade['base_quantity_normalized'], prev_trade['quote_quantity_normalized'])
            latest_trade_price = get_price(latest_trade['base_quantity_normalized'], latest_trade['quote_quantity_normalized'])
            pair_data[pair]['24h_pct_change'] = ((latest_trade_price - prev_trade_price) / prev_trade_price) * 100
        pair_data[pair]['last_updated'] = end_dt
        #print "PRODUCED", pair, pair_data[pair]
        mongo_db.asset_pair_market_info.update( {'base_asset': base_asset, 'quote_asset': quote_asset}, {"$set": pair_data[pair]}, upsert=True)
    #remove any old pairs that were not just updated (their last_updated predates this pass)
    mongo_db.asset_pair_market_info.remove({'last_updated': {'$lt': end_dt}})
    logging.info("Recomposed 24h trade statistics for %i asset pairs: %s" % (len(pair_data), ', '.join(pair_data.keys())))
def compile_asset_market_info():
"""Run through all assets and compose and store market ranking information."""
mongo_db = config.mongo_db
if not config.CAUGHT_UP:
logging.warn("Not updating asset market info as CAUGHT_UP is false.")
return False
#grab the last block # we processed assets data off of
last_block_assets_compiled = mongo_db.app_config.find_one()['last_block_assets_compiled']
last_block_time_assets_compiled = util.get_block_time(last_block_assets_compiled)
#logging.debug("Comping info for assets traded since block %i" % last_block_assets_compiled)
current_block_index = config.CURRENT_BLOCK_INDEX #store now as it may change as we are compiling asset data :)
current_block_time = util.get_block_time(current_block_index)
if current_block_index == last_block_assets_compiled:
#all caught up -- call again in 10 minutes
return True
mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price = get_price_primatives()
all_traded_assets = list(set(list([config.LTC, config.XPT]) + list(mongo_db.trades.find({}, {'quote_asset': 1, '_id': 0}).distinct('quote_asset'))))
#######################
#get a list of all assets with a trade within the last 24h (not necessarily just against XPT and LTC)
# ^ this is important because compiled market info has a 24h vol parameter that designates total volume for the asset across ALL pairings
start_dt_1d = datetime.datetime.utcnow() - datetime.timedelta(days=1)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_1d}}).distinct('base_asset'))
))
for asset in assets:
market_info_24h = compile_24h_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_24h})
#for all others (i.e. no trade in the last 24 hours), zero out the 24h trade data
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'24h_summary': {'vol': 0, 'count': 0},
'24h_ohlc_in_{}'.format(config.XPT.lower()): {},
'24h_ohlc_in_{}'.format(config.LTC.lower()): {},
'24h_vol_price_change_in_{}'.format(config.XPT.lower()): None,
'24h_vol_price_change_in_{}'.format(config.LTC.lower()): None,
}}, multi=True)
logging.info("Block: %s -- Calculated 24h stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#get a list of all assets with a trade within the last 7d up against XPT and LTC
start_dt_7d = datetime.datetime.utcnow() - datetime.timedelta(days=7)
assets = list(set(
list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}, 'base_asset': {'$in': [config.XPT, config.LTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_time': {'$gte': start_dt_7d}}).distinct('base_asset'))
))
for asset in assets:
market_info_7d = compile_7d_market_info(asset)
mongo_db.asset_market_info.update({'asset': asset}, {"$set": market_info_7d})
non_traded_assets = list(set(all_traded_assets) - set(assets))
mongo_db.asset_market_info.update( {'asset': {'$in': non_traded_assets}}, {"$set": {
'7d_history_in_{}'.format(config.XPT.lower()): [],
'7d_history_in_{}'.format(config.LTC.lower()): [],
}}, multi=True)
logging.info("Block: %s -- Calculated 7d stats for: %s" % (current_block_index, ', '.join(assets)))
#######################
#update summary market data for assets traded since last_block_assets_compiled
#get assets that were traded since the last check with either LTC or XPT, and update their market summary data
assets = list(set(
list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}, 'base_asset': {'$in': [config.XPT, config.LTC]}}).distinct('quote_asset'))
+ list(mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).distinct('base_asset'))
))
#update our storage of the latest market info in mongo
for asset in assets:
logging.info("Block: %s -- Updating asset market info for %s ..." % (current_block_index, asset))
summary_info = compile_summary_market_info(asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price)
mongo_db.asset_market_info.update( {'asset': asset}, {"$set": summary_info}, upsert=True)
#######################
#next, compile market cap historicals (and get the market price data that we can use to update assets with new trades)
#NOTE: this algoritm still needs to be fleshed out some...I'm not convinced it's laid out/optimized like it should be
#start by getting all trades from when we last compiled this data
trades = mongo_db.trades.find({'block_index': {'$gt': last_block_assets_compiled}}).sort('block_index', pymongo.ASCENDING)
trades_by_block = [] #tracks assets compiled per block, as we only want to analyze any given asset once per block
trades_by_block_mapping = {}
#organize trades by block
for t in trades:
if t['block_index'] in trades_by_block_mapping:
assert trades_by_block_mapping[t['block_index']]['block_index'] == t['block_index']
assert trades_by_block_mapping[t['block_index']]['block_time'] == t['block_time']
trades_by_block_mapping[t['block_index']]['trades'].append(t)
else:
e = {'block_index': t['block_index'], 'block_time': t['block_time'], 'trades': [t,]}
trades_by_block.append(e)
trades_by_block_mapping[t['block_index']] = e
for t_block in trades_by_block:
#reverse the tradelist per block, and ensure that we only process an asset that hasn't already been processed for this block
# (as there could be multiple trades in a single block for any specific asset). we reverse the list because
# we'd rather process a later trade for a given asset, as the market price for that will take into account
# the earlier trades on that same block for that asset, and we don't want/need multiple cap points per block
assets_in_block = {}
mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price = get_price_primatives(end_dt=t_block['block_time'])
for t in reversed(t_block['trades']):
assets = []
if t['base_asset'] not in assets_in_block:
assets.append(t['base_asset'])
assets_in_block[t['base_asset']] = True
if t['quote_asset'] not in assets_in_block:
assets.append(t['quote_asset'])
assets_in_block[t['quote_asset']] = True
if not len(assets): continue
for asset in assets:
#recalculate the market cap for the asset this trade is for
asset_info = get_asset_info(asset, at_dt=t['block_time'])
(price_summary_in_xpt, price_summary_in_ltc, price_in_xpt, price_in_ltc, aggregated_price_in_xpt, aggregated_price_in_ltc
) = get_xpt_ltc_price_info(asset, mps_xpt_ltc, xpt_ltc_price, ltc_xpt_price, with_last_trades=0, end_dt=t['block_time'])
market_cap_in_xpt, market_cap_in_ltc = calc_market_cap(asset_info, price_in_xpt, price_in_ltc)
#^ this will get price data from the block time of this trade back the standard number of days and trades
# to determine our standard market price, relative (anchored) to the time of this trade
for market_cap_as in (config.XPT, config.LTC):
market_cap = market_cap_in_xpt if market_cap_as == config.XPT else market_cap_in_ltc
#if there is a previously stored market cap for this asset, add a new history point only if the two caps differ
prev_market_cap_history = mongo_db.asset_marketcap_history.find({'market_cap_as': market_cap_as, 'asset': asset,
'block_index': {'$lt': t['block_index']}}).sort('block_index', pymongo.DESCENDING).limit(1)
prev_market_cap_history = list(prev_market_cap_history)[0] if prev_market_cap_history.count() == 1 else None
if market_cap and (not prev_market_cap_history or prev_market_cap_history['market_cap'] != market_cap):
mongo_db.asset_marketcap_history.insert({
'block_index': t['block_index'],
'block_time': t['block_time'],
'asset': asset,
'market_cap': market_cap,
'market_cap_as': market_cap_as,
})
logging.info("Block %i -- Calculated market cap history point for %s as %s (mID: %s)" % (t['block_index'], asset, market_cap_as, t['message_index']))
mongo_db.app_config.update({}, {'$set': {'last_block_assets_compiled': current_block_index}})
return True
| {
"content_hash": "f8169675c4c4631185b420c2c6e25296",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 179,
"avg_line_length": 56.420333839150224,
"alnum_prop": 0.589790484387187,
"repo_name": "Paytokens/payblockd",
"id": "e44493287f29cc0fb5b30aace38c67b57ae34455",
"size": "37181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/components/assets_trading.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "305333"
}
],
"symlink_target": ""
} |
from azure.cli.core.util import sdk_no_wait
from azure.mgmt.databoxedge.models import Sku
def databoxedge_device_list(client,
                            resource_group_name=None,
                            expand=None):
    """List Data Box Edge devices, scoped to a resource group when one is given."""
    if not resource_group_name:
        return client.list_by_subscription(expand=expand)
    return client.list_by_resource_group(resource_group_name=resource_group_name,
                                         expand=expand)
def databoxedge_device_show(client,
                            device_name,
                            resource_group_name):
    """Fetch a single Data Box Edge device by name."""
    lookup = dict(device_name=device_name,
                  resource_group_name=resource_group_name)
    return client.get(**lookup)
def databoxedge_device_create(client,
                              device_name,
                              resource_group_name,
                              location,
                              tags=None,
                              sku=None,
                              etag=None,
                              data_box_edge_device_status=None,
                              description=None,
                              model_description=None,
                              friendly_name=None,
                              no_wait=False):
    """Create or update a Data Box Edge device resource.

    Builds the request payload from the optional arguments, omitting any
    that were not supplied, and starts the create-or-update long-running
    operation (optionally returning immediately when ``no_wait`` is True).
    """
    data_box_edge_device = {'location': location}
    if tags is not None:
        data_box_edge_device['tags'] = tags
    if sku is not None:
        data_box_edge_device['sku'] = Sku(name=sku)
    if etag is not None:
        data_box_edge_device['etag'] = etag
    # BUG FIX: the original tested `data_box_edge_device` (the payload dict,
    # always truthy at this point) instead of `data_box_edge_device_status`,
    # so a `None` status was written into every payload.
    if data_box_edge_device_status is not None:
        data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
    if description is not None:
        data_box_edge_device['description'] = description
    if model_description is not None:
        data_box_edge_device['model_description'] = model_description
    if friendly_name is not None:
        data_box_edge_device['friendly_name'] = friendly_name
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       data_box_edge_device=data_box_edge_device)
def databoxedge_device_update(client,
                              device_name,
                              resource_group_name,
                              tags=None):
    """Patch mutable device properties (currently only tags)."""
    parameters = {} if tags is None else {'tags': tags}
    return client.update(device_name=device_name,
                         resource_group_name=resource_group_name,
                         parameters=parameters)
def databoxedge_device_delete(client,
                              device_name,
                              resource_group_name,
                              no_wait=False):
    """Start the device delete operation; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_delete,
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_device_download_update(client,
                                       device_name,
                                       resource_group_name,
                                       no_wait=False):
    """Start the device's download-updates operation; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_download_updates,
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_device_install_update(client,
                                      device_name,
                                      resource_group_name,
                                      no_wait=False):
    """Start the device's install-updates operation; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_install_updates,
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_device_scan_for_update(client,
                                       device_name,
                                       resource_group_name,
                                       no_wait=False):
    """Start a scan for available updates on the device; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_scan_for_updates,
        device_name=device_name, resource_group_name=resource_group_name)
def databoxedge_device_show_update_summary(client,
                                           device_name,
                                           resource_group_name):
    """Return the device's update summary resource."""
    return client.get_update_summary(
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_alert_list(client,
                           device_name,
                           resource_group_name):
    """List the alerts raised on the given device."""
    return client.list_by_data_box_edge_device(
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_alert_show(client,
                           device_name,
                           name,
                           resource_group_name):
    """Get one alert on the device by name."""
    return client.get(
        device_name=device_name, name=name,
        resource_group_name=resource_group_name)


def databoxedge_bandwidth_schedule_list(client,
                                        device_name,
                                        resource_group_name):
    """List the bandwidth schedules configured on the device."""
    return client.list_by_data_box_edge_device(
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_bandwidth_schedule_show(client,
                                        device_name,
                                        name,
                                        resource_group_name):
    """Get one bandwidth schedule on the device by name."""
    return client.get(
        device_name=device_name, name=name,
        resource_group_name=resource_group_name)
def databoxedge_bandwidth_schedule_create(client,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start,
                                          stop,
                                          rate_in_mbps,
                                          days,
                                          no_wait=False):
    """Create or replace a bandwidth schedule on the device."""
    parameters = {
        'start': start,
        'stop': stop,
        'rate_in_mbps': rate_in_mbps,
        'days': days,
    }
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       name=name,
                       resource_group_name=resource_group_name,
                       parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          start,
                                          stop,
                                          rate_in_mbps,
                                          days,
                                          no_wait=False):
    """Overwrite only the schedule fields that were supplied (non-None)."""
    new_values = (('start', start), ('stop', stop),
                  ('rate_in_mbps', rate_in_mbps), ('days', days))
    for attr, value in new_values:
        if value is not None:
            setattr(instance, attr, value)
    return instance
def databoxedge_bandwidth_schedule_delete(client,
                                          device_name,
                                          name,
                                          resource_group_name,
                                          no_wait=False):
    """Delete a bandwidth schedule; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_delete,
        device_name=device_name, name=name,
        resource_group_name=resource_group_name)


def databoxedge_show_job(client,
                         device_name,
                         name,
                         resource_group_name):
    """Get a job on the device by name."""
    return client.get(
        device_name=device_name, name=name,
        resource_group_name=resource_group_name)


def databoxedge_list_node(client,
                          device_name,
                          resource_group_name):
    """List the nodes of the device."""
    return client.list_by_data_box_edge_device(
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_order_list(client,
                           device_name,
                           resource_group_name):
    """List orders associated with the device."""
    return client.list_by_data_box_edge_device(
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_order_show(client,
                           device_name,
                           resource_group_name):
    """Get the device's order resource."""
    return client.get(
        device_name=device_name, resource_group_name=resource_group_name)
def databoxedge_order_create(client,
                             device_name,
                             resource_group_name,
                             address_line1,
                             postal_code,
                             city,
                             state,
                             country,
                             contact_person,
                             company_name,
                             phone,
                             email_list,
                             status,
                             comments=None,
                             address_line2=None,
                             address_line3=None,
                             no_wait=False):
    """Assemble an order payload and start the create-or-update LRO.

    Optional address lines and comments are only included when supplied.
    """
    current_status = {'status': status}
    if comments is not None:
        current_status['comments'] = comments
    # Keep the same key insertion order as the original payload builder
    shipping_address = {'address_line1': address_line1}
    if address_line2 is not None:
        shipping_address['address_line2'] = address_line2
    if address_line3 is not None:
        shipping_address['address_line3'] = address_line3
    shipping_address['postal_code'] = postal_code
    shipping_address['city'] = city
    shipping_address['state'] = state
    shipping_address['country'] = country
    contact_information = {
        'contact_person': contact_person,
        'company_name': company_name,
        'phone': phone,
        'email_list': email_list,
    }
    order = {
        'current_status': current_status,
        'shipping_address': shipping_address,
        'contact_information': contact_information,
    }
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       device_name=device_name,
                       resource_group_name=resource_group_name,
                       order=order)
def databoxedge_order_update(instance,
                             device_name,
                             resource_group_name,
                             status=None,
                             comments=None,
                             address_line1=None,
                             address_line2=None,
                             address_line3=None,
                             postal_code=None,
                             city=None,
                             state=None,
                             country=None,
                             contact_person=None,
                             company_name=None,
                             phone=None,
                             email_list=None,
                             no_wait=False):
    """Copy each supplied (non-None) field onto the nested order sections."""
    field_map = {
        'current_status': (('status', status), ('comments', comments)),
        'shipping_address': (('address_line1', address_line1),
                             ('address_line2', address_line2),
                             ('address_line3', address_line3),
                             ('postal_code', postal_code),
                             ('city', city),
                             ('state', state),
                             ('country', country)),
        'contact_information': (('contact_person', contact_person),
                                ('company_name', company_name),
                                ('phone', phone),
                                ('email_list', email_list)),
    }
    for section, fields in field_map.items():
        for attr, value in fields:
            if value is not None:
                # Only touch the nested section when there is a new value,
                # matching the original's lazy attribute access
                setattr(getattr(instance, section), attr, value)
    return instance
def databoxedge_order_delete(client,
                             device_name,
                             resource_group_name,
                             no_wait=False):
    """Delete the device's order; honours --no-wait."""
    return sdk_no_wait(
        no_wait, client.begin_delete,
        device_name=device_name, resource_group_name=resource_group_name)


def databoxedge_list_sku(client,
                         filter_=None):
    """List available SKUs, optionally narrowed by a filter string."""
    # `filter_` avoids shadowing the builtin; the SDK parameter is `filter`
    return client.list(filter=filter_)
| {
"content_hash": "b42d0fdddbdffe2ff1a2941d3b9c6c92",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 89,
"avg_line_length": 40.24024024024024,
"alnum_prop": 0.473955223880597,
"repo_name": "yugangw-msft/azure-cli",
"id": "b9690cb791fe4059b000497fb5c0e985955f4e06",
"size": "13926",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from authdata.forms import UserCreationForm, UserChangeForm
from authdata.models import Municipality
from authdata.models import School
from authdata.models import Role
from authdata.models import Attendance
from authdata.models import Source
from authdata.models import User
from authdata.models import Attribute
from authdata.models import UserAttribute
class MunicipalityAdmin(admin.ModelAdmin):
    """Changelist admin for Municipality; shows only the name column."""
    list_display = ('name',)
class SchoolAdmin(admin.ModelAdmin):
    """Changelist admin for School; shows only the name column."""
    list_display = ('name',)
class RoleAdmin(admin.ModelAdmin):
    """Changelist admin for Role; shows only the name column."""
    list_display = ('name',)
class AttendanceAdmin(admin.ModelAdmin):
    """Admin for Attendance records.

    Filterable by role and data source; searchable across the school
    (id, name, municipality), the username and the group.
    """
    list_display = ('user', 'school', 'role', 'group', 'data_source')
    list_filter = ('role', 'data_source')
    search_fields = ('school__school_id', 'school__name', 'school__municipality__name', 'user__username', 'group',)
class AttributeAdmin(admin.ModelAdmin):
    """Changelist admin for Attribute; shows only the name column."""
    list_display = ('name',)
class UserAttributeAdmin(admin.ModelAdmin):
    """Admin for per-user attribute values; filterable by attribute."""
    list_display = ('user', 'attribute', 'value')
    list_filter = ('attribute',)
    search_fields = ('user__username', 'value')
class SourceAdmin(admin.ModelAdmin):
    """Changelist admin for Source; shows only the name column."""
    list_display = ('name',)
class UserAttributeInline(admin.TabularInline):
    """Inline editor for a user's attribute values on the User admin page."""
    model = UserAttribute
    extra = 0  # do not render blank extra rows
class AttendanceInline(admin.TabularInline):
    """Inline editor for a user's attendances on the User admin page."""
    model = Attendance
    extra = 0  # do not render blank extra rows
class UserAdmin(DjangoUserAdmin):
    """Admin for the project's custom User model.

    Extends Django's stock UserAdmin with the external identity fields
    (`external_source`, `external_id`) and inline editing of user
    attributes and attendances.
    """
    # Change-form layout; the last group exposes the external identity
    # fields added by this project's User model.
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('External source'), {'fields': ('external_source', 'external_id')}),
    )
    # The add form only asks for a username.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username',),
        }),
    )
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff', 'external_source', 'external_id')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups', 'external_source')
    search_fields = ('username', 'first_name', 'last_name', 'email', 'external_id')
    ordering = ('username',)
    filter_horizontal = ('groups', 'user_permissions')
    inlines = [UserAttributeInline, AttendanceInline]
    # Custom forms defined in authdata.forms handle user creation/change.
    form = UserChangeForm
    add_form = UserCreationForm
# Register every authdata model with the default admin site, using the
# ModelAdmin classes defined above.
admin.site.register(Municipality, MunicipalityAdmin)
admin.site.register(School, SchoolAdmin)
admin.site.register(Role, RoleAdmin)
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(Attribute, AttributeAdmin)
admin.site.register(UserAttribute, UserAttributeAdmin)
| {
"content_hash": "9e2419dc4dd15f7f662b11f00a8b4759",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 115,
"avg_line_length": 32.33673469387755,
"alnum_prop": 0.6708740927737457,
"repo_name": "educloudalliance/eca-auth-data",
"id": "cd0c819f51326a6d89a2bea5f46be271b83b6d09",
"size": "4334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "authdata/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "170600"
},
{
"name": "Shell",
"bytes": "2618"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import logging
import math
import sys
import numpy as np
import numpy.lib.recfunctions
import scipy.linalg
import scipy.stats
from sklearn.linear_model import LassoLarsIC # , Lasso, LassoCV, LassoLarsCV
import statsmodels.api as sm
from errors import TSLengthException
from masking import smooth_mask, multitemp_mask
from regression.glmnet_fit import GLMLasso
from regression import robust_fit as rlm
from utils import date2index
class YATSM(object):
"""Initialize a YATSM model for data X (spectra) and Y (dates)
YATSM model based off of tests for structural changes from the
econometrics literature including the MOSUM or CUMSUM (Chu et al,
Zeileis, and others) as implemented in a remote sensing context by
BFAST (Verbesselt, et al. 2012) and CCDC (Zhu and Woodcock, 2014). This
effort is not intended as a direct port of either algorithms.
Args:
X (ndarray): Independent variable matrix
Y (ndarray): Dependent variable matrix
consecutive (int): Consecutive observations to trigger change
threshold (float): Test statistic threshold for change
min_obs (int): Minimum observations in model
min_rmse (float): Minimum RMSE for models during testing
fit_indices (ndarray): Indices of Y to fit models for
test_indices (ndarray): Indices of Y to test for change with
retrain_time (float): Number of days between model fit updates during
monitoring period
screening (str): Style of prescreening of the timeseries
for noise. Options are 'RLM' or 'LOWESS'
screening_crit (float, optional): critical value for multitemporal
noise screening
green_band (int, optional): Index of green band in Y for
multitemporal masking (default: 1)
swir1_band (int, optional): Index of first SWIR band in Y for
multitemporal masking (default: 4)
remove_noise (bool, optional): Remove observation if change is not
detected but first observation is above threshold (if it looks like
noise) (default: True)
dynamic_rmse (bool, optional): Vary RMSE as a function of day of year (
default: False)
slope_test (float or bool, optional): Use an additional slope test to
assess the suitability of the training period. A value of True
enables the test and uses the `threshold` parameter as the test
criterion. False turns off the test or a float value enables the test
but overrides the test criterion threshold. (default: False)
lassocv (bool, optional): Use scikit-learn LarsLassoCV over glmnet
design_info (patsy.DesignInfo, optional): design information for X, if
X is created using Patsy
px (int, optional): X (column) pixel reference
py (int, optional): Y (row) pixel reference
logger (logging.Logger, optional): Specific logger to use, else get one
"""
ndays = 365.25
green_band = 1
swir1_band = 4
screening_types = ['RLM', 'LOWESS']
    def __init__(self, X, Y,
                 consecutive=5, threshold=2.56, min_obs=None, min_rmse=None,
                 fit_indices=None, test_indices=None, retrain_time=ndays,
                 screening='RLM', screening_crit=400.0,
                 green_band=green_band, swir1_band=swir1_band,
                 remove_noise=True, dynamic_rmse=False, slope_test=False,
                 lassocv=False,
                 design_info=None, px=0, py=0,
                 logger=None):
        """Initialize model state and validate inputs.

        See the class docstring for full argument descriptions. Raises
        IndexError for out-of-range fit/test indices, TypeError for an
        unknown screening type, and TSLengthException when the series is
        too short for the training + consecutive-observation window.
        """
        # Store data
        self.X = X
        self.Y = Y
        # Setup logger
        self.logger = logger or logging.getLogger('yatsm')
        # Setup slope test: a bare `True` means "use the change threshold
        # as the slope-test criterion"; a float overrides it
        self.slope_test = slope_test
        if self.slope_test is True:
            self.slope_test = threshold
        # Configure which implementation of LASSO we're using
        self.lassocv = lassocv
        if self.lassocv:
            self.fit_models = self.fit_models_LassoCV
            self.logger.info('Using LassoCV from sklearn')
        else:
            self.fit_models = self.fit_models_GLMnet
            self.logger.info('Using Lasso from GLMnet (lambda = 20)')
        # Find column index of X containing date from Patsy
        if design_info:
            self.design_info = design_info
            if 'x' not in design_info.term_name_slices.keys():
                raise AttributeError('Design info must specify "x" (slope)')
            self._jx = design_info.term_name_slices['x'].start
        else:
            self._jx = 1
        # Default fitted and tested indices to every band in Y
        # NOTE(review): a previous comment said "except last band", but
        # np.arange(Y.shape[0]) covers all bands -- confirm intent
        if fit_indices is None:
            self.fit_indices = np.arange(Y.shape[0])
        else:
            if max(fit_indices) < Y.shape[0]:
                self.fit_indices = fit_indices
            else:
                raise IndexError('Specified fit_indices larger than Y matrix')
        if test_indices is None:
            self.test_indices = np.arange(Y.shape[0])
        else:
            if max(test_indices) < Y.shape[0]:
                self.test_indices = test_indices
            else:
                raise IndexError('Specified test_indices larger than Y matrix')
        self.retrain_time = retrain_time
        # Type of noise screening
        if screening not in self.screening_types:
            raise TypeError('Unknown screening type')
        # Define method according to type
        if screening == 'RLM':
            self.screen_timeseries = self.screen_timeseries_RLM
            self.logger.debug('Using RLM for screening')
        elif screening == 'LOWESS':
            self.screen_timeseries = self.screen_timeseries_LOWESS
            self.logger.debug('Using LOWESS for screening')
        # Keep track if timeseries has been screened for full-TS LOWESS
        self.screened = False
        self.green_band = green_band
        self.swir1_band = swir1_band
        self.screening_crit = screening_crit
        self.remove_noise = remove_noise
        if dynamic_rmse:
            self.get_rmse = self.get_dynamic_rmse
        else:
            self.get_rmse = self.get_model_rmse
        # Attributes
        self.n_band = Y.shape[0]
        self.n_coef = X.shape[1]
        # Store parameters
        self.consecutive = consecutive
        self.threshold = threshold
        if min_obs is None:
            self.min_obs = int(self.n_coef * 1.5)
        else:
            self.min_obs = min_obs
        # Minimum RMSE to prevent being overly sensitive to changes
        if min_rmse:
            self.min_rmse = min_rmse
        else:
            # if None, use the smallest positive float so the RMSE floor
            # effectively never binds
            self.min_rmse = sys.float_info.min
        # Index of time segment location
        self.start = 0
        self.here = self.min_obs
        self._here = self.here
        if self.X.shape[0] < self.here + self.consecutive:
            raise TSLengthException('Not enough observations (n = {n})'.format(
                n=self.X.shape[0]))
        # True while a trained model is being monitored for change
        self.monitoring = False
        # Record if model has been ran
        self.ran = False
        # Store array of time series model (GLMnet or LassoCV)
        self.models = []
        self.n_record = 0
        # Structured-array template for one time-segment record
        self.record_template = np.zeros(1, dtype=[
            ('start', 'i4'),
            ('end', 'i4'),
            ('break', 'i4'),
            ('coef', 'float32', (self.n_coef, len(self.fit_indices))),
            ('rmse', 'float32', len(self.fit_indices)),
            ('px', 'u2'),
            ('py', 'u2'),
            ('magnitude', 'float32', len(self.fit_indices))
        ])
        self.record_template['px'][0] = px
        self.record_template['py'][0] = py
        self.record = np.copy(self.record_template)
# POST-PROCESSING
    def commission_test(self, alpha=0.001):
        """ Merge adjacent records based on Chow Tests for nested models

        Use Chow Test to find false positive, spurious, or unnecessary breaks
        in the timeseries by comparing the effectiveness of two separate
        adjacent models with one single model that spans the entire time
        period.

        Chow test is described:

        .. math::
            \\frac{[RSS_r - (RSS_1 + RSS_2)] / k}{(RSS_1 + RSS_2) / (n - 2k)}

        where

        - :math:`RSS_r` is the RSS of the combined, or, restricted model
        - :math:`RSS_1` is the RSS of the first model
        - :math:`RSS_2` is the RSS of the second model
        - :math:`k` is the number of model parameters
        - :math:`n` is the number of total observations

        Because we look for change in multiple bands, the RSS used to compare
        the unrestricted versus restricted models is the L2 norm of RSS
        values from `self.test_indices`.

        Args:
          alpha (float): significance level for F-statistic (default: 0.001)

        Returns:
          np.ndarray: updated copy of `self.record` with spurious models
            combined into unified model

        """
        if self.record.size == 1:
            return self.record
        k = self.n_coef
        models = []
        merged = False
        for i in range(len(self.record) - 1):
            # If the previous pair was merged, compare against the merged
            # segment rather than the raw record entry
            if merged:
                m_1 = models[-1]
            else:
                m_1 = self.record[i]
            m_2 = self.record[i + 1]
            # Map segment start/end dates to observation indices
            m_1_start = date2index(self.X[:, self._jx], m_1['start'])
            m_1_end = date2index(self.X[:, self._jx], m_1['end'])
            m_2_start = date2index(self.X[:, self._jx], m_2['start'])
            m_2_end = date2index(self.X[:, self._jx], m_2['end'])
            # Restricted (combined) model spans both segments
            m_r_start = m_1_start
            m_r_end = m_2_end
            n = m_r_end - m_r_start
            F_crit = scipy.stats.f.ppf(1 - alpha, k, n - 2 * k)
            m_1_rss = np.zeros(self.test_indices.size)
            m_2_rss = np.zeros(self.test_indices.size)
            m_r_rss = np.zeros(self.test_indices.size)
            # Per-band OLS residual sum of squares for each candidate model
            # (scipy.linalg.lstsq returns the residues in element [1])
            for i_b, b in enumerate(self.test_indices):
                m_1_rss[i_b] = scipy.linalg.lstsq(
                    self.X[m_1_start:m_1_end, :],
                    self.Y[b, m_1_start:m_1_end])[1]
                m_2_rss[i_b] = scipy.linalg.lstsq(
                    self.X[m_2_start:m_2_end, :],
                    self.Y[b, m_2_start:m_2_end])[1]
                m_r_rss[i_b] = scipy.linalg.lstsq(
                    self.X[m_r_start:m_r_end, :],
                    self.Y[b, m_r_start:m_r_end])[1]
            # Collapse multi-band RSS to a scalar via the L2 norm
            m_1_rss = np.linalg.norm(m_1_rss)
            m_2_rss = np.linalg.norm(m_2_rss)
            m_r_rss = np.linalg.norm(m_r_rss)
            F = ((m_r_rss - (m_1_rss + m_2_rss)) / k) / \
                ((m_1_rss + m_2_rss) / (n - 2 * k))
            if F > F_crit:
                # Reject H0 and retain change
                # Only add in previous model if first model
                if i == 0:
                    models.append(m_1)
                models.append(m_2)
                merged = False
            else:
                # Fail to reject H0 -- ignore change and merge
                m_new = np.copy(self.record_template)[0]
                # Remove last previously added model from list to merge
                if i != 0:
                    del models[-1]
                m_new['start'] = m_1['start']
                m_new['end'] = m_2['end']
                m_new['break'] = m_2['break']
                # Refit over the full merged span
                _models = self.fit_models(
                    self.X[m_r_start:m_r_end, :],
                    self.Y[:, m_r_start:m_r_end])
                for i_m, _m in enumerate(_models):
                    m_new['coef'][:, i_m] = _m.coef
                    m_new['rmse'][i_m] = _m.rmse
                # Preserve magnitude from 2nd model that was merged
                m_new['magnitude'] = m_2['magnitude']
                models.append(m_new)
                merged = True
        return np.array(models)
def omission_test(self, crit=0.05, behavior='ANY',
indices=None):
""" Add omitted breakpoint into records based on residual stationarity
Uses recursive residuals within a CUMSUM test to check if each model
has omitted a "structural change" (e.g., land cover change). Returns
an array of True or False for each timeseries segment record depending
on result from `statsmodels.stats.diagnostic.breaks_cusumolsresid`.
Args:
crit (float, optional): Critical p-value for rejection of null
hypothesis that data contain no structural change
behavior (str, optional): Method for dealing with multiple
`test_indices`. `ANY` will return True if any one test index
rejects the null hypothesis. `ALL` will only return True if ALL
test indices reject the null hypothesis.
indices (np.ndarray, optional): Array indices to test. User provided
indices must be a subset of `self.test_indices`.
Returns:
np.ndarray: Array of True or False for each record where
True indicates omitted break point
"""
if behavior.lower() not in ['any', 'all']:
raise ValueError('`behavior` must be "any" or "all"')
if not indices:
indices = self.test_indices
if not np.all(np.in1d(indices, self.test_indices)):
raise ValueError('`indices` must be a subset of '
'`self.test_indices`')
if not self.ran:
return np.empty(0, dtype=bool)
omission = np.zeros((self.record.size, len(indices)),
dtype=bool)
for i, r in enumerate(self.record):
# Skip if no model fit
if r['start'] == 0 or r['end'] == 0:
continue
# Find matching X and Y in data
index = np.where(
(self.X[:, self._jx] >= min(r['start'], r['end'])) &
(self.X[:, self._jx] <= max(r['end'], r['start'])))[0]
# Grab matching X and Y
_X = self.X[index, :]
_Y = self.Y[:, index]
for i_b, b in enumerate(indices):
# Create OLS regression
ols = sm.OLS(_Y[b, :], _X).fit()
# Perform CUMSUM test on residuals
test = sm.stats.diagnostic.breaks_cusumolsresid(
ols.resid, _X.shape[1])
if test[1] < crit:
omission[i, i_b] = True
else:
omission[i, i_b] = False
# Collapse band answers according to `behavior`
if behavior.lower() == 'any':
return np.any(omission, 1)
else:
return np.all(omission, 1)
    @property
    def robust_record(self):
        """ Returns a copy of YATSM record output with robustly fitted models

        After YATSM has been run, take each time segment and re-fit the model
        using robust iteratively reweighted least squares (RIRLS) regression.
        RIRLS will only be performed using non-zero coefficients from original
        regression.

        The returned model results should be more representative of the
        signal found because it will remove influence of outlying observations,
        such as clouds or shadows.

        If YATSM has not yet been run, returns None

        """
        if not self.ran:
            return None
        # Create new array for robust coefficients and RMSE
        robust = np.zeros(self.record.shape[0], dtype=[
            ('robust_coef', 'float32', (self.n_coef, len(self.fit_indices))),
            ('robust_rmse', 'float32', len(self.fit_indices)),
        ])
        # Update to robust model
        for i, r in enumerate(self.record):
            # Find matching X and Y in data
            index = np.where(
                (self.X[:, self._jx] >= min(r['start'], r['end'])) &
                (self.X[:, self._jx] <= max(r['end'], r['start'])))[0]
            # Grab matching X and Y
            _X = self.X[index, :]
            _Y = self.Y[:, index]
            # Refit each band
            for i_b, b in enumerate(self.fit_indices):
                # Find nonzero coefficients from the original LASSO fit;
                # skip bands where every coefficient was zeroed out
                nonzero = np.nonzero(self.record[i]['coef'][:, i_b])[0]
                if nonzero.size == 0:
                    continue
                # Setup model: bisquare-weighted robust regression on the
                # columns selected by the original fit
                rirls_model = rlm.RLM(_Y[b, :], _X[:, nonzero],
                                      M=rlm.bisquare)
                # Fit
                fit = rirls_model.fit()
                # Store updated coefficients
                robust[i]['robust_coef'][nonzero, i_b] = fit.coefs
                # Update RMSE
                robust[i]['robust_rmse'][i_b] = \
                    math.sqrt(rirls_model.rss / index.size)
            self.logger.debug('Updated record {i} to robust results'.
                              format(i=i))
        # Merge robust fields alongside the original record fields
        robust_record = np.lib.recfunctions.merge_arrays((self.record, robust),
                                                         flatten=True)
        return robust_record
def reset(self):
""" Resets 'start' and 'here' indices """
self.n_record = 0
self.record = np.copy(self.record_template)
self.start = 0
self.here = self.min_obs
self._here = self.here
self.ran = False
# HELPER PROPERTIES
@property
def span_time(self):
""" Return time span (in days) between start and end of model """
return abs(self.X[self.here, self._jx] -
self.X[self.start, self._jx])
@property
def span_index(self):
""" Return time span (in index) between start and end of model """
return (self.here - self.start)
@property
def running(self):
""" Determine if timeseries can run """
return self.here < self.X.shape[0]
@property
def can_monitor(self):
""" Determine if timeseries can monitor the future consecutive obs """
return self.here < self.X.shape[0] - self.consecutive - 1
# MAIN LOOP
    def run(self):
        """ Run timeseries model

        Walks 'here' forward through the series, alternating between a
        training phase (until a stable model is found) and a monitoring
        phase (testing each new observation for change). Results accumulate
        in `self.record`; sets `self.ran` when finished.
        """
        # Record date of last time model was trained
        self.trained_date = 0
        while self.running:
            # Training phase: advance until `self.monitoring` becomes True
            while not self.monitoring and self.can_monitor:
                self.train()
                self.here += 1
            # Monitoring phase: test each new observation for change
            while self.monitoring and self.can_monitor:
                # Update model if required
                self.update_model()
                # Perform monitoring check
                self.monitor()
                # Iterate forward
                self.here += 1
            self.here += 1
        # If we ended without being able to monitor again, delete last model
        # since it will be empty
        if self.record[-1]['start'] == 0 and self.record[-1]['end'] == 0:
            self.record = self.record[:-1]
        self.ran = True
# Deal with start and end of time series #TODO
def screen_timeseries_LOWESS(self, span=None):
""" Screen entire dataset for noise before training using LOWESS
Args:
span (int, optional): span for LOWESS
Returns:
bool: True if timeseries is screened and we can train, else False
"""
if not self.screened:
if not span:
span = self.consecutive * 2 + 1
mask = smooth_mask(self.X[:, self._jx], self.Y, span,
crit=self.screening_crit,
green=self.green_band, swir1=self.swir1_band)
# Apply mask to X and Y
self.X = self.X[mask, :]
self.Y = self.Y[:, mask]
# Also apply to _X and _Y for training purposes
self._X = self.X
self._Y = self.Y
self.screened = True
return True
def screen_timeseries_RLM(self):
""" Screen training period for noise with IRWLS RLM
Returns:
bool: True if timeseries is screened and we can train, else False
"""
# Multitemporal noise removal
mask = np.ones(self.X.shape[0], dtype=np.bool)
index = np.arange(self.start, self.here + self.consecutive,
dtype=np.uint16)
mask[index] = multitemp_mask(self.X[index, self._jx],
self.Y[:, index],
self.span_time / self.ndays,
crit=self.screening_crit,
green=self.green_band,
swir1=self.swir1_band)
# Check if there are enough observations for model with noise removed
_span_index = mask[index][:-self.consecutive].sum()
# Return if not enough observations
if _span_index < self.min_obs:
self.logger.debug(' multitemp masking - not enough obs')
return False
# There is enough observations in train period to fit - remove noise
self._X = self.X[mask, :]
self._Y = self.Y[:, mask]
# record our current position
# important for next iteration of noise removal
self._here = self.here
# Go forward after noise removal
self.here = self.start + _span_index - 1
if self.span_time < self.ndays:
self.logger.debug(' multitemp masking - not enough time')
self.here = self._here
return False
self.logger.debug('Updated "here"')
return True
    def train(self):
        """ Train time series model

        Attempts to fit an initial, stable model over the current training
        window. The window must span at least ``self.ndays`` days and
        ``self.n_coef`` observations, pass the noise screen, and produce
        residuals at its endpoints (and optionally a slope) below
        ``self.threshold``. On success the model enters monitoring mode;
        on instability the window start is advanced by one observation.

        Raises:
          TSLengthException: if noise removal leaves too few observations
        """
        # Test if we can train yet
        if self.span_time <= self.ndays or self.span_index < self.n_coef:
            self.logger.debug('could not train - moving forward')
            return
        # Check if screening was OK
        if not self.screen_timeseries():
            return
        # Test if we can still run after noise removal
        if self.here >= self._X.shape[0]:
            self.logger.debug(
                'Not enough observations to proceed after noise removal')
            raise TSLengthException(
                'Not enough observations after noise removal')
        # After noise removal, try to fit models
        models = self.fit_models(self._X[self.start:self.here + 1, :],
                                 self._Y[:, self.start:self.here + 1],
                                 bands=self.test_indices)
        # Ensure first and last points aren't unusual
        # (normalized residuals at window endpoints, floored by min_rmse)
        start_resid = np.zeros(len(self.test_indices))
        end_resid = np.zeros(len(self.test_indices))
        slope_resid = np.zeros(len(self.test_indices))
        for i, (b, m) in enumerate(zip(self.test_indices, models)):
            start_resid[i] = (np.abs(self._Y[b, self.start] -
                              m.predict(self._X[self.start, :])) /
                              max(self.min_rmse, m.rmse))
            end_resid[i] = (np.abs(self._Y[b, self.here] -
                            m.predict(self._X[self.here, :])) /
                            max(self.min_rmse, m.rmse))
            # Slope term: coef[1] scaled by window length in observations
            slope_resid[i] = (np.abs(m.coef[1] * (self.here - self.start)) /
                              max(self.min_rmse, m.rmse))
        if np.linalg.norm(start_resid) > self.threshold or \
                np.linalg.norm(end_resid) > self.threshold or \
                (self.slope_test and
                    np.linalg.norm(slope_resid) > self.threshold):
            # Unstable window: slide the start forward and retry from the
            # pre-noise-removal position
            self.logger.debug('Training period unstable')
            self.start += 1
            self.here = self._here
            return
        # Stable: promote the screened arrays and switch to monitoring
        self.X = self._X
        self.Y = self._Y
        self.logger.debug('Entering monitoring period')
        self.monitoring = True
    def update_model(self):
        """ Refit the current model if ``self.retrain_time`` days have
        elapsed since the last training; otherwise just extend the
        current record's end date to the present observation. """
        # Only train if enough time has past
        if abs(self.X[self.here, self._jx] - self.trained_date) > \
                self.retrain_time:
            self.logger.debug('Monitoring - retraining ({n} days since last)'.
                              format(n=self.X[self.here, self._jx] -
                                     self.trained_date))
            # Fit timeseries models
            self.models = self.fit_models(self.X[self.start:self.here + 1, :],
                                          self.Y[:, self.start:self.here + 1])
            # Update record
            self.record[self.n_record]['start'] = self.X[self.start, self._jx]
            self.record[self.n_record]['end'] = self.X[self.here, self._jx]
            for i, m in enumerate(self.models):
                self.record[self.n_record]['coef'][:, i] = m.coef
                self.record[self.n_record]['rmse'][i] = m.rmse
            self.logger.debug('Monitoring - updated ')
            self.trained_date = self.X[self.here, self._jx]
        else:
            # Update record with new end date
            self.record[self.n_record]['end'] = self.X[self.here, self._jx]
    def monitor(self):
        """ Monitor for changes in time series

        Computes normalized test scores for the next ``self.consecutive``
        observations. A change is declared only when ALL consecutive
        observations exceed ``self.threshold``; a single outlying
        observation may instead be removed as noise.
        """
        # Store test scores
        scores = np.zeros((self.consecutive, len(self.test_indices)),
                          dtype=np.float32)
        rmse = self.get_rmse()
        for i in range(self.consecutive):
            for i_b, b in enumerate(self.test_indices):
                m = self.models[b]
                # Get test score for future observations
                scores[i, i_b] = (
                    (self.Y[b, self.here + i] -
                        m.predict(self.X[self.here + i, :])) /
                    max(self.min_rmse, rmse[i_b])
                )
        # Check for scores above critical value
        # (L2 norm across test bands for each future observation)
        mag = np.linalg.norm(np.abs(scores), axis=1)
        if np.all(mag > self.threshold):
            self.logger.debug('CHANGE DETECTED')
            # Record break date
            self.record[self.n_record]['break'] = \
                self.X[self.here + 1, self._jx]
            # Record magnitude of difference for tested indices
            self.record[self.n_record]['magnitude'][self.test_indices] = \
                np.mean(scores, axis=0)
            # Open a fresh record and restart training after the break
            self.record = np.append(self.record, self.record_template)
            self.n_record += 1
            # Reset _X and _Y for re-training
            self._X = self.X
            self._Y = self.Y
            self.start = self.here + 1
            self.trained_date = 0
            self.monitoring = False
        elif mag[0] > self.threshold and self.remove_noise:
            # Only the immediate observation is anomalous: treat as noise
            # Masking way of deleting is faster than `np.delete`
            m = np.ones(self.X.shape[0], dtype=bool)
            m[self.here] = False
            self.X = self.X[m, :]
            self.Y = self.Y[:, m]
            self.here -= 1
def fit_models_GLMnet(self, X, Y, bands=None):
""" Try to fit models to training period time series """
if bands is None:
bands = self.fit_indices
models = []
for b in bands:
lasso = GLMLasso()
lasso = lasso.fit(X, Y[b, :], lambdas=20)
models.append(lasso)
return np.array(models)
def fit_models_LassoCV(self, X, Y, bands=None):
""" Try to fit models to training period time series """
if bands is None:
bands = self.fit_indices
models = []
for b in bands:
# lasso = LassoCV(n_alphas=100)
# lasso = LassoLarsCV(masx_n_alphas=100)
lasso = LassoLarsIC(criterion='bic')
lasso = lasso.fit(X, Y[b, :])
lasso.nobs = Y[b, :].size
lasso.coef = np.copy(lasso.coef_)
lasso.coef[0] += lasso.intercept_
lasso.fittedvalues = lasso.predict(X)
lasso.rss = np.sum((Y[b, :] - lasso.fittedvalues) ** 2)
lasso.rmse = math.sqrt(lasso.rss / lasso.nobs)
models.append(lasso)
return np.array(models)
def get_model_rmse(self):
""" Return the normal RMSE of each fitted model
Returns:
np.ndarray: NumPy array containing RMSE of each tested model
"""
return np.array([m.rmse for m in self.models])[self.test_indices]
    def get_dynamic_rmse(self):
        """ Return the dynamic RMSE for each model

        Dynamic RMSE refers to the Root Mean Squared Error calculated using
        `self.min_obs` number of observations closest in day of year to the
        observation `self.consecutive` steps into the future. Goal is to
        reduce false-positives during seasonal transitions (high variance in
        the signal) while decreasing omission during stable times of year.

        Returns:
          np.ndarray: NumPy array containing dynamic RMSE of each tested model

        """
        # Indices of closest observations based on DOY
        # (difference in dates modulo the year length, smallest first)
        # NOTE(review): argsort runs over the slice starting at self.start,
        # so i_doy is relative to self.start, yet it is used below to index
        # the full X/Y arrays -- correct only when self.start == 0; confirm
        i_doy = np.argsort(np.mod(
            self.X[self.start:self.here, self._jx] -
            self.X[self.here + self.consecutive, self._jx],
            self.ndays))[:self.min_obs]
        rmse = np.zeros(len(self.test_indices), np.float32)
        for i_b, b in enumerate(self.test_indices):
            m = self.models[b]
            # RMSE of the model over only the DOY-nearest observations
            rmse[i_b] = np.sqrt(np.sum(
                (self.Y[b, :].take(i_doy) -
                 m.predict(self.X.take(i_doy, axis=0))) ** 2)
                / i_doy.size)
        return rmse
| {
"content_hash": "1bf58e30c84174c7699f35a412fd795d",
"timestamp": "",
"source": "github",
"line_count": 800,
"max_line_length": 79,
"avg_line_length": 36.755,
"alnum_prop": 0.544245680859747,
"repo_name": "jmorton/yatsm",
"id": "158e5dc8e010c318da871441df3ab61242fc79cf",
"size": "29404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yatsm/yatsm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5529"
},
{
"name": "Makefile",
"bytes": "324"
},
{
"name": "Python",
"bytes": "252022"
},
{
"name": "Shell",
"bytes": "2414"
}
],
"symlink_target": ""
} |
"""
::BOH
$Id: connection.py,v 1.2 2006/12/07 05:25:13 peterk Exp $
$HeadURL: http://subversion/stuff/svn/owfs/trunk/ow/__init__.py $
Copyright (c) 2006 Peter Kropf. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
::EOH
OWFS is an open source project developed by Paul Alfille and hosted at
http://www.owfs.org
"""
import sys
import os
import socket
import struct
__author__ = 'Peter Kropf'
__email__ = 'pkropf@gmail.com'
__version__ = '$Id: connection.py,v 1.2 2006/12/07 05:25:13 peterk Exp $'.split()[2]
class OWMsg:
    """
    Constants for the owserver api message types.

    Each value is sent in the 'type' field of the owserver wire-protocol
    header (see Connection.pack).
    """
    error = 0     # error response
    nop = 1       # no-operation / keepalive
    read = 2      # read an attribute value
    write = 3     # write an attribute value
    dir = 4       # list a directory
    size = 5      # query the size of a value
    presence = 6  # check device presence
class Connection(object):
    """
    A Connection provides access to a owserver without the standard
    core ow libraries. Instead, it implements the wire protocol for
    communicating with the owserver. This allows Python programs to
    interact with the ow sensors on any platform supported by Python.
    """

    def __init__(self, server, port):
        """
        Create a new connection object.

        server: hostname or IP address of the owserver
        port:   TCP port the owserver listens on
        """
        self._server = server
        self._port = port

    def __str__(self):
        """
        Print a string representation of the Connection in the form of:

        server:port
        """
        return "%s:%i" % (self._server, self._port)

    def __repr__(self):
        """
        Print a representation of the Connection in the form of:

        Connection(server, port)

        Example:

        >>> Connection('xyzzy', 9876)
        Connection(server="xyzzy", port=9876)
        """
        return 'Connection("%s", %i)' % (self._server, self._port)

    def _connect(self):
        """Open a fresh TCP socket to the owserver."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self._server, self._port))
        return s

    def _recv_exact(self, sock, count):
        """
        Receive exactly `count` bytes from `sock`.

        socket.recv may legally return fewer bytes than requested, so
        loop until the full message has arrived. Raises ConnectionError
        if the server closes the connection mid-message.
        """
        chunks = []
        remaining = count
        while remaining:
            chunk = sock.recv(remaining)
            if not chunk:
                raise ConnectionError('owserver closed connection mid-message')
            chunks.append(chunk)
            remaining -= len(chunk)
        return b''.join(chunks)

    def read(self, path):
        """
        Read the value of the attribute at `path`.

        Returns the value converted via toNumber, or None when the
        server sends no payload.
        """
        s = self._connect()
        try:
            smsg = self.pack(OWMsg.read, len(path) + 1, 8192)
            s.sendall(smsg)
            s.sendall(path.encode() + b'\x00')
            # FIX: a bare recv(24) could return a short read and crash
            # unpack(); _recv_exact loops until the header is complete.
            header = self._recv_exact(s, 24)
            ret, payload_len, data_len = self.unpack(header)
            if payload_len:
                payload = self._recv_exact(s, payload_len)
                return self.toNumber(payload[:data_len])
            return None
        finally:
            # FIX: always release the socket, even on error
            s.close()

    def write(self, path, value):
        """
        Write `value` (converted to a string) to the attribute at `path`.

        Returns the server's return code.
        """
        s = self._connect()
        try:
            value = str(value)
            smsg = self.pack(OWMsg.write, len(path) + 1 + len(value) + 1,
                             len(value) + 1)
            s.sendall(smsg)
            s.sendall(path.encode() + b'\x00' + value.encode() + b'\x00')
            header = self._recv_exact(s, 24)
            ret, payload_len, data_len = self.unpack(header)
            return ret
        finally:
            s.close()

    def dir(self, path):
        """
        List the directory entries under `path`.

        Returns a list of entry names (as bytes); an empty-payload
        message from the server terminates the listing.
        """
        s = self._connect()
        try:
            smsg = self.pack(OWMsg.dir, len(path) + 1, 0)
            s.sendall(smsg)
            s.sendall(path.encode() + b'\x00')
            fields = []
            while True:
                header = self._recv_exact(s, 24)
                ret, payload_len, data_len = self.unpack(header)
                if not payload_len:
                    break
                payload = self._recv_exact(s, payload_len)
                fields.append(payload[:data_len])
            return fields
        finally:
            s.close()

    def pack(self, function, payload_len, data_len):
        """
        Build the 24-byte owserver request header: six 32-bit integers
        in network byte order.
        """
        return struct.pack('iiiiii',
                           socket.htonl(0),            # version
                           socket.htonl(payload_len),  # payload length
                           socket.htonl(function),     # type of function call
                           socket.htonl(258),          # format flags
                           socket.htonl(data_len),     # size of data element for read or write
                           socket.htonl(0),            # offset for read or write
                           )

    def unpack(self, msg):
        """
        Decode a 24-byte owserver response header.

        Returns (ret_value, payload_len, data_len); the version, format
        flags and offset fields are ignored.
        """
        val = struct.unpack('iiiiii', msg)
        payload_len = socket.ntohl(val[1])
        ret_value = socket.ntohl(val[2])
        data_len = socket.ntohl(val[4])
        return ret_value, payload_len, data_len

    def toNumber(self, str):
        """
        Convert a raw owserver payload to int or float when possible,
        otherwise return the decoded string unchanged.
        """
        # NOTE: the parameter name shadows the builtin `str`; it is kept
        # for backwards compatibility with existing callers.
        text = str.decode()
        stripped = text.strip()
        # FIX: use EAFP conversion so negative readings (e.g. '-3.5'
        # from a temperature sensor) parse as numbers; the previous
        # isdigit() checks returned them as plain strings.
        try:
            return int(stripped)
        except ValueError:
            pass
        try:
            return float(stripped)
        except ValueError:
            return text
| {
"content_hash": "51444b7c71024ee61bd3c8386c30bf6c",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 93,
"avg_line_length": 27.252336448598133,
"alnum_prop": 0.5442386831275721,
"repo_name": "Cirreth/shome",
"id": "b7d98362d40a6bbbd0e7d8dbceae414d158c6e44",
"size": "5832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/ownet/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "125643"
},
{
"name": "HTML",
"bytes": "31880"
},
{
"name": "JavaScript",
"bytes": "317583"
},
{
"name": "Python",
"bytes": "91654"
},
{
"name": "Smarty",
"bytes": "197"
}
],
"symlink_target": ""
} |
import os
import fnmatch
import numbers
import fcntl
import array
import mmap
import ctypes
import re
from os.path import abspath
from PIL import Image, ImageDraw
from struct import pack, unpack
from subprocess import Popen
# Port-search sentinels: the empty pattern matches any port name
# (Device._matches uses substring matching, and '' is in every string).
INPUT_AUTO = ''
OUTPUT_AUTO = ''
# -----------------------------------------------------------------------------
# Attribute reader/writer with cached file access
class FileCache(object):
    """Cache of open file handles for device attribute files.

    Attributes live in small sysfs files that are read/written repeatedly;
    keeping the handles open avoids the cost of re-opening them on every
    access.
    """
    def __init__(self):
        # Maps path -> open file object
        self._cache = {}
    def __del__(self):
        # Close every cached handle when the cache is garbage collected
        for f in self._cache.values():
            f.close()
    def file_handle(self, path, binary=False):
        """Manages the file handle cache and opening the files in the correct mode"""
        if path not in self._cache:
            # Pick the open mode from the file's actual permissions
            r_ok = os.access(path, os.R_OK)
            w_ok = os.access(path, os.W_OK)
            if r_ok and w_ok:
                mode = 'a+'
            elif w_ok:
                mode = 'a'
            else:
                mode = 'r'
            if binary:
                mode += 'b'
            # NOTE(review): buffering=0 (unbuffered) is only legal for
            # binary-mode files on Python 3; a text-mode open here would
            # raise ValueError -- confirm the intended interpreter/usage.
            f = open(path, mode, 0)
            self._cache[path] = f
        else:
            f = self._cache[path]
        return f
    def read(self, path):
        """Return the stripped contents of the attribute file at `path`."""
        f = self.file_handle(path)
        f.seek(0)
        return f.read().strip()
    def write(self, path, value):
        """Write `value` to the attribute file at `path`.

        NOTE(review): the handle may be open in append mode, so seek(0)
        does not control where the data lands -- presumably acceptable
        for sysfs attributes, which take each write as a new value;
        confirm against the target kernel interface.
        """
        f = self.file_handle(path)
        f.seek(0)
        f.write(value)
# -----------------------------------------------------------------------------
# Define the base class from which all other ev3dev classes are defined.
class Device(object):
    """The ev3dev device base class"""

    DEVICE_ROOT_PATH = '/sys/class'

    # Trailing digits of a sysfs entry name, e.g. 'motor3' -> 3
    _DEVICE_INDEX = re.compile(r'^.*(?P<idx>\d+)$')

    def __init__(self, class_name, name='*', **kwargs):
        """Spin through the Linux sysfs class for the device type and find
        a device that matches the provided name and attributes (if any).

        Parameters:
            class_name: class name of the device, a subdirectory of /sys/class.
                For example, 'tacho-motor'.
            name: pattern that device name should match.
                For example, 'sensor*' or 'motor*'. Default value: '*'.
            keyword arguments: used for matching the corresponding device
                attributes. For example, port_name='outA', or
                driver_name=['lego-ev3-us', 'lego-nxt-us']. When argument value
                is a list, then a match against any entry of the list is
                enough.

        Example::

            d = ev3dev.Device('tacho-motor', port_name='outA')
            s = ev3dev.Device('lego-sensor', driver_name=['lego-ev3-us', 'lego-nxt-us'])

        When connected succesfully, the `connected` attribute is set to True.
        """
        classpath = abspath(Device.DEVICE_ROOT_PATH + '/' + class_name)
        self._attribute_cache = FileCache()
        # `entry` (was `file`) avoids shadowing the Python 2 builtin
        for entry in os.listdir(classpath):
            if fnmatch.fnmatch(entry, name):
                self._path = abspath(classpath + '/' + entry)
                # See if requested attributes match
                # (generator keeps this lazy: stop at the first mismatch)
                if all(self._matches(k, kwargs[k]) for k in kwargs):
                    self.connected = True
                    match = Device._DEVICE_INDEX.match(entry)
                    if match:
                        self._device_index = int(match.group('idx'))
                    else:
                        self._device_index = None
                    return
        # No sysfs entry matched both the name pattern and the attributes
        self._path = ''
        self.connected = False

    def _matches(self, attribute, pattern):
        """Test if attribute value matches pattern (that is, if pattern is a
        substring of attribute value). If pattern is a list, then a match with
        any one entry is enough.
        """
        value = self._get_attribute(attribute)
        if isinstance(pattern, list):
            return any(pat in value for pat in pattern)
        return pattern in value

    def _get_attribute(self, attribute):
        """Device attribute getter"""
        return self._attribute_cache.read(abspath(self._path + '/' + attribute))

    def _set_attribute(self, attribute, value):
        """Device attribute setter"""
        self._attribute_cache.write(abspath(self._path + '/' + attribute), value)

    def get_attr_int(self, attribute):
        """Read an attribute and convert it to int."""
        return int(self._get_attribute(attribute))

    def set_attr_int(self, attribute, value):
        """Write an integer-valued attribute."""
        self._set_attribute(attribute, '{0:d}'.format(int(value)))

    def get_attr_string(self, attribute):
        """Read a string attribute."""
        return self._get_attribute(attribute)

    def set_attr_string(self, attribute, value):
        """Write a string attribute."""
        self._set_attribute(attribute, "{0}".format(value))

    def get_attr_line(self, attribute):
        """Read a whole-line attribute."""
        return self._get_attribute(attribute)

    def get_attr_set(self, attribute):
        """Read a space-separated attribute as a list, stripping the
        '[...]' marker from the currently-selected entry."""
        return [v.strip('[]') for v in self.get_attr_line(attribute).split()]

    def get_attr_from_set(self, attribute):
        """Return the currently-selected entry of a space-separated
        attribute (the one bracketed as '[value]'), or '' if none."""
        for a in self.get_attr_line(attribute).split():
            v = a.strip('[]')
            if v != a:
                return v
        return ""

    @property
    def device_index(self):
        # Numeric suffix of the sysfs entry name (None if none)
        return self._device_index
# ~autogen generic-class classes.motor>currentClass
class Motor(Device):
"""
The motor class provides a uniform interface for using motors with
positional and directional feedback such as the EV3 and NXT motors.
This feedback allows for precise control of the motors. This is the
most common type of motor, so we just call it `motor`.
"""
SYSTEM_CLASS_NAME = 'tacho-motor'
SYSTEM_DEVICE_NAME_CONVENTION = 'motor*'
def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
if port is not None:
kwargs['port_name'] = port
Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)
# ~autogen
# ~autogen generic-get-set classes.motor>currentClass
@property
def command(self):
"""
Sends a command to the motor controller. See `commands` for a list of
possible values.
"""
raise Exception("command is a write-only property!")
@command.setter
def command(self, value):
self.set_attr_string('command', value)
@property
def commands(self):
"""
Returns a list of commands that are supported by the motor
controller. Possible values are `run-forever`, `run-to-abs-pos`, `run-to-rel-pos`,
`run-timed`, `run-direct`, `stop` and `reset`. Not all commands may be supported.
- `run-forever` will cause the motor to run until another command is sent.
- `run-to-abs-pos` will run to an absolute position specified by `position_sp`
and then stop using the command specified in `stop_command`.
- `run-to-rel-pos` will run to a position relative to the current `position` value.
The new position will be current `position` + `position_sp`. When the new
position is reached, the motor will stop using the command specified by `stop_command`.
- `run-timed` will run the motor for the amount of time specified in `time_sp`
and then stop the motor using the command specified by `stop_command`.
- `run-direct` will run the motor at the duty cycle specified by `duty_cycle_sp`.
Unlike other run commands, changing `duty_cycle_sp` while running *will*
take effect immediately.
- `stop` will stop any of the run commands before they are complete using the
command specified by `stop_command`.
- `reset` will reset all of the motor parameter attributes to their default value.
This will also have the effect of stopping the motor.
"""
return self.get_attr_set('commands')
@property
def count_per_rot(self):
"""
Returns the number of tacho counts in one rotation of the motor. Tacho counts
are used by the position and speed attributes, so you can use this value
to convert rotations or degrees to tacho counts. In the case of linear
actuators, the units here will be counts per centimeter.
"""
return self.get_attr_int('count_per_rot')
@property
def driver_name(self):
"""
Returns the name of the driver that provides this tacho motor device.
"""
return self.get_attr_string('driver_name')
@property
def duty_cycle(self):
"""
Returns the current duty cycle of the motor. Units are percent. Values
are -100 to 100.
"""
return self.get_attr_int('duty_cycle')
@property
def duty_cycle_sp(self):
"""
Writing sets the duty cycle setpoint. Reading returns the current value.
Units are in percent. Valid values are -100 to 100. A negative value causes
the motor to rotate in reverse. This value is only used when `speed_regulation`
is off.
"""
return self.get_attr_int('duty_cycle_sp')
@duty_cycle_sp.setter
def duty_cycle_sp(self, value):
self.set_attr_int('duty_cycle_sp', value)
@property
def encoder_polarity(self):
"""
Sets the polarity of the rotary encoder. This is an advanced feature to all
use of motors that send inversed encoder signals to the EV3. This should
be set correctly by the driver of a device. It You only need to change this
value if you are using a unsupported device. Valid values are `normal` and
`inversed`.
"""
return self.get_attr_string('encoder_polarity')
@encoder_polarity.setter
def encoder_polarity(self, value):
self.set_attr_string('encoder_polarity', value)
@property
def polarity(self):
"""
Sets the polarity of the motor. With `normal` polarity, a positive duty
cycle will cause the motor to rotate clockwise. With `inversed` polarity,
a positive duty cycle will cause the motor to rotate counter-clockwise.
Valid values are `normal` and `inversed`.
"""
return self.get_attr_string('polarity')
@polarity.setter
def polarity(self, value):
self.set_attr_string('polarity', value)
@property
def port_name(self):
"""
Returns the name of the port that the motor is connected to.
"""
return self.get_attr_string('port_name')
@property
def position(self):
"""
Returns the current position of the motor in pulses of the rotary
encoder. When the motor rotates clockwise, the position will increase.
Likewise, rotating counter-clockwise causes the position to decrease.
Writing will set the position to that value.
"""
return self.get_attr_int('position')
@position.setter
def position(self, value):
self.set_attr_int('position', value)
@property
def position_p(self):
"""
The proportional constant for the position PID.
"""
return self.get_attr_int('hold_pid/Kp')
@position_p.setter
def position_p(self, value):
self.set_attr_int('hold_pid/Kp', value)
@property
def position_i(self):
"""
The integral constant for the position PID.
"""
return self.get_attr_int('hold_pid/Ki')
@position_i.setter
def position_i(self, value):
self.set_attr_int('hold_pid/Ki', value)
@property
def position_d(self):
"""
The derivative constant for the position PID.
"""
return self.get_attr_int('hold_pid/Kd')
@position_d.setter
def position_d(self, value):
self.set_attr_int('hold_pid/Kd', value)
@property
def position_sp(self):
"""
Writing specifies the target position for the `run-to-abs-pos` and `run-to-rel-pos`
commands. Reading returns the current value. Units are in tacho counts. You
can use the value returned by `counts_per_rot` to convert tacho counts to/from
rotations or degrees.
"""
return self.get_attr_int('position_sp')
@position_sp.setter
def position_sp(self, value):
self.set_attr_int('position_sp', value)
@property
def speed(self):
"""
Returns the current motor speed in tacho counts per second. Not, this is
not necessarily degrees (although it is for LEGO motors). Use the `count_per_rot`
attribute to convert this value to RPM or deg/sec.
"""
return self.get_attr_int('speed')
@property
def speed_sp(self):
"""
Writing sets the target speed in tacho counts per second used when `speed_regulation`
is on. Reading returns the current value. Use the `count_per_rot` attribute
to convert RPM or deg/sec to tacho counts per second.
"""
return self.get_attr_int('speed_sp')
@speed_sp.setter
def speed_sp(self, value):
self.set_attr_int('speed_sp', value)
@property
def ramp_up_sp(self):
"""
Writing sets the ramp up setpoint. Reading returns the current value. Units
are in milliseconds. When set to a value > 0, the motor will ramp the power
sent to the motor from 0 to 100% duty cycle over the span of this setpoint
when starting the motor. If the maximum duty cycle is limited by `duty_cycle_sp`
or speed regulation, the actual ramp time duration will be less than the setpoint.
"""
return self.get_attr_int('ramp_up_sp')
@ramp_up_sp.setter
def ramp_up_sp(self, value):
self.set_attr_int('ramp_up_sp', value)
@property
def ramp_down_sp(self):
"""
Writing sets the ramp down setpoint. Reading returns the current value. Units
are in milliseconds. When set to a value > 0, the motor will ramp the power
sent to the motor from 100% duty cycle down to 0 over the span of this setpoint
when stopping the motor. If the starting duty cycle is less than 100%, the
ramp time duration will be less than the full span of the setpoint.
"""
return self.get_attr_int('ramp_down_sp')
@ramp_down_sp.setter
def ramp_down_sp(self, value):
self.set_attr_int('ramp_down_sp', value)
@property
def speed_regulation_enabled(self):
"""
Turns speed regulation on or off. If speed regulation is on, the motor
controller will vary the power supplied to the motor to try to maintain the
speed specified in `speed_sp`. If speed regulation is off, the controller
will use the power specified in `duty_cycle_sp`. Valid values are `on` and
`off`.
"""
return self.get_attr_string('speed_regulation')
@speed_regulation_enabled.setter
def speed_regulation_enabled(self, value):
self.set_attr_string('speed_regulation', value)
@property
def speed_regulation_p(self):
"""
The proportional constant for the speed regulation PID.
"""
return self.get_attr_int('speed_pid/Kp')
@speed_regulation_p.setter
def speed_regulation_p(self, value):
self.set_attr_int('speed_pid/Kp', value)
@property
def speed_regulation_i(self):
"""
The integral constant for the speed regulation PID.
"""
return self.get_attr_int('speed_pid/Ki')
@speed_regulation_i.setter
def speed_regulation_i(self, value):
self.set_attr_int('speed_pid/Ki', value)
@property
def speed_regulation_d(self):
"""
The derivative constant for the speed regulation PID.
"""
return self.get_attr_int('speed_pid/Kd')
@speed_regulation_d.setter
def speed_regulation_d(self, value):
self.set_attr_int('speed_pid/Kd', value)
@property
def state(self):
"""
Reading returns a list of state flags. Possible flags are
`running`, `ramping` `holding` and `stalled`.
"""
return self.get_attr_set('state')
@property
def stop_command(self):
"""
Reading returns the current stop command. Writing sets the stop command.
The value determines the motors behavior when `command` is set to `stop`.
Also, it determines the motors behavior when a run command completes. See
`stop_commands` for a list of possible values.
"""
return self.get_attr_string('stop_command')
@stop_command.setter
def stop_command(self, value):
self.set_attr_string('stop_command', value)
@property
def stop_commands(self):
"""
Returns a list of stop modes supported by the motor controller.
Possible values are `coast`, `brake` and `hold`. `coast` means that power will
be removed from the motor and it will freely coast to a stop. `brake` means
that power will be removed from the motor and a passive electrical load will
be placed on the motor. This is usually done by shorting the motor terminals
together. This load will absorb the energy from the rotation of the motors and
cause the motor to stop more quickly than coasting. `hold` does not remove
power from the motor. Instead it actively try to hold the motor at the current
position. If an external force tries to turn the motor, the motor will 'push
back' to maintain its position.
"""
return self.get_attr_set('stop_commands')
@property
def time_sp(self):
"""
Writing specifies the amount of time the motor will run when using the
`run-timed` command. Reading returns the current value. Units are in
milliseconds.
"""
return self.get_attr_int('time_sp')
@time_sp.setter
def time_sp(self, value):
self.set_attr_int('time_sp', value)
# ~autogen
# ~autogen generic-property-value classes.motor>currentClass
# Run the motor until another command is sent.
COMMAND_RUN_FOREVER = 'run-forever'
# Run to an absolute position specified by `position_sp` and then
# stop using the command specified in `stop_command`.
COMMAND_RUN_TO_ABS_POS = 'run-to-abs-pos'
# Run to a position relative to the current `position` value.
# The new position will be current `position` + `position_sp`.
# When the new position is reached, the motor will stop using
# the command specified by `stop_command`.
COMMAND_RUN_TO_REL_POS = 'run-to-rel-pos'
# Run the motor for the amount of time specified in `time_sp`
# and then stop the motor using the command specified by `stop_command`.
COMMAND_RUN_TIMED = 'run-timed'
# Run the motor at the duty cycle specified by `duty_cycle_sp`.
# Unlike other run commands, changing `duty_cycle_sp` while running *will*
# take effect immediately.
COMMAND_RUN_DIRECT = 'run-direct'
# Stop any of the run commands before they are complete using the
# command specified by `stop_command`.
COMMAND_STOP = 'stop'
# Reset all of the motor parameter attributes to their default value.
# This will also have the effect of stopping the motor.
COMMAND_RESET = 'reset'
# Sets the normal polarity of the rotary encoder.
ENCODER_POLARITY_NORMAL = 'normal'
# Sets the inversed polarity of the rotary encoder.
ENCODER_POLARITY_INVERSED = 'inversed'
# With `normal` polarity, a positive duty cycle will
# cause the motor to rotate clockwise.
POLARITY_NORMAL = 'normal'
# With `inversed` polarity, a positive duty cycle will
# cause the motor to rotate counter-clockwise.
POLARITY_INVERSED = 'inversed'
# The motor controller will vary the power supplied to the motor
# to try to maintain the speed specified in `speed_sp`.
SPEED_REGULATION_ON = 'on'
# The motor controller will use the power specified in `duty_cycle_sp`.
SPEED_REGULATION_OFF = 'off'
# Power will be removed from the motor and it will freely coast to a stop.
STOP_COMMAND_COAST = 'coast'
# Power will be removed from the motor and a passive electrical load will
# be placed on the motor. This is usually done by shorting the motor terminals
# together. This load will absorb the energy from the rotation of the motors and
# cause the motor to stop more quickly than coasting.
STOP_COMMAND_BRAKE = 'brake'
# Does not remove power from the motor. Instead it actively try to hold the motor
# at the current position. If an external force tries to turn the motor, the motor
# will ``push back`` to maintain its position.
STOP_COMMAND_HOLD = 'hold'
# ~autogen
# ~autogen motor_commands classes.motor>currentClass
def run_forever(self, **kwargs):
"""Run the motor until another command is sent.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = 'run-forever'
def run_to_abs_pos(self, **kwargs):
"""Run to an absolute position specified by `position_sp` and then
stop using the command specified in `stop_command`.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = 'run-to-abs-pos'
def run_to_rel_pos(self, **kwargs):
"""Run to a position relative to the current `position` value.
The new position will be current `position` + `position_sp`.
When the new position is reached, the motor will stop using
the command specified by `stop_command`.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = 'run-to-rel-pos'
def run_timed(self, **kwargs):
"""Run the motor for the amount of time specified in `time_sp`
and then stop the motor using the command specified by `stop_command`.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = 'run-timed'
def run_direct(self, **kwargs):
"""Run the motor at the duty cycle specified by `duty_cycle_sp`.
Unlike other run commands, changing `duty_cycle_sp` while running *will*
take effect immediately.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = 'run-direct'
def stop(self, **kwargs):
    """Stop any of the run commands before completion, using the command
    specified by `stop_command`.

    Keyword arguments are assigned to the motor as attributes before
    the command is issued.
    """
    for name, value in kwargs.items():
        setattr(self, name, value)
    self.command = 'stop'
def reset(self, **kwargs):
    """Reset all motor parameter attributes to their default values.

    This also has the effect of stopping the motor.  Keyword arguments
    are assigned to the motor as attributes before the command is issued.
    """
    for name, value in kwargs.items():
        setattr(self, name, value)
    self.command = 'reset'
# ~autogen
# ~autogen generic-class classes.largeMotor>currentClass
class LargeMotor(Motor):
    """
    EV3 large servo motor.
    """

    SYSTEM_CLASS_NAME = Motor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Motor.SYSTEM_DEVICE_NAME_CONVENTION

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a large-motor binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-l-motor'], **kwargs)
# ~autogen
# ~autogen generic-class classes.mediumMotor>currentClass
class MediumMotor(Motor):
    """
    EV3 medium servo motor.
    """

    SYSTEM_CLASS_NAME = Motor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Motor.SYSTEM_DEVICE_NAME_CONVENTION

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a medium-motor binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-m-motor'], **kwargs)
# ~autogen
# ~autogen generic-class classes.dcMotor>currentClass
class DcMotor(Device):
    """
    The DC motor class provides a uniform interface for using regular DC motors
    with no fancy controls or feedback. This includes LEGO MINDSTORMS RCX motors
    and LEGO Power Functions motors.
    """

    SYSTEM_CLASS_NAME = 'dc-motor'
    SYSTEM_DEVICE_NAME_CONVENTION = 'motor*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a dc-motor binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)

    # ~autogen
    # ~autogen generic-get-set classes.dcMotor>currentClass

    @property
    def command(self):
        """
        Sets the command for the motor. Possible values are `run-forever`, `run-timed` and
        `stop`. Not all commands may be supported, so be sure to check the contents
        of the `commands` attribute.
        """
        # The underlying sysfs `command` attribute is write-only, so reading
        # it through the property is an error.
        raise Exception("command is a write-only property!")

    @command.setter
    def command(self, value):
        self.set_attr_string('command', value)

    @property
    def commands(self):
        """
        Returns a list of commands supported by the motor
        controller.
        """
        return self.get_attr_set('commands')

    @property
    def driver_name(self):
        """
        Returns the name of the motor driver that loaded this device. See the list
        of [supported devices] for a list of drivers.
        """
        return self.get_attr_string('driver_name')

    @property
    def duty_cycle(self):
        """
        Shows the current duty cycle of the PWM signal sent to the motor. Values
        are -100 to 100 (-100% to 100%).
        """
        return self.get_attr_int('duty_cycle')

    @property
    def duty_cycle_sp(self):
        """
        Writing sets the duty cycle setpoint of the PWM signal sent to the motor.
        Valid values are -100 to 100 (-100% to 100%). Reading returns the current
        setpoint.
        """
        return self.get_attr_int('duty_cycle_sp')

    @duty_cycle_sp.setter
    def duty_cycle_sp(self, value):
        self.set_attr_int('duty_cycle_sp', value)

    @property
    def polarity(self):
        """
        Sets the polarity of the motor. Valid values are `normal` and `inversed`.
        """
        return self.get_attr_string('polarity')

    @polarity.setter
    def polarity(self, value):
        self.set_attr_string('polarity', value)

    @property
    def port_name(self):
        """
        Returns the name of the port that the motor is connected to.
        """
        return self.get_attr_string('port_name')

    @property
    def ramp_down_sp(self):
        """
        Sets the time in milliseconds that it take the motor to ramp down from 100%
        to 0%. Valid values are 0 to 10000 (10 seconds). Default is 0.
        """
        return self.get_attr_int('ramp_down_sp')

    @ramp_down_sp.setter
    def ramp_down_sp(self, value):
        self.set_attr_int('ramp_down_sp', value)

    @property
    def ramp_up_sp(self):
        """
        Sets the time in milliseconds that it take the motor to up ramp from 0% to
        100%. Valid values are 0 to 10000 (10 seconds). Default is 0.
        """
        return self.get_attr_int('ramp_up_sp')

    @ramp_up_sp.setter
    def ramp_up_sp(self, value):
        self.set_attr_int('ramp_up_sp', value)

    @property
    def state(self):
        """
        Gets a list of flags indicating the motor status. Possible
        flags are `running` and `ramping`. `running` indicates that the motor is
        powered. `ramping` indicates that the motor has not yet reached the
        `duty_cycle_sp`.
        """
        return self.get_attr_set('state')

    @property
    def stop_command(self):
        """
        Sets the stop command that will be used when the motor stops. Read
        `stop_commands` to get the list of valid values.
        """
        # Like `command`, this sysfs attribute is write-only.
        raise Exception("stop_command is a write-only property!")

    @stop_command.setter
    def stop_command(self, value):
        self.set_attr_string('stop_command', value)

    @property
    def stop_commands(self):
        """
        Gets a list of stop commands. Valid values are `coast`
        and `brake`.
        """
        return self.get_attr_set('stop_commands')

    @property
    def time_sp(self):
        """
        Writing specifies the amount of time the motor will run when using the
        `run-timed` command. Reading returns the current value. Units are in
        milliseconds.
        """
        return self.get_attr_int('time_sp')

    @time_sp.setter
    def time_sp(self, value):
        self.set_attr_int('time_sp', value)

    # ~autogen
    # ~autogen generic-property-value classes.dcMotor>currentClass

    # Valid values for the `command` attribute:
    # Run the motor until another command is sent.
    COMMAND_RUN_FOREVER = 'run-forever'
    # Run the motor for the amount of time specified in `time_sp`
    # and then stop the motor using the command specified by `stop_command`.
    COMMAND_RUN_TIMED = 'run-timed'
    # Run the motor at the duty cycle specified by `duty_cycle_sp`.
    # Unlike other run commands, changing `duty_cycle_sp` while running *will*
    # take effect immediately.
    COMMAND_RUN_DIRECT = 'run-direct'
    # Stop any of the run commands before they are complete using the
    # command specified by `stop_command`.
    COMMAND_STOP = 'stop'

    # Valid values for the `polarity` attribute:
    # With `normal` polarity, a positive duty cycle will
    # cause the motor to rotate clockwise.
    POLARITY_NORMAL = 'normal'
    # With `inversed` polarity, a positive duty cycle will
    # cause the motor to rotate counter-clockwise.
    POLARITY_INVERSED = 'inversed'

    # Valid values for the `stop_command` attribute:
    # Power will be removed from the motor and it will freely coast to a stop.
    STOP_COMMAND_COAST = 'coast'
    # Power will be removed from the motor and a passive electrical load will
    # be placed on the motor. This is usually done by shorting the motor terminals
    # together. This load will absorb the energy from the rotation of the motors and
    # cause the motor to stop more quickly than coasting.
    STOP_COMMAND_BRAKE = 'brake'

    # ~autogen
    # ~autogen motor_commands classes.dcMotor>currentClass

    def run_forever(self, **kwargs):
        """Run the motor until another command is sent.

        Keyword arguments are assigned to the motor as attributes before the
        command is issued.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'run-forever'

    def run_timed(self, **kwargs):
        """Run the motor for the amount of time specified in `time_sp`
        and then stop the motor using the command specified by `stop_command`.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'run-timed'

    def run_direct(self, **kwargs):
        """Run the motor at the duty cycle specified by `duty_cycle_sp`.
        Unlike other run commands, changing `duty_cycle_sp` while running *will*
        take effect immediately.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'run-direct'

    def stop(self, **kwargs):
        """Stop any of the run commands before they are complete using the
        command specified by `stop_command`.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'stop'
# ~autogen
# ~autogen generic-class classes.servoMotor>currentClass
class ServoMotor(Device):
    """
    The servo motor class provides a uniform interface for using hobby type
    servo motors.
    """

    SYSTEM_CLASS_NAME = 'servo-motor'
    SYSTEM_DEVICE_NAME_CONVENTION = 'motor*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a servo-motor binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)

    # ~autogen
    # ~autogen generic-get-set classes.servoMotor>currentClass

    @property
    def command(self):
        """
        Sets the command for the servo. Valid values are `run` and `float`. Setting
        to `run` will cause the servo to be driven to the position_sp set in the
        `position_sp` attribute. Setting to `float` will remove power from the motor.
        """
        # The underlying sysfs `command` attribute is write-only.
        raise Exception("command is a write-only property!")

    @command.setter
    def command(self, value):
        self.set_attr_string('command', value)

    @property
    def driver_name(self):
        """
        Returns the name of the motor driver that loaded this device. See the list
        of [supported devices] for a list of drivers.
        """
        return self.get_attr_string('driver_name')

    @property
    def max_pulse_sp(self):
        """
        Used to set the pulse size in milliseconds for the signal that tells the
        servo to drive to the maximum (clockwise) position_sp. Default value is 2400.
        Valid values are 2300 to 2700. You must write to the position_sp attribute for
        changes to this attribute to take effect.

        NOTE(review): the 600-2700 value range suggests these pulse sizes are
        actually in *micro*seconds -- confirm against the driver documentation.
        """
        return self.get_attr_int('max_pulse_sp')

    @max_pulse_sp.setter
    def max_pulse_sp(self, value):
        self.set_attr_int('max_pulse_sp', value)

    @property
    def mid_pulse_sp(self):
        """
        Used to set the pulse size in milliseconds for the signal that tells the
        servo to drive to the mid position_sp. Default value is 1500. Valid
        values are 1300 to 1700. For example, on a 180 degree servo, this would be
        90 degrees. On continuous rotation servo, this is the 'neutral' position_sp
        where the motor does not turn. You must write to the position_sp attribute for
        changes to this attribute to take effect.
        """
        return self.get_attr_int('mid_pulse_sp')

    @mid_pulse_sp.setter
    def mid_pulse_sp(self, value):
        self.set_attr_int('mid_pulse_sp', value)

    @property
    def min_pulse_sp(self):
        """
        Used to set the pulse size in milliseconds for the signal that tells the
        servo to drive to the minimum (counter-clockwise) position_sp. Default value
        is 600. Valid values are 300 to 700. You must write to the position_sp
        attribute for changes to this attribute to take effect.
        """
        return self.get_attr_int('min_pulse_sp')

    @min_pulse_sp.setter
    def min_pulse_sp(self, value):
        self.set_attr_int('min_pulse_sp', value)

    @property
    def polarity(self):
        """
        Sets the polarity of the servo. Valid values are `normal` and `inversed`.
        Setting the value to `inversed` will cause the position_sp value to be
        inversed. i.e `-100` will correspond to `max_pulse_sp`, and `100` will
        correspond to `min_pulse_sp`.
        """
        return self.get_attr_string('polarity')

    @polarity.setter
    def polarity(self, value):
        self.set_attr_string('polarity', value)

    @property
    def port_name(self):
        """
        Returns the name of the port that the motor is connected to.
        """
        return self.get_attr_string('port_name')

    @property
    def position_sp(self):
        """
        Reading returns the current position_sp of the servo. Writing instructs the
        servo to move to the specified position_sp. Units are percent. Valid values
        are -100 to 100 (-100% to 100%) where `-100` corresponds to `min_pulse_sp`,
        `0` corresponds to `mid_pulse_sp` and `100` corresponds to `max_pulse_sp`.
        """
        return self.get_attr_int('position_sp')

    @position_sp.setter
    def position_sp(self, value):
        self.set_attr_int('position_sp', value)

    @property
    def rate_sp(self):
        """
        Sets the rate_sp at which the servo travels from 0 to 100.0% (half of the full
        range of the servo). Units are in milliseconds. Example: Setting the rate_sp
        to 1000 means that it will take a 180 degree servo 2 second to move from 0
        to 180 degrees. Note: Some servo controllers may not support this in which
        case reading and writing will fail with `-EOPNOTSUPP`. In continuous rotation
        servos, this value will affect the rate_sp at which the speed ramps up or down.
        """
        return self.get_attr_int('rate_sp')

    @rate_sp.setter
    def rate_sp(self, value):
        self.set_attr_int('rate_sp', value)

    @property
    def state(self):
        """
        Returns a list of flags indicating the state of the servo.
        Possible values are:
        * `running`: Indicates that the motor is powered.
        """
        return self.get_attr_set('state')

    # ~autogen
    # ~autogen generic-property-value classes.servoMotor>currentClass

    # Valid values for the `command` attribute:
    # Drive servo to the position set in the `position_sp` attribute.
    COMMAND_RUN = 'run'
    # Remove power from the motor.
    COMMAND_FLOAT = 'float'

    # Valid values for the `polarity` attribute:
    # With `normal` polarity, a positive duty cycle will
    # cause the motor to rotate clockwise.
    POLARITY_NORMAL = 'normal'
    # With `inversed` polarity, a positive duty cycle will
    # cause the motor to rotate counter-clockwise.
    POLARITY_INVERSED = 'inversed'

    # ~autogen
    # ~autogen motor_commands classes.servoMotor>currentClass

    def run(self, **kwargs):
        """Drive servo to the position set in the `position_sp` attribute.

        Keyword arguments are assigned to the servo as attributes before the
        command is issued.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'run'

    def float(self, **kwargs):
        """Remove power from the motor.
        """
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.command = 'float'
# ~autogen
# ~autogen generic-class classes.sensor>currentClass
class Sensor(Device):
    """
    The sensor class provides a uniform interface for using most of the
    sensors available for the EV3. The various underlying device drivers will
    create a `lego-sensor` device for interacting with the sensors.

    Sensors are primarily controlled by setting the `mode` and monitored by
    reading the `value<N>` attributes. Values can be converted to floating point
    if needed by `value<N>` / 10.0 ^ `decimals`.

    Since the name of the `sensor<N>` device node does not correspond to the port
    that a sensor is plugged in to, you must look at the `port_name` attribute if
    you need to know which port a sensor is plugged in to. However, if you don't
    have more than one sensor of each type, you can just look for a matching
    `driver_name`. Then it will not matter which port a sensor is plugged in to - your
    program will still work.
    """

    SYSTEM_CLASS_NAME = 'lego-sensor'
    SYSTEM_DEVICE_NAME_CONVENTION = 'sensor*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a lego-sensor binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)

    # ~autogen
    # ~autogen generic-get-set classes.sensor>currentClass

    @property
    def command(self):
        """
        Sends a command to the sensor.
        """
        # The underlying sysfs `command` attribute is write-only.
        raise Exception("command is a write-only property!")

    @command.setter
    def command(self, value):
        self.set_attr_string('command', value)

    @property
    def commands(self):
        """
        Returns a list of the valid commands for the sensor.
        Returns -EOPNOTSUPP if no commands are supported.
        """
        return self.get_attr_set('commands')

    @property
    def decimals(self):
        """
        Returns the number of decimal places for the values in the `value<N>`
        attributes of the current mode.
        """
        return self.get_attr_int('decimals')

    @property
    def driver_name(self):
        """
        Returns the name of the sensor device/driver. See the list of [supported
        sensors] for a complete list of drivers.
        """
        return self.get_attr_string('driver_name')

    @property
    def mode(self):
        """
        Returns the current mode. Writing one of the values returned by `modes`
        sets the sensor to that mode.
        """
        return self.get_attr_string('mode')

    @mode.setter
    def mode(self, value):
        self.set_attr_string('mode', value)

    @property
    def modes(self):
        """
        Returns a list of the valid modes for the sensor.
        """
        return self.get_attr_set('modes')

    @property
    def num_values(self):
        """
        Returns the number of `value<N>` attributes that will return a valid value
        for the current mode.
        """
        return self.get_attr_int('num_values')

    @property
    def port_name(self):
        """
        Returns the name of the port that the sensor is connected to, e.g. `ev3:in1`.
        I2C sensors also include the I2C address (decimal), e.g. `ev3:in1:i2c8`.
        """
        return self.get_attr_string('port_name')

    @property
    def units(self):
        """
        Returns the units of the measured value for the current mode. May return
        empty string
        """
        return self.get_attr_string('units')

    # ~autogen

    def value(self, n=0):
        """Return the reading of the `value<n>` attribute as an int.

        `n` may be an integer, a real number (rounded to the nearest whole
        number) or a string; any other type yields 0.
        """
        if isinstance(n, numbers.Integral):
            n = '{0:d}'.format(n)
        elif isinstance(n, numbers.Real):
            n = '{0:.0f}'.format(n)

        if isinstance(n, str):
            return self.get_attr_int('value'+n)
        else:
            return 0

    @property
    def bin_data_format(self):
        """
        Returns the format of the values in `bin_data` for the current mode.
        Possible values are:

        - `u8`: Unsigned 8-bit integer (byte)
        - `s8`: Signed 8-bit integer (sbyte)
        - `u16`: Unsigned 16-bit integer (ushort)
        - `s16`: Signed 16-bit integer (short)
        - `s16_be`: Signed 16-bit integer, big endian
        - `s32`: Signed 32-bit integer (int)
        - `float`: IEEE 754 32-bit floating point (float)
        """
        return self.get_attr_string('bin_data_format')

    def bin_data(self, fmt=None):
        """
        Returns the unscaled raw values in the `value<N>` attributes as raw byte
        array. Use `bin_data_format`, `num_values` and the individual sensor
        documentation to determine how to interpret the data.

        Use `fmt` to unpack the raw bytes into a struct.

        Example::

            >>> from ev3dev import *
            >>> ir = InfraredSensor()
            >>> ir.value()
            28
            >>> ir.bin_data('<b')
            (28,)
        """
        # Compute and cache the buffer size on first use: bytes-per-sample
        # (derived from `bin_data_format`, defaulting to 1) times `num_values`.
        if '_bin_data_size' not in self.__dict__:
            self._bin_data_size = {
                "u8": 1,
                "s8": 1,
                "u16": 2,
                "s16": 2,
                "s16_be": 2,
                "s32": 4,
                "float": 4
            }.get(self.bin_data_format, 1) * self.num_values

        # NOTE(review): relies on `_attribute_cache` and `_path` being set up
        # by Device -- confirm against the Device implementation.
        f = self._attribute_cache.file_handle(abspath(self._path + '/bin_data'), binary=True)
        f.seek(0)
        raw = bytearray(f.read(self._bin_data_size))

        if fmt is None: return raw

        return unpack(fmt, raw)
# ~autogen generic-class classes.i2cSensor>currentClass
class I2cSensor(Sensor):
    """
    A generic interface to control I2C-type EV3 sensors.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to an nxt-i2c-sensor device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['nxt-i2c-sensor'], **kwargs)

    @property
    def fw_version(self):
        """Firmware version reported by the sensor, when available.
        Currently only I2C/NXT sensors support this.
        """
        return self.get_attr_string('fw_version')

    @property
    def poll_ms(self):
        """Polling period of the sensor in milliseconds.

        Writing sets the polling period; setting 0 disables polling.  The
        minimum value is hard coded as 50 msec.  Returns -EOPNOTSUPP when
        changing the period is not supported (currently only I2C/NXT
        sensors support changing it).
        """
        return self.get_attr_int('poll_ms')

    @poll_ms.setter
    def poll_ms(self, value):
        self.set_attr_int('poll_ms', value)
# ~autogen
# ~autogen generic-class classes.colorSensor>currentClass
class ColorSensor(Sensor):
    """
    LEGO EV3 color sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Reflected light intensity; red LED on.
    MODE_COL_REFLECT = 'COL-REFLECT'
    # Ambient light intensity; red LEDs off.
    MODE_COL_AMBIENT = 'COL-AMBIENT'
    # Detected color; all LEDs cycle rapidly, appearing white.
    MODE_COL_COLOR = 'COL-COLOR'
    # Raw reflected reading; red LED on.
    MODE_REF_RAW = 'REF-RAW'
    # Raw color components; all LEDs cycle rapidly, appearing white.
    MODE_RGB_RAW = 'RGB-RAW'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a lego-ev3-color device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-color'], **kwargs)
# ~autogen
# ~autogen generic-class classes.ultrasonicSensor>currentClass
class UltrasonicSensor(Sensor):
    """
    LEGO EV3 ultrasonic sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Continuous measurement in centimeters (LEDs on, steady).
    MODE_US_DIST_CM = 'US-DIST-CM'
    # Continuous measurement in inches (LEDs on, steady).
    MODE_US_DIST_IN = 'US-DIST-IN'
    # Listen mode (LEDs on, blinking).
    MODE_US_LISTEN = 'US-LISTEN'
    # Single measurement in centimeters
    # (LEDs on momentarily when the mode is set, then off).
    MODE_US_SI_CM = 'US-SI-CM'
    # Single measurement in inches
    # (LEDs on momentarily when the mode is set, then off).
    MODE_US_SI_IN = 'US-SI-IN'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to an EV3/NXT ultrasonic device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-us', 'lego-nxt-us'], **kwargs)
# ~autogen
# ~autogen generic-class classes.gyroSensor>currentClass
class GyroSensor(Sensor):
    """
    LEGO EV3 gyro sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Angle
    MODE_GYRO_ANG = 'GYRO-ANG'
    # Rotational speed
    MODE_GYRO_RATE = 'GYRO-RATE'
    # Raw sensor value
    MODE_GYRO_FAS = 'GYRO-FAS'
    # Angle and rotational speed
    MODE_GYRO_G_A = 'GYRO-G&A'
    # Calibration ???
    MODE_GYRO_CAL = 'GYRO-CAL'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a lego-ev3-gyro device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-gyro'], **kwargs)
# ~autogen
# ~autogen generic-class classes.infraredSensor>currentClass
class InfraredSensor(Sensor):
    """
    LEGO EV3 infrared sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Proximity
    MODE_IR_PROX = 'IR-PROX'
    # IR Seeker
    MODE_IR_SEEK = 'IR-SEEK'
    # IR Remote Control
    MODE_IR_REMOTE = 'IR-REMOTE'
    # IR Remote Control; the state of the buttons is coded in binary
    MODE_IR_REM_A = 'IR-REM-A'
    # Calibration ???
    MODE_IR_CAL = 'IR-CAL'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a lego-ev3-ir device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-ir'], **kwargs)
# ~autogen
# ~autogen generic-class classes.soundSensor>currentClass
class SoundSensor(Sensor):
    """
    LEGO NXT Sound Sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Sound pressure level, flat weighting.
    MODE_DB = 'DB'
    # Sound pressure level, A weighting.
    MODE_DBA = 'DBA'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a lego-nxt-sound device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-nxt-sound'], **kwargs)
# ~autogen
# ~autogen generic-class classes.lightSensor>currentClass
class LightSensor(Sensor):
    """
    LEGO NXT Light Sensor.
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    # Reflected light, LED on.
    MODE_REFLECT = 'REFLECT'
    # Ambient light, LED off.
    MODE_AMBIENT = 'AMBIENT'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a lego-nxt-light device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-nxt-light'], **kwargs)
# ~autogen
# ~autogen generic-class classes.touchSensor>currentClass
class TouchSensor(Sensor):
    """
    Touch Sensor (EV3 or NXT variants).
    """

    SYSTEM_CLASS_NAME = Sensor.SYSTEM_CLASS_NAME
    SYSTEM_DEVICE_NAME_CONVENTION = Sensor.SYSTEM_DEVICE_NAME_CONVENTION

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Bind to a touch-sensor device; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name,
                        driver_name=['lego-ev3-touch', 'lego-nxt-touch'], **kwargs)
# ~autogen
# ~autogen generic-class classes.led>currentClass
class Led(Device):
    """
    Any device controlled by the generic LED driver.
    See https://www.kernel.org/doc/Documentation/leds/leds-class.txt
    for more details.
    """

    SYSTEM_CLASS_NAME = 'leds'
    SYSTEM_DEVICE_NAME_CONVENTION = '*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create an LED binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)

    # ~autogen
    # ~autogen generic-get-set classes.led>currentClass

    @property
    def max_brightness(self):
        """
        Returns the maximum allowable brightness value.
        """
        return self.get_attr_int('max_brightness')

    @property
    def brightness(self):
        """
        Sets the brightness level. Possible values are from 0 to `max_brightness`.
        """
        return self.get_attr_int('brightness')

    @brightness.setter
    def brightness(self, value):
        self.set_attr_int('brightness', value)

    @property
    def triggers(self):
        """
        Returns a list of available triggers.
        """
        # NOTE(review): this reads the `trigger` sysfs attribute, which lists
        # all triggers (with the active one bracketed) -- confirm that
        # get_attr_set parses that form.
        return self.get_attr_set('trigger')

    @property
    def trigger(self):
        """
        Sets the led trigger. A trigger
        is a kernel based source of led events. Triggers can either be simple or
        complex. A simple trigger isn't configurable and is designed to slot into
        existing subsystems with minimal additional code. Examples are the `ide-disk` and
        `nand-disk` triggers.

        Complex triggers whilst available to all LEDs have LED specific
        parameters and work on a per LED basis. The `timer` trigger is an example.
        The `timer` trigger will periodically change the LED brightness between
        0 and the current brightness setting. The `on` and `off` time can
        be specified via `delay_{on,off}` attributes in milliseconds.
        You can change the brightness value of a LED independently of the timer
        trigger. However, if you set the brightness value to 0 it will
        also disable the `timer` trigger.
        """
        return self.get_attr_from_set('trigger')

    @trigger.setter
    def trigger(self, value):
        self.set_attr_string('trigger', value)

    @property
    def delay_on(self):
        """
        The `timer` trigger will periodically change the LED brightness between
        0 and the current brightness setting. The `on` time can
        be specified via `delay_on` attribute in milliseconds.
        """
        return self.get_attr_int('delay_on')

    @delay_on.setter
    def delay_on(self, value):
        self.set_attr_int('delay_on', value)

    @property
    def delay_off(self):
        """
        The `timer` trigger will periodically change the LED brightness between
        0 and the current brightness setting. The `off` time can
        be specified via `delay_off` attribute in milliseconds.
        """
        return self.get_attr_int('delay_off')

    @delay_off.setter
    def delay_off(self, value):
        self.set_attr_int('delay_off', value)

    # ~autogen

    @property
    def brightness_pct(self):
        """
        Returns led brightness as a fraction of max_brightness
        """
        return float(self.brightness) / self.max_brightness

    @brightness_pct.setter
    def brightness_pct(self, value):
        # NOTE(review): `value * self.max_brightness` may be a float while
        # `brightness` is written with set_attr_int -- confirm that
        # set_attr_int coerces/accepts non-integer values.
        self.brightness = value * self.max_brightness
class ButtonBase(object):
    """
    Abstract button interface.

    Subclasses provide a ``buttons_pressed`` property returning the list of
    currently pressed button names, and may define ``on_<button>`` handler
    attributes (callables taking a single bool: True when pressed).
    """

    # Button names seen on the previous process() call.  Class-level default;
    # process() rebinds it on the instance, so the shared set is never mutated.
    _state = set()

    @staticmethod
    def on_change(changed_buttons):
        """
        This handler is called by `process()` whenever state of any button has
        changed since last `process()` call. `changed_buttons` is a list of
        tuples of changed button names and their states.
        """
        pass

    def any(self):
        """
        Checks if any button is pressed.
        """
        return bool(self.buttons_pressed)

    def check_buttons(self, buttons=()):
        """
        Check if currently pressed buttons exactly match the given list.
        """
        # An immutable tuple default avoids the shared-mutable-default
        # pitfall; any iterable of button names is accepted, as before.
        return set(self.buttons_pressed) == set(buttons)

    def process(self):
        """
        Check for currently pressed buttons. If the new state differs from the
        old state, call the appropriate button event handlers.
        """
        new_state = set(self.buttons_pressed)
        old_state = self._state
        self._state = new_state

        state_diff = new_state.symmetric_difference(old_state)
        for button in state_diff:
            # A subclass may not define a handler for every button; treat a
            # missing ``on_<button>`` attribute the same as one set to None
            # instead of raising AttributeError.
            handler = getattr(self, 'on_' + button, None)
            if handler is not None:
                handler(button in new_state)

        if self.on_change is not None and state_diff:
            self.on_change([(button, button in new_state) for button in state_diff])
class ButtonEVIO(ButtonBase):
    """
    Provides a generic button reading mechanism that works with event interface
    and may be adapted to platform specific implementations.

    This implementation depends on the availability of the EVIOCGKEY ioctl
    to be able to read the button state buffer. See Linux kernel source
    in /include/uapi/linux/input.h for details.
    """

    KEY_MAX = 0x2FF
    # One bit per key code, rounded up to whole bytes.
    KEY_BUF_LEN = int((KEY_MAX + 7) / 8)
    # EVIOCGKEY ioctl request number, assembled by hand:
    # direction (read = 2) | buffer length | magic 'E' | command 0x18,
    # mirroring the kernel's _IOR('E', 0x18, len) macro.
    EVIOCGKEY = (2 << (14 + 8 + 8) | KEY_BUF_LEN << (8 + 8) | ord('E') << 8 | 0x18)

    # Map of button name -> {'name': <input device file>, 'value': <key code>},
    # populated by platform-specific subclasses.
    _buttons = {}

    def __init__(self):
        self._file_cache = FileCache()
        self._buffer_cache = {}
        # Pre-open the device files and pre-allocate the state buffers for
        # every known button.
        for b in self._buttons:
            self._button_file(self._buttons[b]['name'])
            self._button_buffer(self._buttons[b]['name'])

    def _button_file(self, name):
        # `name` is the path of the input event device file.
        return self._file_cache.file_handle(name)

    def _button_buffer(self, name):
        # One reusable byte buffer per device file, sized to hold the full
        # key-state bitmap.
        if name not in self._buffer_cache:
            self._buffer_cache[name] = array.array('B', [0] * self.KEY_BUF_LEN)
        return self._buffer_cache[name]

    @property
    def buttons_pressed(self):
        """
        Returns list of names of pressed buttons.
        """
        # Refresh the key-state bitmap of every device file in one ioctl each.
        for b in self._buffer_cache:
            fcntl.ioctl(self._button_file(b), self.EVIOCGKEY, self._buffer_cache[b])

        pressed = []
        for k, v in self._buttons.items():
            buf = self._buffer_cache[v['name']]
            bit = v['value']
            # NOTE(review): a button is reported as pressed when its bit is
            # CLEAR.  EVIOCGKEY normally sets the bit for a held key, so
            # verify this inversion is intentional for this platform.
            if not bool(buf[int(bit / 8)] & 1 << bit % 8):
                pressed += [k]
        return pressed
# ~autogen remote-control classes.infraredSensor.remoteControl>currentClass
class RemoteControl(ButtonBase):
    """
    EV3 Remote Controller
    """

    # Mapping of the IR sensor's IR-REMOTE channel value (0-11) to the list
    # of remote buttons that value represents.
    _BUTTON_VALUES = {
        0: [],
        1: ['red_up'],
        2: ['red_down'],
        3: ['blue_up'],
        4: ['blue_down'],
        5: ['red_up', 'blue_up'],
        6: ['red_up', 'blue_down'],
        7: ['red_down', 'blue_up'],
        8: ['red_down', 'blue_down'],
        9: ['beacon'],
        10: ['red_up', 'red_down'],
        11: ['blue_up', 'blue_down']
    }

    # Per-button event handlers used by ButtonBase.process(); assign callables
    # taking a single bool (True when the button becomes pressed).
    on_red_up = None
    on_red_down = None
    on_blue_up = None
    on_blue_down = None
    on_beacon = None

    @property
    def red_up(self):
        """
        Checks if `red_up` button is pressed.
        """
        return 'red_up' in self.buttons_pressed

    @property
    def red_down(self):
        """
        Checks if `red_down` button is pressed.
        """
        return 'red_down' in self.buttons_pressed

    @property
    def blue_up(self):
        """
        Checks if `blue_up` button is pressed.
        """
        return 'blue_up' in self.buttons_pressed

    @property
    def blue_down(self):
        """
        Checks if `blue_down` button is pressed.
        """
        return 'blue_down' in self.buttons_pressed

    @property
    def beacon(self):
        """
        Checks if `beacon` button is pressed.
        """
        return 'beacon' in self.buttons_pressed

    # ~autogen

    def __init__(self, sensor=None, channel=1):
        """Wrap an infrared sensor as a remote-control receiver.

        `sensor` defaults to a new InfraredSensor; `channel` (1-4) selects
        which remote channel to listen on.
        """
        if sensor is None:
            self._sensor = InfraredSensor()
        else:
            self._sensor = sensor

        # Clamp channel to 1..4, then convert to the 0-based index used to
        # select the sensor's value<N> attribute.
        self._channel = max(1, min(4, channel)) - 1
        self._state = set([])

        if self._sensor.connected:
            self._sensor.mode = 'IR-REMOTE'

    @property
    def buttons_pressed(self):
        """
        Returns list of currently pressed buttons.
        """
        # Unknown sensor values decode to "no buttons pressed".
        return RemoteControl._BUTTON_VALUES.get(self._sensor.value(self._channel), [])
# ~autogen generic-class classes.powerSupply>currentClass
class PowerSupply(Device):
    """
    A generic interface to read data from the system's power_supply class.
    Uses the built-in legoev3-battery if none is specified.
    """

    SYSTEM_CLASS_NAME = 'power_supply'
    SYSTEM_DEVICE_NAME_CONVENTION = '*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        """Create a power-supply binding; `port` is shorthand for `port_name`."""
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)

    # ~autogen
    # ~autogen generic-get-set classes.powerSupply>currentClass

    @property
    def measured_current(self):
        """
        The measured current that the battery is supplying (in microamps)
        """
        return self.get_attr_int('current_now')

    @property
    def measured_voltage(self):
        """
        The measured voltage that the battery is supplying (in microvolts)
        """
        return self.get_attr_int('voltage_now')

    @property
    def max_voltage(self):
        """
        The design maximum voltage of the battery (reads `voltage_max_design`;
        presumably in microvolts like `voltage_now` -- confirm).
        """
        return self.get_attr_int('voltage_max_design')

    @property
    def min_voltage(self):
        """
        The design minimum voltage of the battery (reads `voltage_min_design`;
        presumably in microvolts like `voltage_now` -- confirm).
        """
        return self.get_attr_int('voltage_min_design')

    @property
    def technology(self):
        """
        The battery technology string reported by the `technology` attribute.
        """
        return self.get_attr_string('technology')

    @property
    def type(self):
        """
        The power-supply type string reported by the `type` attribute.
        """
        return self.get_attr_string('type')

    # ~autogen

    @property
    def measured_amps(self):
        """
        The measured current that the battery is supplying (in amps)
        """
        return self.measured_current / 1e6

    @property
    def measured_volts(self):
        """
        The measured voltage that the battery is supplying (in volts)
        """
        return self.measured_voltage / 1e6
# ~autogen generic-class classes.legoPort>currentClass
class LegoPort(Device):
    """
    The `lego-port` class provides an interface for working with input and
    output ports that are compatible with LEGO MINDSTORMS RCX/NXT/EV3, LEGO
    WeDo and LEGO Power Functions sensors and motors. Supported devices include
    the LEGO MINDSTORMS EV3 Intelligent Brick, the LEGO WeDo USB hub and
    various sensor multiplexers from 3rd party manufacturers.
    Some types of ports may have multiple modes of operation. For example, the
    input ports on the EV3 brick can communicate with sensors using UART, I2C
    or analog validate signals - but not all at the same time. Therefore there
    are multiple modes available to connect to the different types of sensors.
    In most cases, ports are able to automatically detect what type of sensor
    or motor is connected. In some cases though, this must be manually specified
    using the `mode` and `set_device` attributes. The `mode` attribute affects
    how the port communicates with the connected device. For example the input
    ports on the EV3 brick can communicate using UART, I2C or analog voltages,
    but not all at the same time, so the mode must be set to the one that is
    appropriate for the connected sensor. The `set_device` attribute is used to
    specify the exact type of sensor that is connected. Note: the mode must be
    correctly set before setting the sensor type.
    Ports can be found at `/sys/class/lego-port/port<N>` where `<N>` is
    incremented each time a new port is registered. Note: The number is not
    related to the actual port at all - use the `port_name` attribute to find
    a specific port.
    """

    SYSTEM_CLASS_NAME = 'lego_port'
    SYSTEM_DEVICE_NAME_CONVENTION = '*'

    def __init__(self, port=None, name=SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
        # Forward 'port' to the Device lookup as its 'port_name' filter.
        if port is not None:
            kwargs['port_name'] = port
        Device.__init__(self, self.SYSTEM_CLASS_NAME, name, **kwargs)
    # ~autogen

    # ~autogen generic-get-set classes.legoPort>currentClass
    @property
    def driver_name(self):
        """
        Returns the name of the driver that loaded this device. You can find the
        complete list of drivers in the [list of port drivers].
        """
        return self.get_attr_string('driver_name')

    @property
    def modes(self):
        """
        Returns a list of the available modes of the port.
        """
        return self.get_attr_set('modes')

    @property
    def mode(self):
        """
        Reading returns the currently selected mode. Writing sets the mode.
        Generally speaking when the mode changes any sensor or motor devices
        associated with the port will be removed new ones loaded, however this
        this will depend on the individual driver implementing this class.
        """
        return self.get_attr_string('mode')

    @mode.setter
    def mode(self, value):
        self.set_attr_string('mode', value)

    @property
    def port_name(self):
        """
        Returns the name of the port. See individual driver documentation for
        the name that will be returned.
        """
        return self.get_attr_string('port_name')

    @property
    def set_device(self):
        """
        For modes that support it, writing the name of a driver will cause a new
        device to be registered for that driver and attached to this port. For
        example, since NXT/Analog sensors cannot be auto-detected, you must use
        this attribute to load the correct driver. Returns -EOPNOTSUPP if setting a
        device is not supported.
        """
        # Write-only attribute: the getter exists only so the setter can be
        # declared; reading it always raises.
        raise Exception("set_device is a write-only property!")

    @set_device.setter
    def set_device(self, value):
        self.set_attr_string('set_device', value)

    @property
    def status(self):
        """
        In most cases, reading status will return the same value as `mode`. In
        cases where there is an `auto` mode additional values may be returned,
        such as `no-device` or `error`. See individual port driver documentation
        for the full list of possible values.
        """
        return self.get_attr_string('status')
# ~autogen
class FbMem(object):
    """The framebuffer memory object.
    Made of:
    - the framebuffer file descriptor
    - the fix screen info struct
    - the var screen info struct
    - the mapped memory
    """

    # ------------------------------------------------------------------
    # The code is adapted from
    # https://github.com/LinkCareServices/cairotft/blob/master/cairotft/linuxfb.py
    #
    # The original code came with the following license:
    # ------------------------------------------------------------------
    # Copyright (c) 2012 Kurichan
    #
    # This program is free software. It comes without any warranty, to
    # the extent permitted by applicable law. You can redistribute it
    # and/or modify it under the terms of the Do What The Fuck You Want
    # To Public License, Version 2, as published by Sam Hocevar. See
    # http://sam.zoy.org/wtfpl/COPYING for more details.
    # ------------------------------------------------------------------

    __slots__ = ('fid', 'fix_info', 'var_info', 'mmap')

    # ioctl request numbers used below to query the framebuffer driver.
    FBIOGET_VSCREENINFO = 0x4600
    FBIOGET_FSCREENINFO = 0x4602

    # Monochrome visual constants (defined but not referenced in this class;
    # presumably for callers interpreting fix_info.visual -- confirm usage).
    FB_VISUAL_MONO01 = 0
    FB_VISUAL_MONO10 = 1

    class FixScreenInfo(ctypes.Structure):
        """The fb_fix_screeninfo from fb.h."""
        _fields_ = [
            ('id_name', ctypes.c_char * 16),
            ('smem_start', ctypes.c_ulong),
            ('smem_len', ctypes.c_uint32),
            ('type', ctypes.c_uint32),
            ('type_aux', ctypes.c_uint32),
            ('visual', ctypes.c_uint32),
            ('xpanstep', ctypes.c_uint16),
            ('ypanstep', ctypes.c_uint16),
            ('ywrapstep', ctypes.c_uint16),
            ('line_length', ctypes.c_uint32),
            ('mmio_start', ctypes.c_ulong),
            ('mmio_len', ctypes.c_uint32),
            ('accel', ctypes.c_uint32),
            ('reserved', ctypes.c_uint16 * 3),
        ]

    class VarScreenInfo(ctypes.Structure):
        class FbBitField(ctypes.Structure):
            """The fb_bitfield struct from fb.h."""
            _fields_ = [
                ('offset', ctypes.c_uint32),
                ('length', ctypes.c_uint32),
                ('msb_right', ctypes.c_uint32),
            ]
        # NOTE(review): the string below is a no-op expression, not this
        # class's docstring (it follows the nested class definition). Kept
        # as-is to stay byte-identical.
        """The fb_var_screeninfo struct from fb.h."""
        _fields_ = [
            ('xres', ctypes.c_uint32),
            ('yres', ctypes.c_uint32),
            ('xres_virtual', ctypes.c_uint32),
            ('yres_virtual', ctypes.c_uint32),
            ('xoffset', ctypes.c_uint32),
            ('yoffset', ctypes.c_uint32),
            ('bits_per_pixel', ctypes.c_uint32),
            ('grayscale', ctypes.c_uint32),
            ('red', FbBitField),
            ('green', FbBitField),
            ('blue', FbBitField),
            ('transp', FbBitField),
        ]

    def __init__(self, fbdev=None):
        """Create the FbMem framebuffer memory object."""
        fid = FbMem._open_fbdev(fbdev)
        fix_info = FbMem._get_fix_info(fid)
        # Map before storing, so a failed mmap leaves no half-built object.
        fbmmap = FbMem._map_fb_memory(fid, fix_info)
        self.fid = fid
        self.fix_info = fix_info
        self.var_info = FbMem._get_var_info(fid)
        self.mmap = fbmmap

    def __del__(self):
        """Close the FbMem framebuffer memory object."""
        self.mmap.close()
        FbMem._close_fbdev(self.fid)

    @staticmethod
    def _open_fbdev(fbdev=None):
        """Return the framebuffer file descriptor.
        Try to use the FRAMEBUFFER
        environment variable if fbdev is not given. Use '/dev/fb0' by
        default.
        """
        dev = fbdev or os.getenv('FRAMEBUFFER', '/dev/fb0')
        fbfid = os.open(dev, os.O_RDWR)
        return fbfid

    @staticmethod
    def _close_fbdev(fbfid):
        """Close the framebuffer file descriptor."""
        os.close(fbfid)

    @staticmethod
    def _get_fix_info(fbfid):
        """Return the fix screen info from the framebuffer file descriptor."""
        fix_info = FbMem.FixScreenInfo()
        # The driver fills the struct in place via the ioctl.
        fcntl.ioctl(fbfid, FbMem.FBIOGET_FSCREENINFO, fix_info)
        return fix_info

    @staticmethod
    def _get_var_info(fbfid):
        """Return the var screen info from the framebuffer file descriptor."""
        var_info = FbMem.VarScreenInfo()
        fcntl.ioctl(fbfid, FbMem.FBIOGET_VSCREENINFO, var_info)
        return var_info

    @staticmethod
    def _map_fb_memory(fbfid, fix_info):
        """Map the framebuffer memory."""
        # Shared read/write mapping over the whole screen memory region.
        return mmap.mmap(
            fbfid,
            fix_info.smem_len,
            mmap.MAP_SHARED,
            mmap.PROT_READ | mmap.PROT_WRITE,
            offset=0
        )
class Screen(FbMem):
    """
    A convenience wrapper for the FbMem class.
    Provides drawing functions from the python imaging library (PIL).
    """

    def __init__(self):
        """Open the framebuffer and create a matching PIL drawing surface."""
        FbMem.__init__(self)

        # Image width derives from the framebuffer line length (bytes) and
        # the bit depth.  BUG FIX: use floor division so the result stays an
        # int under Python 3 as well ('/' would yield a float, which
        # Image.new rejects); behavior is unchanged on Python 2.
        width = self.fix_info.line_length * 8 // self.var_info.bits_per_pixel
        mode = "1" if self.var_info.bits_per_pixel == 1 else "RGB"
        self._img = Image.new(mode, (width, self.yres), "white")
        self._draw = ImageDraw.Draw(self._img)

    @property
    def xres(self):
        """
        Horizontal screen resolution
        """
        return self.var_info.xres

    @property
    def yres(self):
        """
        Vertical screen resolution
        """
        return self.var_info.yres

    @property
    def shape(self):
        """
        Dimensions of the screen.
        """
        return (self.xres, self.yres)

    @property
    def draw(self):
        """
        Returns a handle to PIL.ImageDraw.Draw class associated with the screen.
        Example::
            screen.draw.rectangle((10,10,60,20), fill='black')
        """
        return self._draw

    def clear(self):
        """
        Clears the screen
        """
        self._draw.rectangle(((0, 0), self.shape), fill="white")

    def _color565(self, r, g, b):
        """Convert red, green, blue components to a 16-bit 565 RGB value. Components
        should be values 0 to 255.
        """
        return (((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3))

    def _img_to_rgb565_bytes(self):
        """Serialize the RGB image buffer as packed native-byte-order uint16
        RGB565 values, one per pixel."""
        pixels = [self._color565(r, g, b) for (r, g, b) in self._img.getdata()]
        return pack('H' * len(pixels), *pixels)

    def update(self):
        """
        Applies pending changes to the screen.
        Nothing will be drawn on the screen until this function is called.
        """
        if self.var_info.bits_per_pixel == 1:
            # Raw 1-bit, inverted (1;IR) layout expected by the mono display.
            self.mmap[:] = self._img.tobytes("raw", "1;IR")
        elif self.var_info.bits_per_pixel == 16:
            self.mmap[:] = self._img_to_rgb565_bytes()
        else:
            raise Exception("Not supported")
class Sound:
    """
    Sound-related functions. The class has only static methods and is not
    intended for instantiation. It can beep, play wav files, or convert text to
    speech.
    Note that all methods of the class spawn system processes and return
    subprocess.Popen objects. The methods are asynchronous (they return
    immediately after child process was spawned, without waiting for its
    completion), but you can call wait() on the returned result.
    Examples::
        # Play 'bark.wav', return immediately:
        Sound.play('bark.wav')
        # Introduce yourself, wait for completion:
        Sound.speak('Hello, I am Robot').wait()
    """

    @staticmethod
    def beep(args=''):
        """
        Call beep command with the provided arguments (if any).
        See `beep man page`_ and google 'linux beep music' for inspiration.
        .. _`beep man page`: http://manpages.debian.org/cgi-bin/man.cgi?query=beep
        """
        # NOTE(review): 'args' is interpolated into a shell command with
        # shell=True -- never pass untrusted input here.
        with open(os.devnull, 'w') as n:
            return Popen('/usr/bin/beep %s' % args, stdout=n, shell=True)

    @staticmethod
    def tone(*args):
        """
        tone(tone_sequence):
        Play tone sequence. The tone_sequence parameter is a list of tuples,
        where each tuple contains up to three numbers. The first number is
        frequency in Hz, the second is duration in milliseconds, and the third
        is delay in milliseconds between this and the next tone in the
        sequence.
        Here is a cheerful example::
            Sound.tone([
                (392, 350, 100), (392, 350, 100), (392, 350, 100), (311.1, 250, 100),
                (466.2, 25, 100), (392, 350, 100), (311.1, 250, 100), (466.2, 25, 100),
                (392, 700, 100), (587.32, 350, 100), (587.32, 350, 100),
                (587.32, 350, 100), (622.26, 250, 100), (466.2, 25, 100),
                (369.99, 350, 100), (311.1, 250, 100), (466.2, 25, 100), (392, 700, 100),
                (784, 350, 100), (392, 250, 100), (392, 25, 100), (784, 350, 100),
                (739.98, 250, 100), (698.46, 25, 100), (659.26, 25, 100),
                (622.26, 25, 100), (659.26, 50, 400), (415.3, 25, 200), (554.36, 350, 100),
                (523.25, 250, 100), (493.88, 25, 100), (466.16, 25, 100), (440, 25, 100),
                (466.16, 50, 400), (311.13, 25, 200), (369.99, 350, 100),
                (311.13, 250, 100), (392, 25, 100), (466.16, 350, 100), (392, 250, 100),
                (466.16, 25, 100), (587.32, 700, 100), (784, 350, 100), (392, 250, 100),
                (392, 25, 100), (784, 350, 100), (739.98, 250, 100), (698.46, 25, 100),
                (659.26, 25, 100), (622.26, 25, 100), (659.26, 50, 400), (415.3, 25, 200),
                (554.36, 350, 100), (523.25, 250, 100), (493.88, 25, 100),
                (466.16, 25, 100), (440, 25, 100), (466.16, 50, 400), (311.13, 25, 200),
                (392, 350, 100), (311.13, 250, 100), (466.16, 25, 100),
                (392.00, 300, 150), (311.13, 250, 100), (466.16, 25, 100), (392, 700)
            ]).wait()
        tone(frequency, duration):
        Play single tone of given frequency (Hz) and duration (milliseconds).
        """
        def play_tone_sequence(tone_sequence):
            # Render one '-n -f … -l … -d …' group per tuple; '-n' starts a
            # new note for the single 'beep' invocation.
            def beep_args(frequency=None, duration=None, delay=None):
                args = '-n '
                if frequency is not None: args += '-f %s ' % frequency
                if duration is not None: args += '-l %s ' % duration
                if delay is not None: args += '-d %s ' % delay
                return args
            return Sound.beep(' '.join([beep_args(*t) for t in tone_sequence]))

        if len(args) == 1:
            return play_tone_sequence(args[0])
        elif len(args) == 2:
            # (frequency, duration) convenience form.
            return play_tone_sequence([(args[0], args[1])])
        else:
            raise Exception("Unsupported number of parameters in Sound.tone()")

    @staticmethod
    def play(wav_file):
        """
        Play wav file.
        """
        # NOTE(review): wav_file is interpolated into a shell command
        # (shell=True); quotes in the filename would break out of the "%s".
        with open(os.devnull, 'w') as n:
            return Popen('/usr/bin/aplay -q "%s"' % wav_file, stdout=n, shell=True)

    @staticmethod
    def speak(text):
        """
        Speak the given text aloud.
        """
        # NOTE(review): text is interpolated into a shell pipeline
        # (shell=True); do not pass untrusted input.
        with open(os.devnull, 'w') as n:
            return Popen('/usr/bin/espeak -a 200 --stdout "%s" | /usr/bin/aplay -q' % text, stdout=n, shell=True)
| {
"content_hash": "37c8aff24835675a51581f2638b3bdc0",
"timestamp": "",
"source": "github",
"line_count": 2349,
"max_line_length": 119,
"avg_line_length": 32.29714772243508,
"alnum_prop": 0.6075185194948989,
"repo_name": "ddemidov/ev3dev-lang-python-1",
"id": "2ea64aa282650e3b22908d79fe47e44c41edd061",
"size": "77444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ev3dev/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Liquid",
"bytes": "7679"
},
{
"name": "Python",
"bytes": "92062"
}
],
"symlink_target": ""
} |
from PipelineTools.packages import metadata
import maya.cmds as cmds
import os
#print os.getpid()
import pymel.core as pm
class FacialRig(metadata.MetaRig):
    '''
    Facial-rig flavour of MetaRig: binds the studio's default attribute set
    when the node's data is bound.
    '''

    def __init__(self, *args, **kws):
        super(FacialRig, self).__init__(*args, **kws)
        self.lockState = False

    def __bindData__(self):
        '''
        Bind the default facial-rig attributes to this node.  Locked entries
        record build provenance and are created with l=True.
        '''
        built_by = '{} {}'.format(os.environ.get('COMPUTERNAME'),
                                  os.environ.get('USERNAME'))
        # (attribute name, initial value, locked) -- creation order matters,
        # so keep this sequence intact.
        attr_specs = [
            ('RigType', 'FacialRig', True),
            ('CharacterName', '', False),
            ('BuildBy', built_by, True),
            ('Branch', os.environ.get('USERDOMAIN'), True),
            ('BuildDate', pm.date(), True),
            # Model groups
            ('FaceGp', '', False),
            ('EyeGp', '', False),
            ('FaceDeformGp', '', False),
            ('FacialTargetPath', '', False),
            # Bones
            ('HeadBone', 'CH_Head', False),
            ('FacialBoneGp', '', False),
            ('EyeBoneGp', '', False),
            # Controls
            ('BlendshapeCtl', '', False),
            ('FacialCtl', '', False),
            ('EyeCtl', '', False),
            ('TongueCtl', '', False),
            ('JawCtl', '', False),
        ]
        for attr_name, value, locked in attr_specs:
            if locked:
                self.addAttr(attr_name, value, l=True)
            else:
                self.addAttr(attr_name, value)
#r9Meta.registerMClassInheritanceMapping()
if __name__ == 'main':
try:
networks = pm.ls(type=pm.nt.Network)
networks.extend(pm.selected())
for networknode in networks:
pm.lockNode(networknode, lock=False)
pm.delete(networknode)
except:
pass
myRigmetaGp = pm.nt.Transform(name='facialGP')
myRigmeta = FacialRig(myRigmetaGp.name())
myRigmeta.CharacterName='KG'
#myRigmeta.addRigCtrl(pm.selected()[0],'new')
#pm.select(myRigmeta)
print myRigmeta
#pm.delete(myRigmeta) | {
"content_hash": "ec002642c44d47eb6bb5cc349d2c20eb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 114,
"avg_line_length": 35.56603773584906,
"alnum_prop": 0.6047745358090185,
"repo_name": "josephkirk/PipelineTools",
"id": "24415333c01333f81dbc3c0b75cf7e5bb276a1f2",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etc/testCustomMetaData.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "39475"
},
{
"name": "C++",
"bytes": "1615"
},
{
"name": "JavaScript",
"bytes": "1743"
},
{
"name": "Makefile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "2574046"
}
],
"symlink_target": ""
} |
import unittest
from sections.core import ComplexSection, SimpleSection, Dimensions, cached_property
class ComplexSectionTests(unittest.TestCase):
    """Unit tests for sections.core.ComplexSection."""

    def setUp(self):
        # Create a subclass of ComplexSection which suppresses
        # NotImplementedErrors
        class BasicImplementation(ComplexSection):
            sections = []
            densities = []
            def update_sections(self):
                pass
        self.ComplexSection = BasicImplementation

    def test_set_dimensions_calls_update_sections(self):
        # Replace the hook with one that raises, so the call is observable.
        def error_raiser():
            raise ValueError
        section = self.ComplexSection()
        section.update_sections = error_raiser
        self.assertRaises(ValueError, section.set_dimensions)

    def test_initialization_of_sections(self):
        class Section(self.ComplexSection):
            sections = [SimpleSection, SimpleSection]
            densities = NotImplemented
        section = Section()
        # Class-level section *types* must be instantiated per instance.
        self.assertTrue(all(isinstance(s, SimpleSection) for s in section.sections))

    def test_initialization_of_densities(self):
        # densities = NotImplemented must default to one 1.0 per section.
        class Section1(self.ComplexSection):
            sections = [SimpleSection, SimpleSection]
            densities = NotImplemented
        class Section2(self.ComplexSection):
            sections = [SimpleSection, SimpleSection]
            densities = [2, -3]
        class Section3(self.ComplexSection):
            sections = [SimpleSection, SimpleSection]
            densities = [2, 3, 4]
        section1 = Section1()
        section2 = Section2()
        self.assertEqual(len(section1.densities), len(section1.sections))
        self.assertEqual(section1.densities, [1.0, 1.0])
        self.assertTrue(all(isinstance(d, float) for d in section1.densities))
        self.assertEqual(section2.densities, [2.0, -3.0])
        self.assertTrue(all(isinstance(d, float) for d in section2.densities))
        # ValueError should be raised if number of densities and sections don't match
        self.assertRaises(ValueError, Section3)

    def test_density_propagates_to_subsections(self):
        class Section(self.ComplexSection):
            sections = [SimpleSection, SimpleSection]
            densities = [2, -3]
        section = Section()
        self.assertEqual(section.sections[0].density, 2.0)
        self.assertEqual(section.sections[1].density, -3.0)
        # set_density scales each subsection's density by the factor while
        # the class-level densities list stays unchanged.
        section.set_density(-2)
        self.assertEqual(section.densities, [2.0, -3.0])
        self.assertEqual(section.sections[0].density, -4.0)
        self.assertEqual(section.sections[1].density, 6.0)

    def test_cached_properties(self):
        # Each derived quantity must be wrapped by the cached_property
        # descriptor on the class itself.
        self.assertTrue(isinstance(ComplexSection._cog, cached_property))
        self.assertTrue(isinstance(ComplexSection.cog, cached_property))
        self.assertTrue(isinstance(ComplexSection.A, cached_property))
        self.assertTrue(isinstance(ComplexSection._I0, cached_property))
        self.assertTrue(isinstance(ComplexSection._I, cached_property))
        self.assertTrue(isinstance(ComplexSection.I0, cached_property))
        self.assertTrue(isinstance(ComplexSection.I, cached_property))

    def test_reset_cached_properties(self):
        # BaseSection.reset_cached_properties should be called when changing
        # dimensions, density or position
        # It should be also checked that cached properties are deleted by
        # reset_cached_properties. This is checked only for subclasses
        def error_raiser():
            raise ValueError
        section = self.ComplexSection()
        section.reset_cached_properties = error_raiser
        self.assertRaises(ValueError, section.set_dimensions)
        self.assertRaises(ValueError, section.set_density, 2)
        self.assertRaises(ValueError, section.set_position, 1, 2, 3)
| {
"content_hash": "802a7e1f48344a01399b3c80b970d5a2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 85,
"avg_line_length": 37.075471698113205,
"alnum_prop": 0.6455470737913486,
"repo_name": "iamlikeme/sections",
"id": "49767c849032d9277d024ecefc82a44d5421cfda",
"size": "3930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core_ComplexSection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65223"
}
],
"symlink_target": ""
} |
"""Mappie: A Python package for playing with webmap images
Mappie makes it easy to download map tiles from various webmap sources and use
them in visualizations with any type of geospatial data. It is designed with
'matplotlib' and 'cartopy' in mind, but should work nicely with most geospatial
plotting libraries in Python (e.g., Basemap).
Dependencies
----------
On its own, mappie doesn't require anything beyond basic Python packages and
'PIL' or 'pillow' (http://python-imaging.github.io/Pillow/):
Pillow is the "friendly" PIL fork by Alex Clark and Contributors.
PIL is the Python Imaging Library by Fredrik Lundh and Contributors.
To do anything useful with mappie though, you'll want a few additional packages:
For plotting and visualization of the downloaded map tiles, you'll want
to use 'matplotlib' - the de-facto 2-D plotting library for Python
(http://matplotlib.org/)
For functions and tools specific to drawing maps and visualizing geospatial
data check out 'cartopy' (http://scitools.org.uk/cartopy/) or 'Basemap'
(http://matplotlib.org/basemap/).
The following plotting example is based on 'cartopy', but you can do
similar things with 'Basemap' based on the examples here:
http://matplotlib.org/basemap/users/examples.html
Example
----------
>>> import mappie.sources as sources
>>> from mappie.geocoder import Geocoder
>>> google = sources.GoogleManager()
Using /tmp to cache maptiles.
>>> geocoder = Geocoder()
>>> bbox = geocoder.geocode('Hunter College, New York, NY, USA', output='bbox')
>>> webmap, newbox = google.create_map(bbox, zoom=7)
>>> newbox
(39.694555483164955, 41.824595150921695, -75.37065401673316, -72.55815401673317)
>>> webmap.show()
Plotting
----------
>>> from cartopy import config
>>> import cartopy.crs as ccrs
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 12))
>>> ax = plt.axes(projection=ccrs.PlateCarree())
>>> ext = newbox[2:4]+newbox[0:2] # re-arrange bounding box for 'imshow'
>>> ax.imshow(webmap, origin='upper', extent=ext, transform=ccrs.PlateCarree())
>>> ax.coastlines(resolution='50m', color='black', linewidth=2)
>>> plt.show() # or plt.savefig('google-map.png')
"""
import mappie.sources as sources
from mappie.geocoder import Geocoder as Geocoder
| {
"content_hash": "b2103a118fd5928276e84619421df831",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 44.07692307692308,
"alnum_prop": 0.7255671902268761,
"repo_name": "carsonfarmer/mappie",
"id": "5600f02a52b990b20bd98b5c5bcbd32a0c6a5293",
"size": "2339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37934"
}
],
"symlink_target": ""
} |
import arrow
from oandakey import access_token, accountID
from OnePy.builtin_module.backtest_forex.forex_recorder import ForexRecorder
from OnePy.custom_module.api.oanda_api import OandaAPI
from OnePy.custom_module.oanda_bar import OandaBar
from OnePy.sys_module.models.base_series import (AvgPriceSeries, MoneySeries,
PositionSeries)
class OandaRecorder(ForexRecorder):
    """Recorder that mirrors a live Oanda account into OnePy series.

    Unlike the backtest ForexRecorder, balances, positions and P&L are
    pulled from the Oanda REST API instead of being simulated locally.
    """

    def __init__(self):
        super().__init__()
        self.oanda = OandaAPI(accountID, access_token)

    def initialize(self):
        """Create all series objects and seed them from the live account."""
        self.settle_match_engine_and_series()
        self.holding_pnl = self.series.HoldingPnlSeries(250)
        self.realized_pnl = self.series.RealizedPnlSeries(250)
        self.commission = self.series.CommissionSeries(250)
        self.market_value = self.series.MarketValueSeries(250)
        # Cannot be refreshed per side: the API does not report long/short
        # margin separately.  (Translated from the original Chinese note.)
        self.margin = self.series.MarginSeries(250)
        self.position = PositionSeries(250)
        self.avg_price = AvgPriceSeries(250)
        self.cash = MoneySeries("cash", self.initial_cash, 250)
        self.frozen_cash = MoneySeries("frozen_cash", 0, 250)
        self.balance = MoneySeries("balance", self.initial_cash, 250)

        account = self.oanda.get_AccountDetails()["account"]
        self._seed_from_positions(account["positions"])
        self.cash.change_initial_value(float(account["marginAvailable"]))
        self.balance.change_initial_value(float(account["NAV"]))
        self.frozen_cash.change_initial_value(float(account["marginUsed"]))

    def _seed_from_positions(self, positions):
        """Seed the per-ticker series from the account's position list."""
        for position in positions:
            ticker = position["instrument"]
            if ticker not in self.env.tickers:
                continue
            for long_or_short in ["long", "short"]:
                info = position[long_or_short]
                self.position.change_initial_value(
                    ticker, abs(float(info["units"])), long_or_short)
                self.holding_pnl.change_initial_value(
                    ticker, float(info["unrealizedPL"]), long_or_short)
                self.commission.change_initial_value(
                    ticker, float(position["commission"]), long_or_short)
                avg = info.get("averagePrice")
                # No averagePrice key means no open exposure on this side.
                self.avg_price.change_initial_value(
                    ticker, float(avg) if avg else 0, long_or_short)

    def set_setting(self, slippage: dict, margin_rate=0.02) -> None:
        """Store execution settings used elsewhere by the match engine."""
        self.slippage = slippage
        self.margin_rate = margin_rate

    def run(self):
        # Live mode: nothing to replay; state is refreshed via update().
        pass

    def update(self, order_executed=False):
        """Refresh all series from the current live account state.

        (Original note, translated: margin and market_value are no longer
        computed per-tick here.)
        """
        trading_date = arrow.now().format("YYYY-MM-DD HH:mm:ss")
        account = self.oanda.get_AccountDetails()["account"]
        for position in account["positions"]:
            ticker = position["instrument"]
            if ticker not in self.env.tickers:
                continue
            for long_or_short in ["long", "short"]:
                info = position[long_or_short]
                self.position._append_value(
                    ticker, abs(float(info["units"])), long_or_short)
                self.holding_pnl._append_value(
                    ticker, float(info["unrealizedPL"]), long_or_short)
                self.commission._append_value(
                    ticker, float(position["commission"]), long_or_short)
                # NOTE(review): avg_price uses change_initial_value while the
                # series above append; looks inconsistent -- confirm intent.
                avg = info.get("averagePrice")
                self.avg_price.change_initial_value(
                    ticker, float(avg) if avg else 0, long_or_short)
        self.cash.append(
            {"date": trading_date, "value": float(account["marginAvailable"])})
        self.balance.append({"date": trading_date, "value": float(account["NAV"])})
        self.frozen_cash.append(
            {"date": trading_date, "value": float(account["marginUsed"])})
        self.market_value.update_barly(False)
        self.margin.update_barly()

    def none(self, order_executed=False):
        """Debug helper: print the local series next to the live account.

        BUG FIX: ``order_executed`` was a free (undefined) name here, so any
        call raised NameError; it is now a keyword parameter defaulting to
        False, which preserves the original intent of the guard.
        """
        if not order_executed:
            positions = self.oanda.get_AccountDetails()["account"]["positions"][0]
            true_holing_pnl = (
                self.holding_pnl.total_value() - self.commission.total_value())
            print("=" * 30)
            print("balance", self.balance.latest())
            print("size", self.position.total_value(), positions["long"]["units"])
            print("margin", self.margin.total_value(), positions["marginUsed"])
            print("cash", self.cash.latest())
            print(
                "holding_pnl",
                self.holding_pnl.total_value(),
                positions["long"]["unrealizedPL"],
            )
            print("true holding_pnl", true_holing_pnl)
            print("commission", self.commission.total_value(), positions["commission"])
            print("market_value", self.market_value.total_value())
            try:
                print(
                    "avgPrice",
                    self.avg_price.latest("EUR_USD", "long"),
                    positions["long"]["averagePrice"],
                )
            except Exception:
                # Narrowed from a bare except: position/key may be absent.
                print("no position")

    @property
    def bar_class(self):
        return OandaBar
| {
"content_hash": "4b4f33a6b6087a537d006d52c7e45897",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 87,
"avg_line_length": 38.88513513513514,
"alnum_prop": 0.5456125108601216,
"repo_name": "Chandlercjy/OnePy",
"id": "2d982bf3a7f8379e6a06d656edf2a87cdf415acf",
"size": "5789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OnePy/custom_module/oanda_recorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251278"
}
],
"symlink_target": ""
} |
from cgi import escape
def preformat(text):
    """Coerce *text* to str and escape HTML special characters."""
    as_string = str(text)
    return escape(as_string)
class TagWrapper(str):
    """str subclass that wraps its text in an (X)HTML tag at construction.

    Subclasses declare ``tag`` and, optionally, ``attributes`` -- a mapping
    of attribute names to values rendered into the opening tag.
    """
    tag = None
    attributes = None

    @classmethod
    def _wrap(cls, text):
        # Render ' key="value"' pairs when attributes are declared.
        if cls.attributes:
            pairs = ['{}="{}"'.format(k, v) for k, v in cls.attributes.items()]
            attrib = ' ' + ' '.join(pairs)
        else:
            attrib = ''
        return '<{tag}{attrib}>{text}</{tag}>'.format(tag=cls.tag,
                                                      attrib=attrib, text=text)

    def __new__(cls, text):
        return super().__new__(cls, cls._wrap(text))
class Italic(TagWrapper):
    # Wraps text in <i> tags.
    tag = 'i'
class Oblique(Italic):
    # Oblique renders identically to Italic (<i> tags).
    pass
class Bold(TagWrapper):
    # Wraps text in <b> tags.
    tag = 'b'
class Light(TagWrapper):
    # NOTE(review): '<l>' is not a standard HTML element -- presumably
    # interpreted downstream; confirm the intended rendering.
    tag = 'l'
class Underline(TagWrapper):
    # Wraps text in <u> tags.
    tag = 'u'
class Superscript(TagWrapper):
    # Wraps text in <sup> tags.
    tag = 'sup'
class Subscript(TagWrapper):
    # Wraps text in <sub> tags.
    tag = 'sub'
class SmallCaps(TagWrapper):
    # Rendered as an inline span styled with the small-caps font variant.
    tag = 'span'
    attributes = {'style': 'font-variant:small-caps;'}
class Bibliography(str):
    """str subclass rendering an iterable of entries as a CSL HTML
    bibliography: a wrapper div with one csl-entry div per item, newline
    separated.
    """
    bib_prefix = '<div class="csl-bib-body">'
    bib_suffix = '</div>'
    item_prefix = ' <div class="csl-entry">'
    item_suffix = '</div>'

    def __new__(cls, items):
        lines = [cls.bib_prefix]
        lines.extend(cls.item_prefix + str(entry) + cls.item_suffix
                     for entry in items)
        lines.append(cls.bib_suffix)
        return super().__new__(cls, '\n'.join(lines))
| {
"content_hash": "cda41adf20810cd540aad623957bd218",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 21.042857142857144,
"alnum_prop": 0.539714867617108,
"repo_name": "jasonzou/MyPapers",
"id": "dbb875f39fa2c9e6c2d36f57325bd1caab009a33",
"size": "1474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/citepro/citeproc/formatter/html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9166"
},
{
"name": "JavaScript",
"bytes": "9154"
},
{
"name": "Perl",
"bytes": "22029"
},
{
"name": "Python",
"bytes": "544774"
},
{
"name": "Ruby",
"bytes": "18948"
},
{
"name": "Shell",
"bytes": "1890"
},
{
"name": "TeX",
"bytes": "229139"
}
],
"symlink_target": ""
} |
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
# Enumeration of the possible XML parser back-ends.
( XMLParser_import_none, XMLParser_import_lxml,
    XMLParser_import_elementtree
) = range(3)
# Records which back-end the cascade below actually imported; parsexml_
# branches on this to enable lxml-specific behavior.
XMLParser_import_library = None
try:
    # lxml
    from lxml import etree as etree_
    XMLParser_import_library = XMLParser_import_lxml
    if Verbose_import_:
        print("running with lxml.etree")
except ImportError:
    try:
        # cElementTree from Python 2.5+
        import xml.etree.cElementTree as etree_
        XMLParser_import_library = XMLParser_import_elementtree
        if Verbose_import_:
            print("running with cElementTree on Python 2.5+")
    except ImportError:
        try:
            # ElementTree from Python 2.5+
            import xml.etree.ElementTree as etree_
            XMLParser_import_library = XMLParser_import_elementtree
            if Verbose_import_:
                print("running with ElementTree on Python 2.5+")
        except ImportError:
            try:
                # normal cElementTree install
                import cElementTree as etree_
                XMLParser_import_library = XMLParser_import_elementtree
                if Verbose_import_:
                    print("running with cElementTree")
            except ImportError:
                try:
                    # normal ElementTree install
                    import elementtree.ElementTree as etree_
                    XMLParser_import_library = XMLParser_import_elementtree
                    if Verbose_import_:
                        print("running with ElementTree")
                except ImportError:
                    raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse XML through whichever ElementTree implementation was imported.

    When lxml is the active back-end, substitute its ElementTree-compatible
    parser (so, e.g., comments are ignored) unless the caller already
    supplied a 'parser' keyword.
    """
    using_lxml = XMLParser_import_library == XMLParser_import_lxml
    if using_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
# Prefer a user-supplied 'generatedssuper' hook module; otherwise fall back
# to the default implementation below.  (Python 2 'except' syntax.)
try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:
    class GeneratedsSuper(object):
        """Default format/validate helpers used by generated classes."""
        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            # NOTE(review): validates each item via float(), so non-integers
            # like '1.5' pass -- presumably int() was intended; confirm.
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build a '/'-joined path of tag names from the root down to node.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips a leading '{namespace}' qualifier from an element tag.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # Walks toward the root; getparent() exists on lxml elements --
            # presumably unused with stdlib ElementTree (TODO confirm).
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'  # codec used by export methods when encoding text
Tag_pattern_ = re_.compile(r'({.*})?(.*)')  # splits '{ns}tag' into (ns, tag)
STRING_CLEANUP_PAT = re_.compile(r"[\n\r\s]+")  # matches runs of whitespace
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write 4 spaces of indentation per nesting *level* to *outfile*.

    Single write instead of the original one-write-per-level loop; the
    emitted bytes are identical.
    """
    outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape XML special characters in element text content.

    Returns '' for falsy input; otherwise coerces *inStr* to a string and
    replaces & < > with their XML entities.  NOTE: the replacement targets
    were corrupted to identity replacements (the entity references were
    decoded, e.g. ``replace('&', '&')``); restored here to the canonical
    generateDS.py escaping, with '&' handled first so the other entities
    are not double-escaped.
    """
    if not inStr:
        return ''
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
    """Return a Python source-code string literal for *inStr*.

    Single-quoted (triple-quoted when multi-line) if the text has no
    single quote; otherwise double-quoted (triple-quoted when
    multi-line), with embedded double quotes backslash-escaped.
    """
    multiline = inStr.find('\n') != -1
    if inStr.find("'") == -1:
        template = "'''%s'''" if multiline else "'%s'"
        return template % inStr
    body = inStr
    if body.find('"') != -1:
        body = body.replace('"', '\\"')
    template = '"""%s"""' if multiline else '"%s"'
    return template % body
def get_all_text_(node):
    """Concatenate *node*'s text with the tail text of each child.

    This is the full character content of a mixed-content element,
    ignoring text nested inside the children themselves.
    """
    pieces = []
    if node.text is not None:
        pieces.append(node.text)
    pieces.extend(child.tail for child in node if child.tail is not None)
    return ''.join(pieces)
def find_attr_value_(attr_name, node):
    """Look up attribute *attr_name* on *node*: first unqualified, then
    qualified with each namespace known to the node.

    Returns the attribute value or None.  NOTE(review): ``node.nsmap``
    and ``itervalues()`` are lxml/Python-2 specific APIs.
    """
    attrs = node.attrib
    # First try with no namespace.
    value = attrs.get(attr_name)
    if value is None:
        # Now try the other possible namespaces.
        namespaces = node.nsmap.itervalues()
        for namespace in namespaces:
            value = attrs.get('{%s}%s' % (namespace, attr_name, ))
            if value is not None:
                break
    return value
class GDSParseError(Exception):
    """Raised by raise_parse_error() when input XML fails validation."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, appending element context to *msg*.

    Includes the source line number when the module was imported with
    lxml (only lxml nodes carry ``sourceline``).
    """
    if XMLParser_import_library == XMLParser_import_lxml:
        msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        msg = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(msg)
class MixedContainer:
    """Holds one piece of mixed element content (text, a simple value, or
    a nested complex object) together with category/type tags that drive
    how export()/exportLiteral() serialize it.
    """
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type):
        # NOTE(review): the content_type parameter is ignored; this is a
        # plain getter in the generated code.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        """Write this item as XML, dispatching on its category."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace,name)
    def exportSimple(self, outfile, level, name):
        # Pick the %-format matching the declared content_type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        """Write this item as a Python constructor expression."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Metadata record for one member of a generated class: its name, its
    schema data type (a single type name or a restriction chain), and a
    flag marking container (list-valued) members.
    """
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        # Raw value: either a single type name or the full chain.
        return self.data_type
    def get_data_type(self):
        """Return the effective type: the last link of a restriction
        chain ('xs:string' when the chain is empty), or the scalar
        data_type as-is."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class IdentifierType(GeneratedsSuper):
    """Generated binding for the IdentifierType schema type: a string
    value plus a set of scheme* attributes.  Generated by generateDS.py;
    method structure (export/build/exportLiteral) follows its standard
    template.
    """
    member_data_items_ = [
        MemberSpec_('schemeDataURI', 'xsd:anyURI', 0),
        MemberSpec_('schemeID', 'xsd:normalizedString', 0),
        MemberSpec_('schemeAgencyName', 'xsd:string', 0),
        MemberSpec_('schemeAgencyID', 'xsd:normalizedString', 0),
        MemberSpec_('schemeName', 'xsd:string', 0),
        MemberSpec_('schemeVersionID', 'xsd:normalizedString', 0),
        MemberSpec_('schemeURI', 'xsd:anyURI', 0),
        MemberSpec_('valueOf_', 'xsd:normalizedString', 0),
        ]
    subclass = None
    superclass = None
    def __init__(self, schemeDataURI=None, schemeID=None, schemeAgencyName=None, schemeAgencyID=None, schemeName=None, schemeVersionID=None, schemeURI=None, valueOf_=None):
        self.schemeDataURI = _cast(None, schemeDataURI)
        self.schemeID = _cast(None, schemeID)
        self.schemeAgencyName = _cast(None, schemeAgencyName)
        self.schemeAgencyID = _cast(None, schemeAgencyID)
        self.schemeName = _cast(None, schemeName)
        self.schemeVersionID = _cast(None, schemeVersionID)
        self.schemeURI = _cast(None, schemeURI)
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been set.
        if IdentifierType.subclass:
            return IdentifierType.subclass(*args_, **kwargs_)
        else:
            return IdentifierType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_schemeDataURI(self): return self.schemeDataURI
    def set_schemeDataURI(self, schemeDataURI): self.schemeDataURI = schemeDataURI
    def get_schemeID(self): return self.schemeID
    def set_schemeID(self, schemeID): self.schemeID = schemeID
    def get_schemeAgencyName(self): return self.schemeAgencyName
    def set_schemeAgencyName(self, schemeAgencyName): self.schemeAgencyName = schemeAgencyName
    def get_schemeAgencyID(self): return self.schemeAgencyID
    def set_schemeAgencyID(self, schemeAgencyID): self.schemeAgencyID = schemeAgencyID
    def get_schemeName(self): return self.schemeName
    def set_schemeName(self, schemeName): self.schemeName = schemeName
    def get_schemeVersionID(self): return self.schemeVersionID
    def set_schemeVersionID(self, schemeVersionID): self.schemeVersionID = schemeVersionID
    def get_schemeURI(self): return self.schemeURI
    def set_schemeURI(self, schemeURI): self.schemeURI = schemeURI
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='IdentifierType', namespacedef_=''):
        """Write this object as an XML element to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='IdentifierType')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # No content: emit a self-closing tag.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='IdentifierType'):
        # already_processed prevents duplicate emission when subclasses
        # chain up.
        if self.schemeDataURI is not None and 'schemeDataURI' not in already_processed:
            already_processed.append('schemeDataURI')
            outfile.write(' schemeDataURI=%s' % (self.gds_format_string(quote_attrib(self.schemeDataURI).encode(ExternalEncoding), input_name='schemeDataURI'), ))
        if self.schemeID is not None and 'schemeID' not in already_processed:
            already_processed.append('schemeID')
            outfile.write(' schemeID=%s' % (self.gds_format_string(quote_attrib(self.schemeID).encode(ExternalEncoding), input_name='schemeID'), ))
        if self.schemeAgencyName is not None and 'schemeAgencyName' not in already_processed:
            already_processed.append('schemeAgencyName')
            outfile.write(' schemeAgencyName=%s' % (self.gds_format_string(quote_attrib(self.schemeAgencyName).encode(ExternalEncoding), input_name='schemeAgencyName'), ))
        if self.schemeAgencyID is not None and 'schemeAgencyID' not in already_processed:
            already_processed.append('schemeAgencyID')
            outfile.write(' schemeAgencyID=%s' % (self.gds_format_string(quote_attrib(self.schemeAgencyID).encode(ExternalEncoding), input_name='schemeAgencyID'), ))
        if self.schemeName is not None and 'schemeName' not in already_processed:
            already_processed.append('schemeName')
            outfile.write(' schemeName=%s' % (self.gds_format_string(quote_attrib(self.schemeName).encode(ExternalEncoding), input_name='schemeName'), ))
        if self.schemeVersionID is not None and 'schemeVersionID' not in already_processed:
            already_processed.append('schemeVersionID')
            outfile.write(' schemeVersionID=%s' % (self.gds_format_string(quote_attrib(self.schemeVersionID).encode(ExternalEncoding), input_name='schemeVersionID'), ))
        if self.schemeURI is not None and 'schemeURI' not in already_processed:
            already_processed.append('schemeURI')
            outfile.write(' schemeURI=%s' % (self.gds_format_string(quote_attrib(self.schemeURI).encode(ExternalEncoding), input_name='schemeURI'), ))
    def exportChildren(self, outfile, level, namespace_='', name_='IdentifierType', fromsubclass_=False):
        # Simple-content type: no child elements.
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='IdentifierType'):
        """Write this object as a Python constructor expression."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.schemeDataURI is not None and 'schemeDataURI' not in already_processed:
            already_processed.append('schemeDataURI')
            showIndent(outfile, level)
            outfile.write('schemeDataURI = "%s",\n' % (self.schemeDataURI,))
        if self.schemeID is not None and 'schemeID' not in already_processed:
            already_processed.append('schemeID')
            showIndent(outfile, level)
            outfile.write('schemeID = "%s",\n' % (self.schemeID,))
        if self.schemeAgencyName is not None and 'schemeAgencyName' not in already_processed:
            already_processed.append('schemeAgencyName')
            showIndent(outfile, level)
            outfile.write('schemeAgencyName = "%s",\n' % (self.schemeAgencyName,))
        if self.schemeAgencyID is not None and 'schemeAgencyID' not in already_processed:
            already_processed.append('schemeAgencyID')
            showIndent(outfile, level)
            outfile.write('schemeAgencyID = "%s",\n' % (self.schemeAgencyID,))
        if self.schemeName is not None and 'schemeName' not in already_processed:
            already_processed.append('schemeName')
            showIndent(outfile, level)
            outfile.write('schemeName = "%s",\n' % (self.schemeName,))
        if self.schemeVersionID is not None and 'schemeVersionID' not in already_processed:
            already_processed.append('schemeVersionID')
            showIndent(outfile, level)
            outfile.write('schemeVersionID = "%s",\n' % (self.schemeVersionID,))
        if self.schemeURI is not None and 'schemeURI' not in already_processed:
            already_processed.append('schemeURI')
            showIndent(outfile, level)
            outfile.write('schemeURI = "%s",\n' % (self.schemeURI,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an element tree node."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('schemeDataURI', node)
        if value is not None and 'schemeDataURI' not in already_processed:
            already_processed.append('schemeDataURI')
            self.schemeDataURI = value
        value = find_attr_value_('schemeID', node)
        if value is not None and 'schemeID' not in already_processed:
            already_processed.append('schemeID')
            self.schemeID = value
        value = find_attr_value_('schemeAgencyName', node)
        if value is not None and 'schemeAgencyName' not in already_processed:
            already_processed.append('schemeAgencyName')
            self.schemeAgencyName = value
        value = find_attr_value_('schemeAgencyID', node)
        if value is not None and 'schemeAgencyID' not in already_processed:
            already_processed.append('schemeAgencyID')
            self.schemeAgencyID = value
        value = find_attr_value_('schemeName', node)
        if value is not None and 'schemeName' not in already_processed:
            already_processed.append('schemeName')
            self.schemeName = value
        value = find_attr_value_('schemeVersionID', node)
        if value is not None and 'schemeVersionID' not in already_processed:
            already_processed.append('schemeVersionID')
            self.schemeVersionID = value
        value = find_attr_value_('schemeURI', node)
        if value is not None and 'schemeURI' not in already_processed:
            already_processed.append('schemeURI')
            self.schemeURI = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class IdentifierType
class BillOfResourcesIDType(IdentifierType):
    """Generated subclass of IdentifierType for the BillOfResourcesID
    element; adds an xsi:type marker on export and otherwise delegates
    to the base class."""
    member_data_items_ = [
        MemberSpec_('valueOf_', 'IdentifierType', 0),
        ]
    subclass = None
    superclass = IdentifierType
    def __init__(self, schemeDataURI=None, schemeID=None, schemeAgencyName=None, schemeAgencyID=None, schemeName=None, schemeVersionID=None, schemeURI=None, valueOf_=None):
        super(BillOfResourcesIDType, self).__init__(schemeDataURI, schemeID, schemeAgencyName, schemeAgencyID, schemeName, schemeVersionID, schemeURI, valueOf_, )
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been set.
        if BillOfResourcesIDType.subclass:
            return BillOfResourcesIDType.subclass(*args_, **kwargs_)
        else:
            return BillOfResourcesIDType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='BillOfResourcesIDType', namespacedef_=''):
        """Write this object as XML, tagged with its xsi:type."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='BillOfResourcesIDType')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        outfile.write(' xsi:type="BillOfResourcesIDType"')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BillOfResourcesIDType'):
        super(BillOfResourcesIDType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BillOfResourcesIDType')
    def exportChildren(self, outfile, level, namespace_='', name_='BillOfResourcesIDType', fromsubclass_=False):
        super(BillOfResourcesIDType, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        if (
            self.valueOf_ or
            super(BillOfResourcesIDType, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='BillOfResourcesIDType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(BillOfResourcesIDType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(BillOfResourcesIDType, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this object from an element tree node."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(BillOfResourcesIDType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(BillOfResourcesIDType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class BillOfResourcesIDType
class BillOfMaterialIDType(IdentifierType):
    """Generated subclass of IdentifierType for the BillOfMaterialID
    element; adds an xsi:type marker on export and otherwise delegates
    to the base class."""
    member_data_items_ = [
        MemberSpec_('valueOf_', 'IdentifierType', 0),
        ]
    subclass = None
    superclass = IdentifierType
    def __init__(self, schemeDataURI=None, schemeID=None, schemeAgencyName=None, schemeAgencyID=None, schemeName=None, schemeVersionID=None, schemeURI=None, valueOf_=None):
        super(BillOfMaterialIDType, self).__init__(schemeDataURI, schemeID, schemeAgencyName, schemeAgencyID, schemeName, schemeVersionID, schemeURI, valueOf_, )
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been set.
        if BillOfMaterialIDType.subclass:
            return BillOfMaterialIDType.subclass(*args_, **kwargs_)
        else:
            return BillOfMaterialIDType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='', name_='BillOfMaterialIDType', namespacedef_=''):
        """Write this object as XML, tagged with its xsi:type."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        self.exportAttributes(outfile, level, [], namespace_, name_='BillOfMaterialIDType')
        outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
        outfile.write(' xsi:type="BillOfMaterialIDType"')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(self.valueOf_.encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='BillOfMaterialIDType'):
        super(BillOfMaterialIDType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='BillOfMaterialIDType')
    def exportChildren(self, outfile, level, namespace_='', name_='BillOfMaterialIDType', fromsubclass_=False):
        super(BillOfMaterialIDType, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        if (
            self.valueOf_ or
            super(BillOfMaterialIDType, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='BillOfMaterialIDType'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(BillOfMaterialIDType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(BillOfMaterialIDType, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this object from an element tree node."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(BillOfMaterialIDType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(BillOfMaterialIDType, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class BillOfMaterialIDType
# Help text printed by usage() before exiting.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the CLI usage text and exit with status 1.

    The Python 2-only ``print`` statement is replaced with the
    parenthesized form, which behaves identically on Python 2 for a
    single argument and also parses on Python 3.
    """
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Strip any namespace from the root node's tag and look up the
    generated class of the same name in this module.

    Returns (tag, class-or-None).
    """
    bare_tag = Tag_pattern_.match(node.tag).groups()[-1]
    return bare_tag, globals().get(bare_tag)
def parse(inFileName):
    """Parse the XML file *inFileName* and return the populated root
    object, defaulting to IdentifierType for unknown root elements."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognized root element: fall back to the default class.
        rootTag = 'IdentifierType'
        rootClass = IdentifierType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    ## sys.stdout.write('<?xml version="1.0" ?>\n')
    ## rootObj.export(sys.stdout, 0, name_=rootTag,
    ##     namespacedef_='')
    return rootObj
def parseString(inString):
    """Parse XML from the string *inString* and return the populated root
    object, defaulting to IdentifierType for unknown root elements."""
    # NOTE: Python 2 ``StringIO`` module (would be io.StringIO on py3).
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognized root element: fall back to the default class.
        rootTag = 'IdentifierType'
        rootClass = IdentifierType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    ## sys.stdout.write('<?xml version="1.0" ?>\n')
    ## rootObj.export(sys.stdout, 0, name_="IdentifierType",
    ##     namespacedef_='')
    return rootObj
def parseLiteral(inFileName):
    """Parse the XML file *inFileName* and return the populated root
    object (the literal-export writes below are left commented out by
    the generator)."""
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unrecognized root element: fall back to the default class.
        rootTag = 'IdentifierType'
        rootClass = IdentifierType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    ## sys.stdout.write('#from simplecontent_restriction2_sup import *\n\n')
    ## sys.stdout.write('import simplecontent_restriction2_sup as model_\n\n')
    ## sys.stdout.write('rootObj = model_.rootTag(\n')
    ## rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
    ## sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: expects exactly one XML input file."""
    cli_args = sys.argv[1:]
    if len(cli_args) == 1:
        parse(cli_args[0])
    else:
        # Wrong argument count: print help and exit(1).
        usage()
# Script entry point.
if __name__ == '__main__':
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated module.
__all__ = [
    "BillOfMaterialIDType",
    "BillOfResourcesIDType",
    "IdentifierType"
    ]
| {
"content_hash": "8d8c7a867bc42ec65b59b860d1ff0e2b",
"timestamp": "",
"source": "github",
"line_count": 734,
"max_line_length": 172,
"avg_line_length": 43.6158038147139,
"alnum_prop": 0.6260698444430561,
"repo_name": "botondus/generateds",
"id": "a4feb517fc8238d982bfb63046dbe7079eb0f988",
"size": "32098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/simplecontent_restriction1_sup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "665"
},
{
"name": "Python",
"bytes": "1183937"
},
{
"name": "Shell",
"bytes": "452"
}
],
"symlink_target": ""
} |
from ._base import BaseEvent
from . import register_event
from ..schema import types
@register_event('channel_unarchive')
class ChannelUnarchive(BaseEvent):
    """Slack event fired when a channel is unarchived.

    Example payload::

        {
            "type": "channel_unarchive",
            "channel": "C024BE91L",
            "user": "U024BE7LH"
        }

    For more information see https://api.slack.com/events/channel_unarchive
    """
    @property
    def schema(self):
        # Field name -> expected value type for this event payload.
        field_types = dict(
            type=types.String,
            channel=types.Channel.Id,
            user=types.User.Id,
        )
        return field_types
| {
"content_hash": "4ea61aa028808aaa0f87d138e500d974",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 24.037037037037038,
"alnum_prop": 0.5408320493066255,
"repo_name": "huntcsg/slackly",
"id": "a9f1d8e7c044dd854071b31af55ab1f1b4e841ff",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/slackly/events/channel_unarchive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362475"
},
{
"name": "Shell",
"bytes": "1497"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Package metadata; `tagm` is a single-module distribution exposing the
# `tagm` console script.
setup(
    name='tagm',
    version='0.2-dev',
    maintainer=u'Martin Hult\xe9n-Ashauer',
    maintainer_email='tagm@nimdraug.com',
    url='http://github.com/Nimdraug/tagm',
    license='MIT',
    description='A command and library for managing meta tags for arbitrary files',
    py_modules=['tagm'],
    entry_points={
        'console_scripts': [
            'tagm = tagm:main',
        ],
    },
)
| {
"content_hash": "cbbd9a445394647eea60b2f2dc7c2d53",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 21.545454545454547,
"alnum_prop": 0.5590717299578059,
"repo_name": "Nimdraug/tagm",
"id": "6d66d25ce236d5eecf776211cf23a31f81d7e297",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29702"
}
],
"symlink_target": ""
} |
import json
import logging
import requests
from datetime import datetime
from django.conf import settings
from optparse import make_option
from django.db import transaction, DEFAULT_DB_ALIAS
from django.core.management.base import BaseCommand
from requests.exceptions import SSLError, ConnectionError, RequestException
from vdw.samples.models import Sample, ResultScore
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command: fetch phenotype (HPO) data for each published
    sample from a remote endpoint, send the sample's gene list to a
    gene-ranking service, and store per-result rank/score rows.

    Optional positional args restrict processing to the given sample
    labels; --force recomputes rankings even when up to date.
    """
    args = '<sample_label sample_label ...>'
    option_list = BaseCommand.option_list + (
        make_option('--database', action='store', dest='database',
                    default=DEFAULT_DB_ALIAS,
                    help='Specifies the target database to load results.'),
        make_option('--force', action='store_true', dest='force',
                    default=False,
                    help='Forces recomputation of all gene rankings')
    )
    def handle(self, *args, **options):
        # Bail out early unless all required settings are present.
        if not getattr(settings, 'PHENOTYPE_ENDPOINT', None):
            log.error('PHENOTYPE_ENDPOINT must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if not getattr(settings, 'GENE_RANK_BASE_URL', None):
            log.error('GENE_RANK_BASE_URL must be defined in settings for '
                      'gene rankings to be updated.')
            return
        if (not getattr(settings, 'VARIFY_CERT', None) or
                not getattr(settings, 'VARIFY_KEY', None)):
            log.error('VARIFY_CERT and VARIFY_KEY must be defined in settings '
                      'for gene rankings to be updated.')
            return
        database = options.get('database')
        force = options.get('force')
        # Construct the cert from the setting to use in requests to the
        # phenotype endpoint.
        cert = (settings.VARIFY_CERT, settings.VARIFY_KEY)
        # We ignore all the samples that aren't published. They aren't visible
        # to the user so we don't bother updating related scores. If there
        # were sample labels supplied as arguments then we limit the rankings
        # updates to those samples, otherwise we process all samples.
        samples = Sample.objects.filter(published=True)
        if args:
            samples = samples.filter(label__in=args)
        updated_samples = 0
        total_samples = 0
        for sample in samples:
            total_samples += 1
            # Construct the URL from the setting and the sample label. The
            # sample label is used to retrieve the phenotype info on the remote
            # endpoint.
            url = settings.PHENOTYPE_ENDPOINT.format(sample.label)
            # Get the phenotype information for this sample. If the
            # phenotype is unavailable then we can skip this sample.
            try:
                response = requests.get(url, cert=cert, verify=False)
            except SSLError:
                log.exception('Skipping sample "{0}". An SSLError occurred '
                              'during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except ConnectionError:
                log.exception('Skipping sample "{0}". A ConnectionError '
                              'occurred during phenotype retrieval request.'
                              .format(sample.label))
                continue
            except RequestException:
                log.exception('Skipping sample "{0}". The sample has no '
                              'phenotype data associated with it'
                              .format(sample.label))
                continue
            try:
                phenotype_data = json.loads(response.content)
            except ValueError:
                log.warning(
                    "Could not parse response from {0}, skipping '{1}'."
                    .format(url, sample.label))
                continue
            try:
                # NOTE(review): a missing 'last_modified' key raises
                # KeyError here (only ValueError is caught) -- confirm the
                # endpoint always includes the key.
                phenotype_modified = datetime.strptime(
                    phenotype_data['last_modified'] or '',
                    "%Y-%m-%dT%H:%M:%S.%f")
            except ValueError:
                phenotype_modified = datetime.min
                log.warn("Could not parse 'last_modified' field on phenotype "
                         "data. Using datetime.min so that only unranked "
                         "samples will be ranked. If the 'force' flag was "
                         "used then all samples will be updated despite this "
                         "parsing failure.")
            # If the parsed response doesn't contain any HPO terms then we can
            # skip this sample since we cannot rank genes without HPO terms.
            if not phenotype_data.get('hpoAnnotations'):
                log.warning("Response from phenotype missing HPO Annotations, "
                            "skipping '{0}'.".format(sample.label))
                continue
            if (not force and sample.phenotype_modified and
                    sample.phenotype_modified > phenotype_modified):
                log.debug("Sample '{0}' is already up to date, skipping it."
                          .format(sample.label))
                continue
            # Extract the HPO terms from the data returned from the phenotype
            # endpoint. We need to modify the terms a bit because the phenotype
            # endpoint has terms in the form 'HP_0011263' and the gene ranking
            # enpoint expects them to be of the form 'HP:0011263'.
            hpo_terms = []
            for hpo_annotation in phenotype_data['hpoAnnotations']:
                try:
                    hpo_id = hpo_annotation.get('hpo_id')
                except AttributeError:
                    # Annotation entry was not a dict; ignore it.
                    continue
                if hpo_id:
                    hpo_terms.append(hpo_id.replace('_', ':'))
            # If there are no HPO terms then there will be no rankings so skip
            # this sample to avoid any more computations and requests.
            if not hpo_terms:
                log.warning('Skipping "{0}" because it has no HPO terms '
                            'associated with it.'.format(sample.label))
                continue
            # Compute the unique gene list for the entire sample
            genes = set(sample.results.values_list(
                'variant__effects__transcript__gene__symbol', flat=True))
            # Obviously, if there are no genes then the gene ranking endpoint
            # will have nothing to do so we can safely skip this sample.
            if not genes:
                log.warning('Skipping "{0}" because it has no genes '
                            'associated with it.'.format(sample.label))
                continue
            # Convert genes to a list so it is serializeable in the json.dumps
            # call below when making the request to the ranking service.
            data = {
                'hpo': hpo_terms,
                'genes': [g for g in genes if g]
            }
            try:
                gene_response = requests.post(
                    settings.GENE_RANK_BASE_URL, data=json.dumps(data),
                    headers={'content-type': 'application/json'})
            except Exception:
                log.exception('Error retrieving gene rankings, skipping '
                              'sample "{0}".'.format(sample.label))
                continue
            try:
                gene_data = json.loads(gene_response.content)
            except ValueError:
                log.warning(
                    "Could not parse response from {0}, skipping '{1}'."
                    .format(settings.GENE_RANK_BASE_URL, sample.label))
                continue
            # NOTE(review): raises KeyError if 'ranked_genes' is absent from
            # the service response -- confirm the service contract.
            ranked_genes = gene_data['ranked_genes']
            updated_results = 0
            total_results = 0
            for result in sample.results.all():
                total_results += 1
                # Each result is scored inside its own manually-managed
                # transaction so a failure only rolls back that result.
                with transaction.commit_manually(database):
                    try:
                        # Instead of trying to remove None from the returned
                        # values list we just exclude them from the query
                        # itself.
                        genes = result.variant.effects\
                            .exclude(transcript__gene__symbol__isnull=True)\
                            .order_by('effect__impact')\
                            .values_list(
                                'transcript__gene__symbol', flat=True)\
                            .distinct()
                        # If there is no gene on this result or the gene is
                        # not found in the list of ranked genes then skip this
                        # result.
                        if not genes:
                            log.debug("Result with id {0} has no gene, "
                                      "skipping result.".format(result.id))
                            transaction.rollback()
                            continue
                        # Use the first gene from the list since a result can
                        # have more than one gene associated with it, we
                        # return the first gene symbol in the list. This is
                        # the same one that will be shown in the collapsed
                        # gene list on the variant row in the results table.
                        gene = genes[0]
                        # Get the first item in the ranked gene list with a
                        # symbol matching the gene we looked up above for this
                        # result.
                        ranked_gene = None
                        for ranked_gene in ranked_genes:
                            if (ranked_gene.get('symbol', '').lower() ==
                                    gene.lower()):
                                break
                        else:
                            ranked_gene = None
                        if not ranked_gene:
                            log.debug("Could not find '{0}' in ranked gene "
                                      "list, skipping result".format(gene))
                            transaction.rollback()
                            continue
                        try:
                            # Update the existing score row or create one.
                            rs = ResultScore.objects.get(result=result)
                            rs.rank = ranked_gene.get('rank')
                            rs.score = ranked_gene.get('score')
                        except ResultScore.DoesNotExist:
                            rs = ResultScore(
                                result=result,
                                rank=ranked_gene.get('rank'),
                                score=ranked_gene.get('score'))
                        rs.save()
                        updated_results += 1
                    except Exception:
                        log.exception("Error saving gene ranks and scores for "
                                      "sample '{0}'".format(sample.label))
                        transaction.rollback()
                        continue
                    transaction.commit()
            # Record when this sample's phenotype-derived scores were
            # refreshed so unchanged samples are skipped next run.
            sample.phenotype_modified = datetime.now()
            sample.save()
            log.info("Updated {0} and skipped {1} results in sample '{2}'"
                     .format(updated_results, total_results - updated_results,
                             sample.label))
            updated_samples += 1
        log.info("Updated {0} and skipped {1} samples"
                 .format(updated_samples, total_samples-updated_samples))
| {
"content_hash": "2d73397cf256e06ddceec988dae9e402",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 79,
"avg_line_length": 44.14448669201521,
"alnum_prop": 0.5063738156761413,
"repo_name": "chop-dbhi/varify-data-warehouse",
"id": "b89be1c345d6c30aa61826a7c65742a14df858b2",
"size": "11610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vdw/samples/management/subcommands/gene_ranks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Puppet",
"bytes": "14864"
},
{
"name": "Python",
"bytes": "1796480"
},
{
"name": "Shell",
"bytes": "37"
}
],
"symlink_target": ""
} |
"""Test processing of unrequested blocks.
Setup: two nodes, node0 + node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c. Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import CBlockHeader, CInv, MSG_BLOCK, msg_block, msg_headers, msg_inv
from test_framework.p2p import p2p_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class AcceptBlockTest(BitcoinTestFramework):
    """Functional test of unrequested-block handling (scenarios 1-9 in the module docstring)."""
    def set_test_params(self):
        """Two clean nodes; node1 ignores low-work chains via -minimumchainwork=0x10."""
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ["-minimumchainwork=0x10"]]
    def setup_network(self):
        """Start the nodes WITHOUT connecting them to each other (connected in step 9)."""
        self.setup_nodes()
    def run_test(self):
        """Drive the nine scenarios described in the module docstring."""
        test_node = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
        # 1. Have nodes mine a block (leave IBD)
        [self.generate(n, 1, sync_fun=self.no_op) for n in self.nodes]
        tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes]
        # 2. Send one block that builds on each tip.
        # This should be accepted by node0
        blocks_h2 = [] # the height 2 blocks on each node's chain
        block_time = int(time.time()) + 1
        for i in range(2):
            blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
            blocks_h2[i].solve()
            block_time += 1
        test_node.send_and_ping(msg_block(blocks_h2[0]))
        min_work_node.send_and_ping(msg_block(blocks_h2[1]))
        assert_equal(self.nodes[0].getblockcount(), 2)
        assert_equal(self.nodes[1].getblockcount(), 1)
        self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
        # 3. Send another block that builds on genesis.
        block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
        block_time += 1
        block_h1f.solve()
        test_node.send_and_ping(msg_block(block_h1f))
        # Header should be accepted (chaintip status "headers-only"), but the
        # block itself must not be stored: it is unrequested and not more-work.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h1f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two block that build on the fork.
        block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
        block_time += 1
        block_h2f.solve()
        test_node.send_and_ping(msg_block(block_h2f))
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h2f.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # But this block should be accepted by node since it has equal work.
        self.nodes[0].getblock(block_h2f.hash)
        self.log.info("Second height 2 block accepted, but not reorg'ed to")
        # 4b. Now send another block that builds on the forking chain.
        block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
        block_h3.solve()
        test_node.send_and_ping(msg_block(block_h3))
        # Since the earlier block was not processed by node, the new block
        # can't be fully validated.
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_h3.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        # NOTE(review): duplicate getblock call below; harmless but likely unintended.
        self.nodes[0].getblock(block_h3.hash)
        # But this block should be accepted by node since it has more work.
        self.nodes[0].getblock(block_h3.hash)
        self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it is not missing any headers)
        tip = block_h3
        all_blocks = []
        for i in range(288):
            next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
            next_block.solve()
            all_blocks.append(next_block)
            tip = next_block
        # Now send the block at height 5 and check that it wasn't accepted (missing header)
        test_node.send_message(msg_block(all_blocks[1]))
        test_node.wait_for_disconnect()
        test_node = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=1)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
        # The block at height 5 should be accepted if we provide the missing header, though
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(all_blocks[0]))
        test_node.send_message(headers_message)
        test_node.send_and_ping(msg_block(all_blocks[1]))
        self.nodes[0].getblock(all_blocks[1].hash)
        # Now send the blocks in all_blocks
        for i in range(288):
            test_node.send_message(msg_block(all_blocks[i]))
        test_node.sync_with_ping()
        # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
        for x in all_blocks[:-1]:
            self.nodes[0].getblock(x.hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
        # 5. Test handling of unrequested block on the node that didn't process
        # Should still not be processed (even though it has a child that has more
        # work).
        # The node should have requested the blocks at some point, so
        # disconnect/reconnect first
        self.nodes[0].disconnect_p2ps()
        self.nodes[1].disconnect_p2ps()
        test_node = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=2)
        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 2)
        self.log.info("Unrequested block that would complete more-work chain was ignored")
        # 6. Try to get node to request the missing block.
        # Poke the node with an inv for block at height 3 and see if that
        # triggers a getdata on block 2 (it should if block 2 is missing).
        with p2p_lock:
            # Clear state so we can check the getdata request
            test_node.last_message.pop("getdata", None)
        test_node.send_message(msg_inv([CInv(MSG_BLOCK, block_h3.sha256)]))
        test_node.sync_with_ping()
        with p2p_lock:
            getdata = test_node.last_message["getdata"]
        # Check that the getdata includes the right block
        assert_equal(getdata.inv[0].hash, block_h1f.sha256)
        self.log.info("Inv at tip triggered getdata for unprocessed block")
        # 7. Send the missing block for the third time (now it is requested)
        test_node.send_and_ping(msg_block(block_h1f))
        assert_equal(self.nodes[0].getblockcount(), 290)
        self.nodes[0].getblock(all_blocks[286].hash)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
        self.log.info("Successfully reorged to longer chain")
        # 8. Create a chain which is invalid at a height longer than the
        # current chain, but which has more blocks on top of that
        block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
        block_289f.solve()
        block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
        block_290f.solve()
        # block_291 spends a coinbase below maturity!
        tx_to_add = create_tx_with_script(block_290f.vtx[0], 0, script_sig=b"42", amount=1)
        block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1, txlist=[tx_to_add])
        block_291.solve()
        block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
        block_292.solve()
        # Now send all the headers on the chain and enough blocks to trigger reorg
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_289f))
        headers_message.headers.append(CBlockHeader(block_290f))
        headers_message.headers.append(CBlockHeader(block_291))
        headers_message.headers.append(CBlockHeader(block_292))
        test_node.send_and_ping(headers_message)
        tip_entry_found = False
        for x in self.nodes[0].getchaintips():
            if x['hash'] == block_292.hash:
                assert_equal(x['status'], "headers-only")
                tip_entry_found = True
        assert tip_entry_found
        assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
        test_node.send_message(msg_block(block_289f))
        test_node.send_and_ping(msg_block(block_290f))
        self.nodes[0].getblock(block_289f.hash)
        self.nodes[0].getblock(block_290f.hash)
        test_node.send_message(msg_block(block_291))
        # At this point we've sent an obviously-bogus block, wait for full processing
        # without assuming whether we will be disconnected or not
        try:
            # Only wait a short while so the test doesn't take forever if we do get
            # disconnected
            test_node.sync_with_ping(timeout=1)
        except AssertionError:
            test_node.wait_for_disconnect()
            self.nodes[0].disconnect_p2ps()
            test_node = self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=3)
        # We should have failed reorg and switched back to 290 (but have block 291)
        assert_equal(self.nodes[0].getblockcount(), 290)
        assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
        assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
        # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
        block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
        block_293.solve()
        headers_message = msg_headers()
        headers_message.headers.append(CBlockHeader(block_293))
        test_node.send_message(headers_message)
        test_node.wait_for_disconnect()
        # 9. Connect node1 to node0 and ensure it is able to sync
        self.connect_nodes(0, 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
        self.log.info("Successfully synced nodes 1 and 0")
# Allow running this functional test directly as a script.
if __name__ == '__main__':
    AcceptBlockTest().main()
| {
"content_hash": "36551f99dcf277a075ff9c6a2e3ffbf1",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 113,
"avg_line_length": 45.017361111111114,
"alnum_prop": 0.6551484766679522,
"repo_name": "bitcoinknots/bitcoin",
"id": "759af1dce710d4a1550588fc9982266a893e5b84",
"size": "13179",
"binary": false,
"copies": "1",
"ref": "refs/heads/23.x-knots",
"path": "test/functional/p2p_unrequested_blocks.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "1140958"
},
{
"name": "C++",
"bytes": "8527798"
},
{
"name": "CMake",
"bytes": "28560"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "227016"
},
{
"name": "Makefile",
"bytes": "123534"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2360788"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "157469"
}
],
"symlink_target": ""
} |
"""
A simple module to do repetitive tasks in the morning.
Targeted as updating git repos.
"""
__author__ = 'Matthias Bussonnier'
__email__ = 'bussonniermatthias@gmail.com'
__version__ = '0.1.5'
version = __version__
import sys
import os
import re
from os.path import expanduser
import configparser
import io
import argparse
from multiprocessing import Pool
import logging
# Module-level logger; named after the file path rather than __name__.
log = logging.getLogger(__file__)
# Alias for the same logger object (not referenced elsewhere in this file).
console=log
# Route INFO-and-above records through the root logger to stderr.
logging.root.setLevel(logging.INFO)
logging.root.addHandler(logging.StreamHandler())
class _config(object):
def __enter__(self):
self.config = configparser.ConfigParser()
self.config.read(expanduser('~/.morning'))
return self.config
def __exit__(self, *args_):
with io.open(expanduser('~/.morning'),'w') as f:
self.config.write(f)
def main():
    """Command-line entry point.

    Sub-commands:
      add [dir ...] -- register git working directories in ~/.morning
      list          -- print the registered directories
      status        -- report status for each registered repo
      (none)        -- like status, passing gfo=True (fetch origin first)
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands', dest='subcmd')
    parser_add = subparsers.add_parser('add', help='')
    subparsers.add_parser('status', help='')
    subparsers.add_parser('list', help='')
    parser_add.add_argument('dir', nargs='*', default=('.',))
    args = parser.parse_args()
    if args.subcmd == 'add':
        for dr in args.dir:
            directory = os.path.abspath(expanduser(dr))
            # Only accept real git checkouts.
            if not os.path.isdir(directory):
                log.warning('%s is not a directory' % directory)
                continue
            if not os.path.isdir(os.path.join(directory, '.git')):
                log.warning('%s is not a git directory' % directory)
                continue
            log.info('adding %s to list of git repos to update' % str(directory))
            # Re-open the config per directory so each addition is persisted
            # immediately (the context manager writes on exit).
            with _config() as config:
                if 'mornings' not in config.sections():
                    config['mornings'] = {}
                config['mornings'][directory] = 'true'
    elif args.subcmd == 'list':
        with _config() as config:
            for k in config['mornings'].keys():
                log.info(k)
                log.debug('%s' % (k))
    elif args.subcmd == 'status':
        status()
    else:
        # No sub-command: announce, then update every registered repo.
        log.info('no arguments, will update all the things.')
        status(gfo=True)
def each_repo(k):
    """Fetch origin and print a one-line status for the git repo at path *k*.

    Reports how far the checkout is behind/ahead of origin, optionally
    fast-forwards master when the repo's .git/config enables
    [morning] fast-forward = True, and appends the `travis status` summary.
    Silently returns when *k* does not exist or the fetch fails.
    """
    # NOTE(review): gfo is hard-coded here; status()'s gfo flag is never
    # forwarded -- confirm whether fetching should really be unconditional.
    gfo = True
    from subprocess import run, PIPE
    if not os.path.exists(k):
        log.info('skipping %s, that does not exist' % k)
        return
    if gfo:
        res = run(['git', 'fetch', 'origin'], cwd=k, stdout=PIPE, stderr=PIPE)
        # `is not 0` compared object identity; use a value comparison.
        if res.returncode != 0:
            log.error("could not fetch %s" % k)
            return
    # Per-repo opt-in for automatic fast-forward of master.
    c = configparser.ConfigParser()
    c.read(os.path.join(k, '.git/config'))
    fast_forward = (c.get('morning', 'fast-forward', fallback=None) == 'True')
    log.debug("configured for auto fast-forward %s" % fast_forward)
    res = run('git rev-parse --abbrev-ref HEAD'.split(), cwd=k, stdout=PIPE, stderr=PIPE)
    branch = res.stdout.decode().strip()
    res = run('git status -sb --porcelain'.split(), cwd=k, stdout=PIPE, stderr=PIPE)
    # First line looks like "## master...origin/master [behind 2]".
    status_line = res.stdout.splitlines()[0].decode()
    m = re.findall('behind ([0-9]+)', status_line)
    behind = int(m[0]) if m else 0
    m = re.findall('ahead ([0-9]+)', status_line)
    ahead = int(m[0]) if m else 0
    if behind and not ahead and branch == 'master':
        extra = 'can be fast-forwarded'
        if fast_forward:
            log.debug('is ffding')
            res = run(['git', 'merge', 'origin/master', '--ff-only'], cwd=k, stdout=PIPE, stderr=PIPE)
            if res.returncode != 0:
                log.error(res.stderr.decode())
            extra = ' [fast-forwarded]'
    else:
        extra = ''
    # Append the CI status summary (requires the travis CLI on PATH).
    res = run(['travis', 'status', '--no-interactive'], cwd=k, stdout=PIPE, stderr=PIPE)
    extra = extra + res.stdout.decode().split('\n')[0]
    log.info('{:40s} on branch {:7s} -{:02d},+{:02d}, {:s}'.format(compressuser(k), branch, behind, ahead, extra))
def status(gfo=False):
    """Report status for every registered repo, in parallel.

    NOTE(review): *gfo* is accepted but never forwarded to each_repo --
    confirm whether fetching should really be unconditional.
    """
    workers = Pool(15)
    with _config() as config:
        workers.map(each_repo, config['mornings'].keys())
def compressuser(path):
    """Abbreviate the (lower-cased) home-directory prefix of *path* to '~'."""
    from os.path import expanduser as eu
    home = eu('~').lower()
    if not path.startswith(home):
        return path
    return path.replace(home, "~", 1)
# Script entry point: run the CLI when executed directly.
if __name__ =='__main__':
    main()
| {
"content_hash": "126dd9a68b69413d28d01b38f624ec82",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 113,
"avg_line_length": 31.19858156028369,
"alnum_prop": 0.585360309161173,
"repo_name": "Carreau/morning",
"id": "d280cf0dc743ce35e623cff4e2abbbfcfe87d990",
"size": "4423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "morning/__init__.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4854"
}
],
"symlink_target": ""
} |
from numpy import full, nan
from scipy.stats import rankdata
from .check_nd_array_for_bad import check_nd_array_for_bad
def _normalize_nd_array(_nd_array, method, rank_method, raise_for_bad):
    """Normalize the non-bad entries of *_nd_array*; bad entries stay NaN.

    *method* selects the scheme: "-0-" (z-score), "0-1" (min-max),
    "sum" (divide by total; values must be non-negative), or "rank"
    (scipy rankdata using *rank_method*). Entries flagged by
    check_nd_array_for_bad are left as NaN in the result.
    """
    good = ~check_nd_array_for_bad(_nd_array, raise_for_bad=raise_for_bad)
    normalized = full(_nd_array.shape, nan)
    if not good.any():
        return normalized
    values = _nd_array[good]
    if method == "-0-":
        std = values.std()
        # A constant vector z-scores to all zeros instead of dividing by 0.
        normalized[good] = 0 if std == 0 else (values - values.mean()) / std
    elif method == "0-1":
        low = values.min()
        spread = values.max() - low
        # A constant vector has no spread; its entries remain NaN.
        if spread != 0:
            normalized[good] = (values - low) / spread
    elif method == "sum":
        if values.min() < 0:
            raise ValueError("Sum normalize only positives.")
        total = values.sum()
        # An all-zero vector falls back to a uniform distribution.
        normalized[good] = 1 / good.sum() if total == 0 else values / total
    elif method == "rank":
        normalized[good] = rankdata(values, method=rank_method)
    return normalized
| {
"content_hash": "c0dae7a8eea5423664bca09b2769dff5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 86,
"avg_line_length": 25.17391304347826,
"alnum_prop": 0.5261945883707542,
"repo_name": "UCSD-CCAL/ccal",
"id": "d22b859cac1bd51d4959aafaa6c3f18c47698c25",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccal/_normalize_nd_array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "20830"
},
{
"name": "Python",
"bytes": "294577"
}
],
"symlink_target": ""
} |
from django.utils.timezone import now
import factory
import factory.fuzzy
from . import models
class Sms(factory.django.DjangoModelFactory):
    """Factory for ``models.Sms`` test instances."""
    class Meta:
        model = models.Sms
    uuid = factory.Faker('uuid4')
    source_address = factory.fuzzy.FuzzyText(length=15)
    destination_address = factory.fuzzy.FuzzyText(length=11)
    message = factory.fuzzy.FuzzyText(length=70)
    # LazyFunction defers evaluation to instance-creation time; the previous
    # plain ``now().replace(microsecond=0)`` was evaluated once at import,
    # freezing the same timestamp on every instance for the whole test run.
    dc = factory.LazyFunction(lambda: now().replace(microsecond=0))
class SmsSendResult(factory.django.DjangoModelFactory):
    """Factory for ``models.SmsSendResult`` test instances."""
    class Meta:
        model = models.SmsSendResult
    # Builds a parent Sms on demand for each result.
    sms = factory.SubFactory(Sms)
    is_success = True
    # LazyFunction evaluates per instance; the previous plain
    # ``now().replace(microsecond=0)`` was frozen at import time.
    send_dt = factory.LazyFunction(lambda: now().replace(microsecond=0))
class SmsPart(factory.django.DjangoModelFactory):
    """Factory for ``models.SmsPart`` test instances."""
    class Meta:
        model = models.SmsPart
    # Each part references a parent Sms; SubFactory builds one per part.
    sms = factory.SubFactory(Sms)
    external_id = factory.Faker('uuid4')
| {
"content_hash": "117502225ae4507f539829a48f505f9d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 25.34375,
"alnum_prop": 0.7114673242909988,
"repo_name": "telminov/sms-service",
"id": "3df3db03f997768fbcc211064ba378cb018a2b19",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25049"
}
],
"symlink_target": ""
} |
"""
breathe.apidoc
~~~~~~~~~~~~~~
Parses doxygen XML tree looking for C/C++ modules and creates ReST files
appropriately to create code documentation with Sphinx. It also creates a
modules index (See TYPEDICT below.).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
http://www.sat.qc.ca/
:copyright: Originally by Sphinx Team, C++ modifications by Tatsuyuki Ishi
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import argparse
import errno
import xml.etree.ElementTree
from breathe import __version__
# Reference: Doxygen XSD schema file, CompoundKind only
# Only what breathe supports are included
# Translates identifier to English
# (Doxygen compound kind -> heading label used in the generated ReST files.)
TYPEDICT = {'class': 'Class',
            'struct': 'Struct',
            'union': 'Union',
            'file': 'File',
            'namespace': 'Namespace',
            'group': 'Group'}
def write_file(name, text, args):
    """Write the output file for module/package <name>.

    Honors ``args.dryrun`` (print what would happen, write nothing) and
    ``args.force`` (overwrite an existing file). Intermediate directories
    are created on demand.
    """
    fname = os.path.join(args.destdir, '%s.%s' % (name, args.suffix))
    if args.dryrun:
        print('Would create file %s.' % fname)
        return
    if not args.force and os.path.isfile(fname):
        print('File %s already exists, skipping.' % fname)
    else:
        print('Creating file %s.' % fname)
        if not os.path.exists(os.path.dirname(fname)):
            try:
                os.makedirs(os.path.dirname(fname))
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        # 'with' guarantees the handle is closed even if the write fails,
        # replacing the previous manual open/try/finally.
        with open(fname, 'w') as target:
            target.write(text)
def format_heading(level, text):
    """Create a heading of <level> [1, 2 or 3 supported]."""
    marker = ('=', '-', '~')[level - 1]
    return '%s\n%s\n\n' % (text, marker * len(text))
def format_directive(package_type, package):
    """Create the breathe directive and add the options."""
    return '.. doxygen{}:: {}\n'.format(package_type, package)
def create_package_file(package, package_type, package_id, args):
    """Build the text of the file and write the file."""
    heading_label = TYPEDICT.get(package_type)
    if heading_label is None:
        # Some types are unsupported by breathe
        return
    body = format_heading(1, '%s %s' % (heading_label, package))
    body += format_directive(package_type, package)
    write_file(os.path.join(package_type, package_id), body, args)
def create_modules_toc_file(key, value, args):
    """Create the module's index."""
    if not os.path.isdir(os.path.join(args.destdir, key)):
        return
    pieces = [
        format_heading(1, '%s list' % value),
        '.. toctree::\n',
        '   :glob:\n\n',
        '   %s/*\n' % key,
    ]
    write_file('%slist' % key, ''.join(pieces), args)
def recurse_tree(args):
    """
    Look for every file in the directory tree and create the corresponding
    ReST files.
    """
    tree = xml.etree.ElementTree.parse(os.path.join(args.rootpath, 'index.xml'))
    # Assuming this is a valid Doxygen XML
    for compound in tree.getroot():
        name = compound.findtext('name')
        create_package_file(name, compound.get('kind'), compound.get('refid'), args)
def main():
    """Parse and check the command line arguments."""
    parser = argparse.ArgumentParser(
        description="""\
Parse XML created by Doxygen in <rootpath> and create one reST file with
breathe generation directives per definition in the <DESTDIR>.
Note: By default this script will not overwrite already created files.""",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-o', '--output-dir', action='store', dest='destdir',
                        help='Directory to place all output', required=True)
    parser.add_argument('-f', '--force', action='store_true', dest='force',
                        help='Overwrite existing files')
    parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun',
                        help='Run the script without creating files')
    parser.add_argument('-T', '--no-toc', action='store_true', dest='notoc',
                        help='Don\'t create a table of contents file')
    parser.add_argument('-s', '--suffix', action='store', dest='suffix',
                        help='file suffix (default: rst)', default='rst')
    parser.add_argument('--version', action='version',
                        version='Breathe (breathe-apidoc) %s' % __version__)
    parser.add_argument('rootpath', type=str,
                        help='The directory contains index.xml')
    args = parser.parse_args()
    # Normalize a leading dot in the suffix ('.rst' -> 'rst').
    if args.suffix.startswith('.'):
        args.suffix = args.suffix[1:]
    # Validate the input directory before doing any work.
    if not os.path.isdir(args.rootpath):
        print('%s is not a directory.' % args.rootpath, file=sys.stderr)
        sys.exit(1)
    if 'index.xml' not in os.listdir(args.rootpath):
        print('%s does not contain a index.xml' % args.rootpath, file=sys.stderr)
        sys.exit(1)
    if not os.path.isdir(args.destdir):
        if not args.dryrun:
            os.makedirs(args.destdir)
    args.rootpath = os.path.abspath(args.rootpath)
    recurse_tree(args)
    # Unless suppressed, emit one "<kind>list" toc file per compound kind.
    if not args.notoc:
        for key, value in TYPEDICT.items():
            create_modules_toc_file(key, value, args)
# So program can be started with "python -m breathe.apidoc ..."
# (or by executing this file directly).
if __name__ == "__main__":
    main()
| {
"content_hash": "fb596315a9a609f44f46a2052136352f",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 81,
"avg_line_length": 36,
"alnum_prop": 0.613997113997114,
"repo_name": "frontg8/frontg8lib",
"id": "c6866be51d19c7643d9fec091f72bfb03975811c",
"size": "5570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/ext/breathe/breathe/apidoc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "13967"
},
{
"name": "C++",
"bytes": "48688"
},
{
"name": "CMake",
"bytes": "20304"
}
],
"symlink_target": ""
} |
import fileinput
# Read "a b" pairs from stdin (or files named on argv) and print |a - b|.
for line in fileinput.input():
    tokens = line.split()
    difference = int(tokens[0]) - int(tokens[1])
    print(abs(difference))
| {
"content_hash": "ccc9f49e18e34036c9b8f46cd4020e0e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 30,
"avg_line_length": 20.25,
"alnum_prop": 0.5925925925925926,
"repo_name": "NLSteveO/Kattis",
"id": "5077bdacd40de49bb75049ae02b5b2ec2604cec8",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/different/different.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "21604"
},
{
"name": "Python",
"bytes": "5998"
},
{
"name": "Shell",
"bytes": "3862"
}
],
"symlink_target": ""
} |
import os
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject
from electrum_ltc.logging import get_logger
from electrum_ltc.storage import WalletStorage, StorageEncryptionVersion
from electrum_ltc.wallet_db import WalletDB
from electrum_ltc.bip32 import normalize_bip32_derivation, xpub_type
from electrum_ltc.util import InvalidPassword
from electrum_ltc import keystore
class QEWalletDB(QObject):
    """QML-facing wrapper around a wallet file.

    Locates the wallet storage, decrypts it with a user-supplied password,
    loads (and if needed upgrades) the WalletDB, and reports progress and
    error states to the QML layer through Qt signals and properties.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        # Imported lazily to avoid a circular import with the app module.
        from .qeapp import ElectrumQmlApplication
        self.daemon = ElectrumQmlApplication._daemon
        self.reset()
    _logger = get_logger(__name__)
    # Signals consumed by the QML layer.
    fileNotFound = pyqtSignal()
    pathChanged = pyqtSignal([bool], arguments=["ready"])
    needsPasswordChanged = pyqtSignal()
    needsHWDeviceChanged = pyqtSignal()
    passwordChanged = pyqtSignal()
    validPasswordChanged = pyqtSignal()
    requiresSplitChanged = pyqtSignal()
    splitFinished = pyqtSignal()
    readyChanged = pyqtSignal()
    invalidPassword = pyqtSignal()
    def reset(self):
        """Return all state to the pristine 'no wallet loaded' condition."""
        self._path = None
        self._needsPassword = False
        self._needsHWDevice = False
        self._password = ''
        self._requiresSplit = False
        self._validPassword = True
        self._storage = None
        self._db = None
        self._ready = False
    @pyqtProperty('QString', notify=pathChanged)
    def path(self):
        return self._path
    @path.setter
    def path(self, wallet_path):
        """Setting a new path resets all state before adopting it."""
        if wallet_path == self._path:
            return
        self._logger.info('setting path: ' + wallet_path)
        self.reset()
        self._path = wallet_path
        self.pathChanged.emit(self._ready)
    @pyqtProperty(bool, notify=needsPasswordChanged)
    def needsPassword(self):
        return self._needsPassword
    @needsPassword.setter
    def needsPassword(self, wallet_needs_password):
        if wallet_needs_password == self._needsPassword:
            return
        self._needsPassword = wallet_needs_password
        self.needsPasswordChanged.emit()
    @pyqtProperty(bool, notify=needsHWDeviceChanged)
    def needsHWDevice(self):
        return self._needsHWDevice
    @needsHWDevice.setter
    def needsHWDevice(self, wallet_needs_hw_device):
        if wallet_needs_hw_device == self._needsHWDevice:
            return
        self._needsHWDevice = wallet_needs_hw_device
        self.needsHWDeviceChanged.emit()
    # Write-only property: the password is never exposed back to QML.
    @pyqtProperty('QString', notify=passwordChanged)
    def password(self):
        return '' # no read access
    @password.setter
    def password(self, wallet_password):
        if wallet_password == self._password:
            return
        self._password = wallet_password
        self.passwordChanged.emit()
    @pyqtProperty(bool, notify=requiresSplitChanged)
    def requiresSplit(self):
        return self._requiresSplit
    @pyqtProperty(bool, notify=validPasswordChanged)
    def validPassword(self):
        return self._validPassword
    @validPassword.setter
    def validPassword(self, validPassword):
        if self._validPassword != validPassword:
            self._validPassword = validPassword
            self.validPasswordChanged.emit()
    @pyqtProperty(bool, notify=readyChanged)
    def ready(self):
        return self._ready
    @pyqtSlot()
    def verify(self):
        """Attempt to open the wallet: load storage, then the DB if that worked."""
        self.load_storage()
        if self._storage:
            self.load_db()
    @pyqtSlot()
    def doSplit(self):
        """Split a multi-account wallet file into separate wallets (no-op unless required)."""
        self._logger.warning('doSplit')
        if not self._requiresSplit:
            return
        self._db.split_accounts(self._path)
        self.splitFinished.emit()
    def load_storage(self):
        """Open (and if encrypted, decrypt) the wallet storage at self._path.

        On failure self._storage is left as None; fileNotFound or
        invalidPassword is emitted to tell QML what went wrong.
        """
        self._storage = WalletStorage(self._path)
        if not self._storage.file_exists():
            self._logger.warning('file does not exist')
            self.fileNotFound.emit()
            self._storage = None
            return
        if self._storage.is_encrypted():
            self.needsPassword = True
            try:
                self._storage.decrypt('' if not self._password else self._password)
                self.validPassword = True
            except InvalidPassword as e:
                self.validPassword = False
                self.invalidPassword.emit()
        if not self._storage.is_past_initial_decryption():
            self._storage = None
    def load_db(self):
        """Load the wallet DB from storage, handling split/upgrade situations."""
        # needs storage accessible
        self._db = WalletDB(self._storage.read(), manual_upgrades=True)
        if self._db.requires_split():
            self._logger.warning('wallet requires split')
            self._requiresSplit = True
            self.requiresSplitChanged.emit()
            return
        if self._db.get_action():
            self._logger.warning('action pending. QML version doesn\'t support continuation of wizard')
            return
        if self._db.requires_upgrade():
            self._logger.warning('wallet requires upgrade, upgrading')
            self._db.upgrade()
            self._db.write(self._storage)
        self._ready = True
        self.readyChanged.emit()
| {
"content_hash": "2dd1a01dd0492679ad97e0af0fa3a3f4",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 103,
"avg_line_length": 29.540697674418606,
"alnum_prop": 0.6327494587679591,
"repo_name": "pooler/electrum-ltc",
"id": "491c3d635636bc1ed4811fc7c7793882dc082792",
"size": "5081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_ltc/gui/qml/qewalletdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13024"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2193"
},
{
"name": "NSIS",
"bytes": "7354"
},
{
"name": "Python",
"bytes": "5325268"
},
{
"name": "QML",
"bytes": "318745"
},
{
"name": "Ruby",
"bytes": "16856"
},
{
"name": "Shell",
"bytes": "105672"
},
{
"name": "kvlang",
"bytes": "70748"
}
],
"symlink_target": ""
} |
__author__ = 'thor'
# import ut
import ut.util.ulist
import ut.daf.ch
import ut.daf.get
import pandas as pd
def group_and_count(df, count_col=None, frequency=False):
    """Group *df* by all of its columns and count the rows in each group.

    A Series input is first promoted to a one-column DataFrame. The counts
    land in a new column named *count_col* (a clash-free name is picked when
    not given); with frequency=True the counts are divided by their total.
    """
    if isinstance(df, pd.Series):
        frame = pd.DataFrame()
        frame[df.name] = df
        df = frame
    if not count_col:
        # Pick a column name that does not collide with existing columns.
        count_col = ut.daf.get.free_col_name(df, ['count', 'gr_count'])
    counted = df.copy()
    counted[count_col] = 1
    counted = counted.groupby(list(df.columns)).count().reset_index()
    if frequency:
        counted[count_col] /= float(counted[count_col].sum())
    return counted
def group_and_gather_unique_values_of_cols(df, groupby_cols, gather_col=None):
    """Group by *groupby_cols* and collect the unique *gather_col* values per group."""
    groupby_cols = ut.util.ulist.ascertain_list(groupby_cols)
    if gather_col is None:
        # With exactly one extra column there is no ambiguity about the target.
        assert len(df.columns) == (
            len(groupby_cols) + 1
        ), "I can't guess what the gather_col is in your case (you must have exactly len(groupby_cols)+1 columns"
        gather_col = (set(df.columns) - set(groupby_cols)).pop()
    subset = df[groupby_cols + [gather_col]]
    return subset.groupby(groupby_cols).agg(lambda group: [list(group[gather_col].unique())])
def group_and_gather_values(df, groupby_cols, gather_col=None):
    """Group by *groupby_cols* and collect every *gather_col* value per group."""
    groupby_cols = ut.util.ulist.ascertain_list(groupby_cols)
    if gather_col is None:
        # With exactly one extra column there is no ambiguity about the target.
        assert len(df.columns) == (
            len(groupby_cols) + 1
        ), "I can't guess what the gather_col is in your case (you must have exactly len(groupby_cols)+1 columns"
        gather_col = (set(df.columns) - set(groupby_cols)).pop()
    subset = df[groupby_cols + [gather_col]]
    return subset.groupby(groupby_cols).agg(lambda group: [list(group[gather_col])])
# def group_by_non_hashable_columns(df, groupby_cols, **kwargs):
# if isinstance(groupby_cols, basestring):
# groupby_cols = [groupby_cols]
# groupby_cols_original_types = {c: type(df[c].iloc[0]) for c in groupby_cols}
# df[groupby_cols] = df[groupby_cols].map(tuple)
# dg = df.groupby(map(tuple, df[groupby_cols]))
# df = df.reset_index(drop=False)
# for c, ctype in groupby_cols_original_types.iteritems():
# df[c] = map(ctype, df[c])
| {
"content_hash": "e68292c95eed87f78015e80dca98c301",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 113,
"avg_line_length": 38.60655737704918,
"alnum_prop": 0.6326963906581741,
"repo_name": "thorwhalen/ut",
"id": "5825e32854abd609e56b2462e80532ba4bd6aa73",
"size": "2355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/daf/gr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from core.views import HomePageView
# Root URL configuration (legacy Django ``patterns()`` syntax).
urlpatterns = patterns('',
    # Landing page served by the class-based HomePageView.
    url(r'^$', HomePageView.as_view(), name='home'),
    url(r'^admin/', include(admin.site.urls)),
    # Authentication flows provided by django-allauth.
    url(r'^accounts/', include('allauth.urls')),
    # CMS-style flatpages under /page/.
    url(r'^page/', include('django.contrib.flatpages.urls')),
)
# In development only, let Django itself serve uploaded media files.
if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += patterns('',
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT}))
| {
"content_hash": "fe3ab3d1bb14603efc3844056a8197d5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 30.7,
"alnum_prop": 0.6628664495114006,
"repo_name": "moshthepitt/Kenyan-News",
"id": "ed17821c974185734a31f3538fa7a4968b23444c",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17909"
},
{
"name": "JavaScript",
"bytes": "849"
},
{
"name": "Python",
"bytes": "9619"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.