text stringlengths 4 1.02M | meta dict |
|---|---|
from __future__ import unicode_literals
import powerline as powerline_module
import time
from tests import TestCase
from tests.lib import replace_item
from tests.lib.config_mock import swap_attributes, get_powerline, pop_events
from copy import deepcopy
# In-memory stand-in for powerline's on-disk configuration tree. The mocked
# config loader (tests.lib.config_mock) serves these dicts by key, so the
# top-level keys mirror config-file paths ('colorschemes/test/default', ...).
config = {
    'config': {
        'common': {
            'dividers': {
                'left': {
                    'hard': '>>',
                    'soft': '>',
                },
                'right': {
                    'hard': '<<',
                    'soft': '<',
                },
            },
            'spaces': 0,
            'interval': 0,
        },
        'ext': {
            'test': {
                'theme': 'default',
                'colorscheme': 'default',
            },
        },
    },
    'colors': {
        'colors': {
            'col1': 1,
            'col2': 2,
            'col3': 3,
            'col4': 4,
        },
        'gradients': {},
    },
    'colorschemes/test/default': {
        'groups': {
            'str1': {'fg': 'col1', 'bg': 'col2', 'attr': ['bold']},
            'str2': {'fg': 'col3', 'bg': 'col4', 'attr': ['underline']},
        },
    },
    'colorschemes/test/2': {
        'groups': {
            'str1': {'fg': 'col2', 'bg': 'col3', 'attr': ['bold']},
            'str2': {'fg': 'col1', 'bg': 'col4', 'attr': ['underline']},
        },
    },
    'themes/test/default': {
        'segments': {
            'left': [
                {'type': 'string', 'contents': 's', 'highlight_group': ['str1']},
                {'type': 'string', 'contents': 'g', 'highlight_group': ['str2']},
            ],
            'right': [],
        },
    },
    'themes/test/2': {
        'segments': {
            'left': [
                {'type': 'string', 'contents': 't', 'highlight_group': ['str1']},
                {'type': 'string', 'contents': 'b', 'highlight_group': ['str2']},
            ],
            'right': [],
        },
    },
}
def sleep(interval):
    """Block for ``interval`` seconds.

    Thin module-level wrapper around ``time.sleep`` so tests have a single
    seam for pausing between watcher polls.
    """
    time.sleep(interval)
def add_watcher_events(p, *args, **kwargs):
    """Simulate file-change events and wait for powerline to notice them.

    Args:
        p: powerline instance under test (uses its mock ``_watcher``).
        *args: config keys reported as changed to the watcher.
        **kwargs:
            wait (bool, default True): keep polling until
                ``p._will_create_renderer()`` turns true. When False, poll at
                most once and return without blocking indefinitely.
            interval (float, default 0.1): sleep between polls.
    """
    p._watcher._reset(args)
    while not p._will_create_renderer():
        sleep(kwargs.get('interval', 0.1))
        # BUGFIX: this check used to sit after the loop, where it was dead
        # code (the function returns there anyway) and wait=False could spin
        # forever when no renderer update was ever going to happen (e.g. a
        # run_once powerline whose watcher thread never starts). It must be
        # inside the loop so wait=False bails out after a single poll.
        if not kwargs.get('wait', True):
            return
class TestConfigReload(TestCase):
    """Tests for powerline's on-the-fly configuration reloading.

    Each test renders once to establish a baseline, mutates the module-level
    ``config`` dict (served by the mocked config loader), simulates watcher
    events, then checks that the next render picks up — or, for run-once
    instances, ignores — the change. The expected strings encode rendered
    segments as ``<fg bg attr>`` markers produced by the test renderer.
    """

    def assertAccessEvents(self, *args):
        # Assert that exactly the given config files were (re)loaded since the
        # last call; order-insensitive, hence the set comparison.
        self.assertEqual(set(pop_events()), set(args))

    def test_noreload(self):
        # run_once=True: config changes must NOT be picked up.
        with get_powerline(run_once=True) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['config']['common']['spaces'] = 1
                add_watcher_events(p, 'config', wait=False, interval=0.05)
                # When running once thread should not start
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents()
                self.assertEqual(p.logger._pop_msgs(), [])
        # Without the following assertion test_reload_colors may fail for
        # unknown reason (with AssertionError telling about “config” accessed
        # one more time than needed)
        pop_events()

    def test_reload_main(self):
        # run_once=False: every change to the main config must be reflected.
        with get_powerline(run_once=False) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                # spaces=1 adds a trailing space after each segment.
                config['config']['common']['spaces'] = 1
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<1 2 1> s <2 4 False>>><3 4 4>g <4 False False>>><None None None>')
                self.assertAccessEvents('config')
                self.assertEqual(p.logger._pop_msgs(), [])
                # Switching to a missing theme keeps the previous renderer.
                config['config']['ext']['test']['theme'] = 'nonexistent'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<1 2 1> s <2 4 False>>><3 4 4>g <4 False False>>><None None None>')
                self.assertAccessEvents('config', 'themes/test/nonexistent')
                # It should normally handle file missing error
                self.assertEqual(p.logger._pop_msgs(), ['exception:test:powerline:Failed to create renderer: themes/test/nonexistent'])
                config['config']['ext']['test']['theme'] = 'default'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<1 2 1> s <2 4 False>>><3 4 4>g <4 False False>>><None None None>')
                self.assertAccessEvents('config', 'themes/test/default')
                self.assertEqual(p.logger._pop_msgs(), [])
                # Same for a missing colorscheme.
                config['config']['ext']['test']['colorscheme'] = 'nonexistent'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<1 2 1> s <2 4 False>>><3 4 4>g <4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colorschemes/test/nonexistent')
                # It should normally handle file missing error
                self.assertEqual(p.logger._pop_msgs(), ['exception:test:powerline:Failed to create renderer: colorschemes/test/nonexistent'])
                # Valid colorscheme '2' changes the highlight colors.
                config['config']['ext']['test']['colorscheme'] = '2'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<2 3 1> s <3 4 False>>><1 4 4>g <4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colorschemes/test/2')
                self.assertEqual(p.logger._pop_msgs(), [])
                # Valid theme '2' changes the segment contents (s/g -> t/b).
                config['config']['ext']['test']['theme'] = '2'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<2 3 1> t <3 4 False>>><1 4 4>b <4 False False>>><None None None>')
                self.assertAccessEvents('config', 'themes/test/2')
                self.assertEqual(p.logger._pop_msgs(), [])
                # local_themes is forwarded verbatim to the renderer.
                self.assertEqual(p.renderer.local_themes, None)
                config['config']['ext']['test']['local_themes'] = 'something'
                add_watcher_events(p, 'config')
                self.assertEqual(p.render(), '<2 3 1> t <3 4 False>>><1 4 4>b <4 False False>>><None None None>')
                self.assertAccessEvents('config')
                self.assertEqual(p.logger._pop_msgs(), [])
                self.assertEqual(p.renderer.local_themes, 'something')
        pop_events()

    def test_reload_unexistent(self):
        # A colorscheme key whose load raises is reported, then recovers once
        # the key is added to the mocked config store.
        with get_powerline(run_once=False) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['config']['ext']['test']['colorscheme'] = 'nonexistentraise'
                add_watcher_events(p, 'config')
                # It may appear that p.logger._pop_msgs() is called after given
                # exception is added to the messages, but before config_loader
                # exception was added (this one:
                # “exception:test:config_loader:Error while running condition
                # function for key colorschemes/test/nonexistentraise:
                # fcf:colorschemes/test/nonexistentraise”).
                # sleep(0.1)
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config')
                self.assertIn('exception:test:powerline:Failed to create renderer: fcf:colorschemes/test/nonexistentraise', p.logger._pop_msgs())
                config['colorschemes/test/nonexistentraise'] = {
                    'groups': {
                        "str1": {"fg": "col1", "bg": "col3", "attr": ["bold"]},
                        "str2": {"fg": "col2", "bg": "col4", "attr": ["underline"]},
                    },
                }
                while not p._will_create_renderer():
                    sleep(0.000001)
                self.assertEqual(p.render(), '<1 3 1> s<3 4 False>>><2 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('colorschemes/test/nonexistentraise')
                self.assertEqual(p.logger._pop_msgs(), [])
        pop_events()

    def test_reload_colors(self):
        # Changing a palette entry is reflected in rendered fg codes.
        with get_powerline(run_once=False) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['colors']['colors']['col1'] = 5
                add_watcher_events(p, 'colors')
                self.assertEqual(p.render(), '<5 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('colors')
                self.assertEqual(p.logger._pop_msgs(), [])
        pop_events()

    def test_reload_colorscheme(self):
        # Changing a group's bg in the colorscheme is reflected on re-render.
        with get_powerline(run_once=False) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['colorschemes/test/default']['groups']['str1']['bg'] = 'col3'
                add_watcher_events(p, 'colorschemes/test/default')
                self.assertEqual(p.render(), '<1 3 1> s<3 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('colorschemes/test/default')
                self.assertEqual(p.logger._pop_msgs(), [])
        pop_events()

    def test_reload_theme(self):
        # Changing segment contents in the theme is reflected on re-render.
        with get_powerline(run_once=False) as p:
            with replace_item(globals(), 'config', deepcopy(config)):
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['themes/test/default']['segments']['left'][0]['contents'] = 'col3'
                add_watcher_events(p, 'themes/test/default')
                self.assertEqual(p.render(), '<1 2 1> col3<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('themes/test/default')
                self.assertEqual(p.logger._pop_msgs(), [])
        pop_events()

    def test_reload_theme_main(self):
        # interval=None: reload is driven by render() itself, not a thread, so
        # wait=False is used and the watcher must still have been consulted.
        with replace_item(globals(), 'config', deepcopy(config)):
            config['config']['common']['interval'] = None
            with get_powerline(run_once=False) as p:
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['themes/test/default']['segments']['left'][0]['contents'] = 'col3'
                add_watcher_events(p, 'themes/test/default', wait=False)
                self.assertEqual(p.render(), '<1 2 1> col3<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('themes/test/default')
                self.assertEqual(p.logger._pop_msgs(), [])
                self.assertTrue(p._watcher._calls)
            pop_events()

    def test_run_once_no_theme_reload(self):
        # run_once=True with interval=None: the watcher must never be polled.
        with replace_item(globals(), 'config', deepcopy(config)):
            config['config']['common']['interval'] = None
            with get_powerline(run_once=True) as p:
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents('config', 'colors', 'colorschemes/test/default', 'themes/test/default')
                config['themes/test/default']['segments']['left'][0]['contents'] = 'col3'
                add_watcher_events(p, 'themes/test/default', wait=False)
                self.assertEqual(p.render(), '<1 2 1> s<2 4 False>>><3 4 4>g<4 False False>>><None None None>')
                self.assertAccessEvents()
                self.assertEqual(p.logger._pop_msgs(), [])
                self.assertEqual(p._watcher._calls, [])
            pop_events()
# Attributes of the powerline module swapped out for mocks; filled by
# setUpModule and restored by tearDownModule.
replaces = {}


def setUpModule():
    # swap_attributes replaces powerline internals with the mocks declared in
    # this module's globals and returns the originals; calling it again with
    # the saved originals swaps them back, which is why tearDownModule can be
    # the very same function.
    global replaces
    replaces = swap_attributes(globals(), powerline_module, replaces)


tearDownModule = setUpModule


if __name__ == '__main__':
    # Allow running this test module directly via the project's test runner.
    from tests import main
    main()
| {
"content_hash": "92ed44ef9827276237ab571038e53f2c",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 133,
"avg_line_length": 37.22789115646258,
"alnum_prop": 0.6321608040201006,
"repo_name": "keelerm84/powerline",
"id": "282d611f3c1b4131549214ad27a61ad3d3b66b90",
"size": "11020",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_config_reload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Python",
"bytes": "396184"
},
{
"name": "Shell",
"bytes": "15050"
},
{
"name": "VimL",
"bytes": "5766"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
from contextlib import contextmanager
from glob import glob, iglob
# Public API of this test-utility module (what `import *` exposes).
__all__ = [
    "CustomTestCase",
    "list_all_py_files",
]
class CustomTestCase(unittest.TestCase):
    """``unittest.TestCase`` extended with an inverse-raise assertion."""

    @contextmanager
    def assertNotRaises(self, exc_type):
        """Fail the surrounding test if the managed block raises *exc_type*.

        Any other exception type propagates unchanged.
        """
        try:
            yield
        except exc_type:
            failure = self.failureException(f"{exc_type.__name__} raised")
            raise failure
_excludes_paths = ["tftrt/blog_posts/"]
def list_all_py_files():
for _dir in ["tests", os.path.join("tftrt", "benchmarking-python")]:
for _file in iglob(f"{_dir}/**/*.py", recursive=True):
if any([path in _file for path in _excludes_paths]):
continue
yield _file
| {
"content_hash": "a4671144eb56439ce183c7918eb8a85c",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 72,
"avg_line_length": 22.323529411764707,
"alnum_prop": 0.6034255599472991,
"repo_name": "tensorflow/tensorrt",
"id": "0da913988185b14e30c6a12c2fdad1b35a30f1a0",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46221"
},
{
"name": "CMake",
"bytes": "4340"
},
{
"name": "Jupyter Notebook",
"bytes": "2748791"
},
{
"name": "Python",
"bytes": "588488"
},
{
"name": "Shell",
"bytes": "103938"
},
{
"name": "Starlark",
"bytes": "1478"
}
],
"symlink_target": ""
} |
import sys, os
# Sphinx build configuration for the Semantic Parser documentation.
# Mostly the stock sphinx-quickstart template; commented-out settings keep
# their Sphinx defaults and are left in place as documentation.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Semantic Parser'
copyright = u'2013, Ali Zaidi'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'SemanticParserdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'SemanticParser.tex', u'Semantic Parser Documentation',
     u'Ali Zaidi', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'semanticparser', u'Semantic Parser Documentation',
     [u'Ali Zaidi'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'SemanticParser', u'Semantic Parser Documentation',
     u'Ali Zaidi', 'SemanticParser', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "5f4acaacf379d7c79a984fd98b5f27a4",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 80,
"avg_line_length": 32.33905579399141,
"alnum_prop": 0.7040477770404777,
"repo_name": "alixedi/semantic_parser",
"id": "4e18b4aea879e9484f5d369e41829da0b94ae0f6",
"size": "7961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "14436"
}
],
"symlink_target": ""
} |
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.estimator.export.export_output import ExportOutput
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import nest
class ModeKeys(object):
  """Standard names for model modes.

  Attributes:
    TRAIN: training mode.
    EVAL: evaluation mode.
    PREDICT: inference mode.
  """

  TRAIN = 'train'
  EVAL = 'eval'
  # NOTE: the historical string value for prediction mode is 'infer'.
  PREDICT = 'infer'
class MetricKeys(object):
  """Canonical key strings used when reporting metrics."""

  # Key under which the model loss is reported.
  LOSS = 'loss'
class EstimatorSpec(
    collections.namedtuple('EstimatorSpec', [
        'predictions', 'loss', 'train_op', 'eval_metric_ops',
        'export_outputs', 'training_chief_hooks', 'training_hooks',
        'scaffold'
    ])):
  """Ops and objects returned from a `model_fn` and passed to `Estimator`.

  `EstimatorSpec` fully defines the model to be run by `Estimator`.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metric_ops=None,
              export_outputs=None,
              training_chief_hooks=None,
              training_hooks=None,
              scaffold=None):
    """Creates a validated `EstimatorSpec` instance.

    Depending on the value of `mode`, different arguments are required. Namely

    * For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
    * For `mode == ModeKeys.EVAL`: required field is `loss`.
    * For `mode == ModeKeys.PREDICT`: required fields are `predictions`.

    model_fn can populate all arguments independent of mode. In this case, some
    arguments will be ignored by `Estimator`. E.g. `train_op` will be ignored
    in eval and infer modes. Example:

    ```python
    def my_model_fn(mode, features, labels):
      predictions = ...
      loss = ...
      train_op = ...
      return tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op)
    ```

    Alternatively, model_fn can just populate the arguments appropriate to the
    given mode. Example:

    ```python
    def my_model_fn(mode, features, labels):
      if (mode == tf.estimator.ModeKeys.TRAIN or
          mode == tf.estimator.ModeKeys.EVAL):
        loss = ...
      else:
        loss = None
      if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = ...
      else:
        train_op = None
      if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = ...
      else:
        predictions = None
      return tf.estimator.EstimatorSpec(
          mode=mode,
          predictions=predictions,
          loss=loss,
          train_op=train_op)
    ```

    Args:
      mode: A `ModeKeys`. Specifies if this is training, evaluation or
        prediction.
      predictions: Predictions `Tensor` or dict of `Tensor`.
      loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
      train_op: Op for the training step.
      eval_metric_ops: Dict of metric results keyed by name. The values of the
        dict are the results of calling a metric function, namely a
        `(metric_tensor, update_op)` tuple.
      export_outputs: Describes the output signatures to be exported to
        `SavedModel` and used during serving.
        A dict `{name: output}` where:
        * name: An arbitrary name for this output.
        * output: an `ExportOutput` object such as `ClassificationOutput`,
          `RegressionOutput`, or `PredictOutput`.
        Single-headed models only need to specify one entry in this dictionary.
        Multi-headed models should specify one entry for each head, one of
        which must be named using
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.
      training_chief_hooks: A list of `tf.train.SessionRunHook` objects to
        run on the chief worker during training.
      training_hooks: A list of `tf.train.SessionRunHook` objects to run on
        all workers during training.
      scaffold: A `tf.train.Scaffold` object that can be used to set
        initialization, saver, and more to be used in training.

    Returns:
      A validated `EstimatorSpec` object.

    Raises:
      ValueError: If validation fails.
      TypeError: If any of the arguments is not the expected type.
    """
    # NOTE: `mode` is used only for validation below; it is not one of the
    # namedtuple fields and is not passed to super().__new__, so it is not
    # stored on the resulting EstimatorSpec.

    # Validate train_op.
    if train_op is None:
      if mode == ModeKeys.TRAIN:
        raise ValueError('Missing train_op.')
    else:
      _check_is_tensor_or_operation(train_op, 'train_op')

    # Validate loss.
    if loss is None:
      if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
        raise ValueError('Missing loss.')
    else:
      loss = _check_is_tensor(loss, 'loss')
      loss_shape = loss.get_shape()
      # num_elements() is None for unknown shapes, which are accepted here.
      if loss_shape.num_elements() not in (None, 1):
        raise ValueError('Loss must be scalar, given: {}'.format(loss))
      # A shape-[1] loss is silently reshaped to a scalar.
      if not loss_shape.is_compatible_with(tensor_shape.scalar()):
        loss = array_ops.reshape(loss, [])

    # Validate predictions.
    if predictions is None:
      if mode == ModeKeys.PREDICT:
        raise ValueError('Missing predictions.')
      # Outside PREDICT mode, missing predictions become an empty dict.
      predictions = {}
    else:
      if isinstance(predictions, dict):
        predictions = {
            k: _check_is_tensor(v, 'predictions[{}]'.format(k))
            for k, v in six.iteritems(predictions)
        }
      else:
        predictions = _check_is_tensor(predictions, 'predictions')

    # Validate eval_metric_ops.
    if eval_metric_ops is None:
      eval_metric_ops = {}
    else:
      if not isinstance(eval_metric_ops, dict):
        raise TypeError(
            'eval_metric_ops must be a dict, given: {}'.format(eval_metric_ops))
      for key, metric_value_and_update in six.iteritems(eval_metric_ops):
        if (not isinstance(metric_value_and_update, tuple) or
            len(metric_value_and_update) != 2):
          raise TypeError(
              'Values of eval_metric_ops must be (metric_value, update_op) '
              'tuples, given: {} for key: {}'.format(
                  metric_value_and_update, key))
        metric_value, metric_update = metric_value_and_update
        for metric_value_member in nest.flatten(metric_value):
          # Allow (possibly nested) tuples for metric values, but require that
          # each of them be Tensors or Operations.
          _check_is_tensor_or_operation(metric_value_member,
                                        'eval_metric_ops[{}]'.format(key))
        _check_is_tensor_or_operation(metric_update,
                                      'eval_metric_ops[{}]'.format(key))

    # Validate export_outputs.
    if export_outputs is not None:
      if not isinstance(export_outputs, dict):
        raise TypeError('export_outputs must be dict, given: {}'.format(
            export_outputs))
      for v in six.itervalues(export_outputs):
        if not isinstance(v, ExportOutput):
          raise TypeError(
              'Values in export_outputs must be ExportOutput objects. '
              'Given: {}'.format(export_outputs))
      # Note export_outputs is allowed to be empty.
      if len(export_outputs) == 1:
        # A single non-default entry is additionally aliased under the
        # default serving signature key (the dict is mutated in place).
        (key, value), = export_outputs.items()
        if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
          export_outputs[
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value
      if len(export_outputs) > 1:
        if (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
            not in export_outputs):
          raise ValueError(
              'Multiple export_outputs were provided, but none of them is '
              'specified as the default. Do this by naming one of them with '
              'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')

    # Validate that all tensors and ops are from the default graph.
    default_graph = ops.get_default_graph()
    for value in _prediction_values(predictions):
      if value.graph is not default_graph:
        raise ValueError('prediction values must be from the default graph.')
    if loss is not None and loss.graph is not default_graph:
      raise ValueError('loss must be from the default graph.')
    if train_op is not None and train_op.graph is not default_graph:
      raise ValueError('train_op must be from the default graph.')
    for value in nest.flatten(list(eval_metric_ops.values())):
      if value.graph is not default_graph:
        raise ValueError(
            'eval_metric_ops values must be from the default graph.')

    # Validate hooks.
    if training_chief_hooks is None:
      training_chief_hooks = []
    if training_hooks is None:
      training_hooks = []
    for hook in training_hooks + training_chief_hooks:
      if not isinstance(hook, session_run_hook.SessionRunHook):
        raise TypeError(
            'All hooks must be SessionRunHook instances, given: {}'.format(
                hook))

    # A missing scaffold is replaced by a default one before validation.
    scaffold = scaffold or monitored_session.Scaffold()
    # Validate scaffold.
    if not isinstance(scaffold, monitored_session.Scaffold):
      raise TypeError(
          'scaffold must be tf.train.Scaffold. Given: {}'.format(scaffold))

    return super(EstimatorSpec, cls).__new__(
        cls,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=export_outputs,
        training_chief_hooks=training_chief_hooks,
        training_hooks=training_hooks,
        scaffold=scaffold)
def _check_is_tensor_or_operation(x, name):
  """Raises TypeError if `x` is neither an `Operation` nor a `Tensor`.

  Args:
    x: object to validate.
    name: name used in the error message.

  Raises:
    TypeError: if `x` is not an `ops.Operation` or `ops.Tensor`.
  """
  # Single isinstance call with a tuple replaces the original pair of
  # or-ed isinstance checks (idiomatic and equivalent).
  if not isinstance(x, (ops.Operation, ops.Tensor)):
    raise TypeError('{} must be Operation or Tensor, given: {}'.format(name, x))
def _check_is_tensor(x, tensor_name):
  """Returns `x` if it is a `Tensor`, raises TypeError otherwise."""
  # Guard-clause form: accept and return on the happy path, raise otherwise.
  if isinstance(x, ops.Tensor):
    return x
  raise TypeError('{} must be Tensor, given: {}'.format(tensor_name, x))
def _prediction_values(predictions):
"""Returns the values of the given predictions dict or `Tensor`."""
if predictions is None:
return []
if isinstance(predictions, dict):
return list(six.itervalues(predictions))
return [predictions]
| {
"content_hash": "97967903d8dbea730dfa934ff8ceb41d",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 80,
"avg_line_length": 37.09473684210526,
"alnum_prop": 0.6392357169882709,
"repo_name": "abhitopia/tensorflow",
"id": "ee5999c78bc97f05cf353a192d2d54eecc47e5b8",
"size": "11262",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/model_fn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "177254"
},
{
"name": "C++",
"bytes": "22804170"
},
{
"name": "CMake",
"bytes": "140337"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "794578"
},
{
"name": "HTML",
"bytes": "593171"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13906"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37240"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "209604"
},
{
"name": "Python",
"bytes": "20006785"
},
{
"name": "Shell",
"bytes": "331908"
},
{
"name": "TypeScript",
"bytes": "789019"
}
],
"symlink_target": ""
} |
import json
import argparse
from google.cloud import bigquery
from google.cloud.exceptions import NotFound, BadRequest
# Parse only the flags we know about; unrecognized argv entries are ignored.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='nfhl')
known_args, others = parser.parse_known_args()

# List of NFHL layer/table names; one BigQuery table (plus a staging table)
# is created per entry. Schemas live in sibling '<table>.json' files.
bq_tables = json.load(open('nfhl_layers.json'))

bq_client = bigquery.Client()
project = 'geo-solution-demos'
dataset = known_args.dataset
dataset_id = '{}.{}'.format(project, dataset)
staging_dataset_id = '{}.{}'.format(project, dataset + '_staging')

# create dataset if not exists
try:
    bq_client.get_dataset(dataset_id)
except NotFound:
    ds = bigquery.Dataset(dataset_id)
    ds = bq_client.create_dataset(ds, timeout=30)

# create staging dataset if not exists
try:
    bq_client.get_dataset(staging_dataset_id)
except NotFound:
    ds = bigquery.Dataset(staging_dataset_id)
    ds = bq_client.create_dataset(ds, timeout=30)

# create tables
for table_name in bq_tables:
    bq_schema = []
    bq_schema_staging = []
    schema_filename = '{}.json'.format(table_name)
    table_ref = '{}.{}'.format(dataset_id, table_name)
    with open(schema_filename) as f:
        bq_columns = json.load(f)
    for col in bq_columns:
        if col['name'] == 'geom':
            # The staging table stores 'geom' as STRING; the final table uses
            # the type declared in the schema file.
            bq_schema.append(bigquery.SchemaField(col['name'], col['type']))
            bq_schema_staging.append(bigquery.SchemaField(col['name'], 'STRING'))
        else:
            bq_schema.append(bigquery.SchemaField(col['name'], col['type']))
            bq_schema_staging.append(bigquery.SchemaField(col['name'], col['type']))
    print('creating table {}'.format(table_ref))
    bq_table = bigquery.Table(table_ref, schema=bq_schema)
    # Final tables are clustered on the geometry column.
    bq_table.clustering_fields = ['geom']
    #bq_table.time_partitioning = bigquery.TimePartitioning(type_='YEAR')
    # Creation errors (e.g. table already exists as BadRequest) are logged
    # and skipped rather than aborting the whole run.
    try:
        bq_table = bq_client.create_table(bq_table)
    except BadRequest as e:
        print(e)
    staging_table_ref = '{}.{}'.format(staging_dataset_id, table_name)
    print('creating table {}'.format(staging_table_ref))
    bq_table = bigquery.Table(staging_table_ref, schema=bq_schema_staging)
    try:
        bq_table = bq_client.create_table(bq_table)
    except BadRequest as e:
        print(e)
| {
"content_hash": "26b80647635535bd4b4c3b7b88f2ec87",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 34.36923076923077,
"alnum_prop": 0.6611459265890779,
"repo_name": "GoogleCloudPlatform/solutions-geospatial-analytics",
"id": "7ae609d6d7021678e53efecb78f9426adef5831f",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ark-demo/pipelines/nfhl/bq_create_tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "926"
},
{
"name": "EJS",
"bytes": "9262"
},
{
"name": "HTML",
"bytes": "7648"
},
{
"name": "JavaScript",
"bytes": "65214"
},
{
"name": "Python",
"bytes": "16905"
},
{
"name": "Shell",
"bytes": "68"
}
],
"symlink_target": ""
} |
import os
import sys
from distutils.version import LooseVersion
import warnings
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
try:
require_minimum_pandas_version()
require_minimum_pyarrow_version()
except ImportError as e:
if os.environ.get("SPARK_TESTING"):
warnings.warn(str(e))
sys.exit()
else:
raise
from pyspark.pandas.version import __version__ # noqa: F401
def assert_python_version() -> None:
    """Emit a FutureWarning when running on a deprecated Python version.

    Python 3.5 is the newest deprecated minor release; 3.6 and later pass
    silently.
    """
    deprecated_version = (3, 5)
    min_supported_version = (deprecated_version[0], deprecated_version[1] + 1)
    if sys.version_info[:2] > deprecated_version:
        return
    warnings.warn(
        "pandas-on-Spark support for Python {dep_ver} is deprecated and will be dropped in "
        "the future release. At that point, existing Python {dep_ver} workflows "
        "that use pandas-on-Spark will continue to work without modification, but "
        "Python {dep_ver} users will no longer get access to the latest pandas-on-Spark "
        "features and bugfixes. We recommend that you upgrade to Python {min_ver} or "
        "newer.".format(
            dep_ver=".".join(map(str, deprecated_version)),
            min_ver=".".join(map(str, min_supported_version)),
        ),
        FutureWarning,
    )
assert_python_version()
import pyarrow
if (
LooseVersion(pyarrow.__version__) >= LooseVersion("2.0.0")
and "PYARROW_IGNORE_TIMEZONE" not in os.environ
):
import logging
logging.warning(
"'PYARROW_IGNORE_TIMEZONE' environment variable was not set. It is required to "
"set this environment variable to '1' in both driver and executor sides if you use "
"pyarrow>=2.0.0. "
"pandas-on-Spark will set it for you but it does not work if there is a Spark context "
"already launched."
)
os.environ["PYARROW_IGNORE_TIMEZONE"] = "1"
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.indexes.base import Index
from pyspark.pandas.indexes.category import CategoricalIndex
from pyspark.pandas.indexes.datetimes import DatetimeIndex
from pyspark.pandas.indexes.multi import MultiIndex
from pyspark.pandas.indexes.numeric import Float64Index, Int64Index
from pyspark.pandas.series import Series
from pyspark.pandas.groupby import NamedAgg
__all__ = [ # noqa: F405
"read_csv",
"read_parquet",
"to_datetime",
"date_range",
"from_pandas",
"get_dummies",
"DataFrame",
"Series",
"Index",
"MultiIndex",
"Int64Index",
"Float64Index",
"CategoricalIndex",
"DatetimeIndex",
"sql",
"range",
"concat",
"melt",
"get_option",
"set_option",
"reset_option",
"read_sql_table",
"read_sql_query",
"read_sql",
"options",
"option_context",
"NamedAgg",
]
def _auto_patch_spark() -> None:
    """Attach the optional usage logger and monkey-patch PySpark DataFrames.

    Controlled by two environment variables:
    - ``KOALAS_USAGE_LOGGER``: dotted module path of a usage logger to attach.
    - ``SPARK_KOALAS_AUTOPATCH``: unless disabled, adds ``to_pandas_on_spark``
      and the legacy ``to_koalas`` onto ``pyspark.sql.DataFrame``.
    """
    import os
    import logging

    # Attach a usage logger.
    usage_module = os.getenv("KOALAS_USAGE_LOGGER", "")
    if usage_module:
        try:
            from pyspark.pandas import usage_logging

            usage_logging.attach(usage_module)
        except Exception as exc:
            logging.getLogger("pyspark.pandas.usage_logger").warning(
                "Tried to attach usage logger `{}`, but an exception was raised: {}".format(
                    usage_module, str(exc)
                )
            )

    # Autopatching is on by default.
    autopatch = os.getenv("SPARK_KOALAS_AUTOPATCH", "true")
    if autopatch.lower() in ("true", "1", "enabled"):
        logging.getLogger("spark").info(
            "Patching spark automatically. You can disable it by setting "
            "SPARK_KOALAS_AUTOPATCH=false in your environment"
        )
        from pyspark.sql import dataframe as df

        df.DataFrame.to_pandas_on_spark = DataFrame.to_pandas_on_spark  # type: ignore
        # Keep to_koalas for backward compatibility for now.
        df.DataFrame.to_koalas = DataFrame.to_koalas  # type: ignore
# Whether pandas itself already provides __class_getitem__ on DataFrame /
# Series.  Defaults here; refreshed by _auto_patch_pandas() below and
# inspected by test cases.
_frame_has_class_getitem = False
_series_has_class_getitem = False
def _auto_patch_pandas() -> None:
    """Record whether pandas defines ``__class_getitem__`` and, when it does
    not (on Python >= 3.7), forward class subscripting on ``pd.DataFrame`` /
    ``pd.Series`` to the pandas-on-Spark equivalents.
    """
    import pandas as pd

    # In order to use it in test cases.
    global _frame_has_class_getitem
    global _series_has_class_getitem

    _frame_has_class_getitem = hasattr(pd.DataFrame, "__class_getitem__")
    _series_has_class_getitem = hasattr(pd.Series, "__class_getitem__")

    if sys.version_info < (3, 7):
        return
    # Just in case pandas implements '__class_getitem__' later.
    if not _frame_has_class_getitem:
        pd.DataFrame.__class_getitem__ = lambda params: DataFrame.__class_getitem__(params)
    if not _series_has_class_getitem:
        pd.Series.__class_getitem__ = lambda params: Series.__class_getitem__(params)
_auto_patch_spark()
_auto_patch_pandas()
# Import after the usage logger is attached.
from pyspark.pandas.config import get_option, options, option_context, reset_option, set_option
from pyspark.pandas.namespace import * # F405
from pyspark.pandas.sql_processor import sql
| {
"content_hash": "dc68f3dfe87131a99c940dc0b5a5367b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 100,
"avg_line_length": 30.873493975903614,
"alnum_prop": 0.6442926829268293,
"repo_name": "maropu/spark",
"id": "12e54adba06a85a803238145988190fc69341dd0",
"size": "5909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/pandas/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "50108"
},
{
"name": "Batchfile",
"bytes": "25676"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26852"
},
{
"name": "Dockerfile",
"bytes": "9127"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4155366"
},
{
"name": "JavaScript",
"bytes": "209968"
},
{
"name": "Makefile",
"bytes": "1587"
},
{
"name": "PLSQL",
"bytes": "6658"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3219226"
},
{
"name": "R",
"bytes": "1203999"
},
{
"name": "Roff",
"bytes": "36438"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32564514"
},
{
"name": "Shell",
"bytes": "209299"
},
{
"name": "TSQL",
"bytes": "473509"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
import re
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = Library()
def _no_op(x):
    """Identity escape function, used when autoescaping is disabled."""
    return x
def _esc_func(autoescape):
    """Return the escape callable matching the template autoescape setting."""
    return conditional_escape if autoescape else _no_op
@stringfilter
def spacify(value, autoescape=None):
    # Escape first (when autoescaping is on), then substitute spaces/tabs so
    # indentation survives HTML whitespace collapsing.
    # NOTE(review): the replacement literals appear to be non-breaking-space
    # characters (U+00A0, originally "&nbsp;" entities) — confirm the exact
    # bytes before editing this function.
    esc = _esc_func(autoescape)
    val = esc(value).replace(' ', " ")
    val = val.replace('\t', '    ')
    return mark_safe(val)
def _urlify(text):
    """Replace every ``"<file>.py", line <n>`` traceback fragment in *text*
    with a link to the silk source view for that file/line.

    Fixes: the regex now uses a raw string (``'\\.'`` in a plain string is an
    invalid escape sequence), and the parameter no longer shadows the builtin
    ``str``.
    """
    pattern = re.compile(r'"(?P<src>.*\.py)", line (?P<num>[0-9]+).*')
    match = pattern.search(text)
    while match:
        group = match.groupdict()
        src, num = group['src'], group['num']
        start, end = match.start('src'), match.end('src')
        link = '<a href="/silk/src/?file_path={src}&line_num={num}">{src}</a>'.format(src=src, num=num)
        # Splice the link in and rescan; the inserted markup cannot itself
        # match (the anchor text is not followed by '", line').
        text = text[:start] + link + text[end:]
        match = pattern.search(text)
    return text
@register.filter
def hash(h, key):
    """Subscript lookup for templates: ``{{ mapping|hash:key }}``."""
    value = h[key]
    return value
def _process_microseconds(dt_strftime):
    """Trim a '%f'-formatted timestamp down to millisecond precision.

    Example: '12:30:45.123456' -> '12:30:45.123'.
    """
    time_part, _, micro_part = dt_strftime.rpartition('.')
    rounded = '%.3f' % float('0.' + micro_part)
    return time_part + rounded[1:]
def _silk_date_time(dt):
    """Format *dt* for display: time-only when it falls on today's date,
    otherwise date + time; both trimmed to milliseconds."""
    if dt.date() == timezone.now().date():
        fmt = '%H:%M:%S.%f'
    else:
        fmt = '%Y.%m.%d %H:%M.%f'
    return _process_microseconds(dt.strftime(fmt))
@register.filter(expects_localtime=True)
def silk_date_time(dt):
    """Template-facing wrapper around :func:`_silk_date_time`."""
    return _silk_date_time(dt)
@register.filter
def sorted(l):
    """Template filter returning a sorted copy of *l*.

    The filter name shadows the builtin inside this module, so the original
    ``return sorted(l)`` called itself and recursed forever; go through the
    ``builtins`` module instead.
    """
    import builtins
    return builtins.sorted(l)
@stringfilter
def filepath_urlify(value, autoescape=None):
    """Linkify ``"file.py", line N`` traceback fragments (see _urlify)."""
    return mark_safe(_urlify(value))
@stringfilter
def body_filter(value):
    """Return *value* unchanged unless it exceeds 20 characters, in which
    case return a 'Too big!' placeholder.

    Fix: removed a leftover debug ``print(value)`` that wrote every filtered
    body to stdout on each template render.
    """
    if len(value) > 20:
        return 'Too big!'
    return value
# These two filters inspect the autoescape state, so Django must pass it in.
spacify.needs_autoescape = True
filepath_urlify.needs_autoescape = True
# Filters not decorated with @register.filter above are registered here.
register.filter(spacify)
register.filter(filepath_urlify)
register.filter(body_filter)
| {
"content_hash": "db09ec819885d16646d5692bb0bb9c22",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 102,
"avg_line_length": 22.010416666666668,
"alnum_prop": 0.6256507335541883,
"repo_name": "django-silk/silk",
"id": "11145642dc668eb6d5fd08198e215cfe70db5e4a",
"size": "2113",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "silk/templatetags/silk_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23911"
},
{
"name": "HTML",
"bytes": "61877"
},
{
"name": "JavaScript",
"bytes": "81685"
},
{
"name": "Python",
"bytes": "192253"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
} |
"""This example gets the current user."""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
  """Fetch the current Ad Manager user and print a one-line summary."""
  user_service = client.GetService('UserService', version='v202208')
  current = user_service.getCurrentUser()
  print('User with ID %d, name "%s", email "%s", and role "%s" '
        'is the current user.' % (
            current.id, current.name, current.email, current.roleName))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| {
"content_hash": "791e5d6fbde1e102d5e13d1087d312f2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 68,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.6793650793650794,
"repo_name": "googleads/googleads-python-lib",
"id": "b193dbac3d19fb7bbf796b2bd30c923d690e4960",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202208/user_service/get_current_user.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
} |
"""
주석 부분 입력할거임.
"""
import os
import platform
'''
class ApWrapper:
def __init__(self):
self.hostapd = ''
self.context = device_config_pb2.Devices.AccessPoint.Ap()
def __getitem__(self, key):
if not isinstance(key, int):
return None
return self.context.access_point_information.ap_list[key]
def __setitem__(self, key, value):
if not isinstance(key, int):
return
if not isinstance(value, tuple):
return
ap = self.context.access_point_information.ap_list[key]
ap[value[0]] = value[1]
def append(self, item):
self.context.access_point_information.ap_list.extend([item])
'''
class hostapd:
    """Thin wrapper around a hostapd-based Wi-Fi access point.

    Maintains two dictionaries that together form the hostapd config file:
    ``config_dict`` (user-editable values) and ``static_dict`` (fixed hostapd
    settings).  Every change is reported to ``ap_listener.on_ap_changed(...)``.

    Fixes over the original:
    - ``read_config``/``write_config`` now honour ``self.path`` instead of a
      hard-coded ``'./openwinnet.conf'``.
    - The emptiness test in ``read_config`` used an ``or`` chain that was
      always true; empty file values no longer clobber static defaults.
    - File handles are managed with ``with``.
    - ``inspect.getargspec`` (removed in Python 3.11) replaced by
      ``inspect.getfullargspec`` (same behaviour for bound methods).
    """

    def get_ip_address(self, ifname):
        """Return the IPv4 address of *ifname*; loopback on non-Linux hosts."""
        if self.plat == 'Linux':
            ip = os.popen('ip addr show ' + ifname).read().split("inet ")[1].split("/")[0]
        else:
            ip = '127.0.0.1'
        return ip

    def __init__(self, ap_listener, path="./openwinnet.conf"):
        """Create the wrapper.

        Args:
            ap_listener: object exposing ``on_ap_changed(dict)``.
            path: location of the hostapd config file (read and written).
        """
        self.path = path
        self.plat = platform.system()
        self.ap_listener = ap_listener
        self.is_starting = False
        self.version = -1
        ### mapping from user-facing command aliases to setter method names
        self.method_map = {
            'ssid' : 'set_ssid',
            'ss' : 'set_ssid',
            'name' : 'set_ssid',
            'password' : 'set_password',
            'pw' : 'set_password',
            'pwd' : 'set_password',
            'channel' : 'set_channel'
        }
        ### default configurable dictionary (user-editable values)
        self.config_dict = {
            'ssid': 'default_value',
            'ip': '127.0.0.1',
            'channel' : '1',
            'hw_mode' : 'g',
            'power_on_off' : '0',
            'password' : '0'
        }
        ### default static dictionary (fixed hostapd settings)
        self.static_dict = {
            'wpa_passphrase': '12345678',
            'interface' : 'wlan0',
            'wpa' : '3',
            'wpa_key_mgmt' : 'WPA-PSK',
            'wpa_pairwise' : 'TKIP',
            'rsn_pairwise' : 'CCMP',
            'auth_algs' : '1',
            'driver' : 'nl80211',
            'ctrl_interface' : '/var/run/hostapd',
            'ctrl_interface_group' : '0',
            'ieee80211n' : '1',
            'wmm_enabled' : '1',
            # NOTE(review): '[HT20][SHORT-GI[20]' looks malformed (probably
            # meant '[HT20][SHORT-GI-20]'); left unchanged deliberately.
            'ht_capab' : '[HT20][SHORT-GI[20]'
        }
        self.read_config()
        self.config_dict['ip'] = self.get_ip_address('eth0')
        self.ap_listener_init()

    def ap_listener_init(self):
        """Push the full initial configuration to the listener."""
        self.ap_listener.on_ap_changed(
            self.config_dict
        )

    def _edit_config(self, command, value):
        """Resolve *command* (possibly an alias) to a setter and invoke it.

        Setters that take a value also trigger a config rewrite and, when the
        AP is running, a restart.
        """
        import inspect
        if command in self.method_map:
            command = self.method_map[command]
        if hasattr(self, command):
            func = getattr(self, command)
            if callable(func):
                # getfullargspec reports 'self' for bound methods, matching
                # the removed getargspec, so the > 1 test is unchanged.
                if len(inspect.getfullargspec(func).args) > 1:
                    func(value)
                    self.write_config()
                    if self.is_starting:
                        self.stop()
                        self.start()
                else:
                    func()

    #### setters: update config_dict and notify the listener
    def set_ssid(self, value):
        self.config_dict['ssid'] = value
        self.ap_listener.on_ap_changed(
            {'ssid': value }
        )

    def set_channel(self, value):
        self.config_dict['channel'] = value
        self.ap_listener.on_ap_changed(
            { 'channel' : value }
        )

    def set_mode(self, value):
        self.config_dict['hw_mode'] = value
        self.ap_listener.on_ap_changed(
            { 'hw_mode' : value }
        )

    def set_power_on_off(self, value):
        self.config_dict['power_on_off'] = value
        self.ap_listener.on_ap_changed(
            { 'power_on_off' : value }
        )

    def set_password(self, value):
        self.config_dict['password'] = value
        self.ap_listener.on_ap_changed(
            {'password': value}
        )

    def read_config(self):
        """Load key=value pairs from the config file at ``self.path``."""
        with open(self.path, 'r') as f:
            for line in f:
                # split only on the first '=' so values may contain '='
                key, val = line.split('=', 1)
                val = val.replace('\n', '')
                if key in self.static_dict:
                    # keep the built-in default when the file value is empty
                    if val:
                        self.static_dict[key] = val
                elif key in self.config_dict:
                    self.config_dict[key] = val

    def write_config(self):
        """Write config_dict (minus runtime-only keys) plus static_dict."""
        with open(self.path, 'w') as f:
            for key, val in self.config_dict.items():
                # 'ip' and 'power_on_off' are runtime state, not hostapd keys
                if key != 'ip' and key != 'power_on_off':
                    f.write(key + '=' + val + '\n')
            for key, val in self.static_dict.items():
                f.write(key + '=' + val + '\n')

    def start(self):
        """Bring the AP up (Linux only): radio off, static IP, dhcpd, hostapd."""
        import subprocess
        if self.plat == 'Linux':
            print(subprocess.getoutput("nmcli radio wifi off"))
            print(subprocess.getoutput("rfkill unblock wlan"))
            print(subprocess.getoutput("ifconfig wlan0 192.168.1.34 up"))
            print(subprocess.getoutput("dhcpd"))
            print(subprocess.getoutput("hostapd -dd " + self.path + " -B"))
            self.is_starting = True
        else:
            print ("We will develop other operating systems.")

    def stop(self):
        """Tear the AP down by killing dhcpd and hostapd (Linux only)."""
        import subprocess
        if self.plat == 'Linux':
            print("temp_start")
            subprocess.getoutput("skill dhcpd")
            subprocess.getoutput("skill hostapd")
            self.is_starting = False
        else:
            print("We will develop other operating systems.")
# def stop_hostapd(self):
#
| {
"content_hash": "8cb396134cee81ac9537556aac9e560c",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 90,
"avg_line_length": 25.67948717948718,
"alnum_prop": 0.4876019304376768,
"repo_name": "OpenWinCon/OpenWinNet",
"id": "2450f392ce00bf7a38118145aaadefe0b68f0dfa",
"size": "6027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agent/yang_ap_controller/hostapd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "159532"
},
{
"name": "CSS",
"bytes": "81951"
},
{
"name": "HTML",
"bytes": "153375"
},
{
"name": "Java",
"bytes": "42290"
},
{
"name": "JavaScript",
"bytes": "140705"
},
{
"name": "Makefile",
"bytes": "4410"
},
{
"name": "Objective-C",
"bytes": "3394"
},
{
"name": "Python",
"bytes": "7012310"
},
{
"name": "Shell",
"bytes": "10671"
}
],
"symlink_target": ""
} |
from flask import Flask,render_template
from micro_scrabble import app
from micro_scrabble import db
from forms import NewGameForm,PlayerForm,SwapLettersForm
from models import GameArchive
import game as scrabble
#board config
s = 50
def update_game(instance, archive):
    """Persist the mutable state of a Game instance into its SQL row."""
    racks = {name: player.letter_rack for name, player in instance.players.items()}
    points = {name: player.score for name, player in instance.players.items()}
    archive.update({'board_matrix': instance.board.board_matrix,
                    'letters': instance.tilebag.letters,
                    'letter_racks': racks,
                    'scores': points})
    db.session.commit()
def pack_game(instance):
    """Serialize a Game instance into a new GameArchive row object."""
    names = list(instance.players)
    racks = {name: instance.players[name].letter_rack for name in names}
    points = {name: instance.players[name].score for name in names}
    return GameArchive(instance.name, instance.board.board_matrix,
                       instance.tilebag.letters, instance.board.dims,
                       instance.max_rack_letters, names, points, racks)
def unpack_game(archive):
    """Rebuild a Game instance from a SQL query object.

    Fix: the original evaluated ``archive.first()`` eight times; each call
    re-executes the query, so fetch the row once and reuse it.
    """
    record = archive.first()
    instance = scrabble.Game(name=record.game_name,
                             max_rack_letters=record.max_rack_letters,
                             letter_ratio_file='', board_setup_file='',
                             board_matrix=record.board_matrix,
                             dims=record.dims, letters=record.letters)
    instance.add_players(num_players=len(record.players),
                         player_names=record.players,
                         scores=record.scores,
                         letter_racks=record.letter_racks)
    return instance
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    page_title = 'Scrabble In a Bottle'
    return render_template('index.html', title=page_title)
@app.route('/new_game',methods=['GET','POST'])
def new_game():
    """Form for submitting new game"""
    #name,players = None,None
    create_game_form = NewGameForm()
    if create_game_form.validate_on_submit():
        #split the string of player names (comma-separated)
        #TODO: error handling if this is not formatted correctly
        players = create_game_form.players.data.split(',')
        #Instantiate class
        game = scrabble.Game(name=create_game_form.name.data)
        #add players
        game.add_players(player_names=players,num_players=len(players))
        #add to database
        game_archive = pack_game(game)
        db.session.add(game_archive)
        db.session.commit()
        # NOTE(review): the rendered pages returned by cur_game/player_view
        # are discarded here — presumably called for warm-up/side effects;
        # confirm whether these calls are actually needed.
        cur_game(game.name)
        #create player pages
        for p in players:
            player_view(game.name,p)
    return render_template('new_game.html',form=create_game_form)
@app.route('/current_games')
def show_games():
    """Render a listing of every archived game."""
    return render_template('current_games.html', games=GameArchive.query.all())
@app.route('/delete-game-<game_name>')
def delete_game(game_name):
    """Remove a game from the database, then show the remaining games."""
    doomed = GameArchive.query.filter_by(game_name=game_name).first()
    db.session.delete(doomed)
    db.session.commit()
    return render_template('current_games.html', games=GameArchive.query.all())
@app.route('/game-<game_name>')
def cur_game(game_name):
    """Render the board view for an existing game."""
    archive = GameArchive.query.filter_by(game_name=game_name)
    game = unpack_game(archive)
    players = [
        {'name': game.players[key].name, 'score': game.players[key].score}
        for key in game.players
    ]
    return render_template('board.html',
                           name=game.name,
                           height=game.board.dims[0] * s,
                           width=game.board.dims[1] * s,
                           square=s,
                           board_matrix=game.board.board_matrix,
                           player_list=players)
@app.route('/game-<game_name>/players/<player_name>',methods=['GET','POST'])
def player_view(game_name,player_name):
    """Player Page: shows the letter rack and handles play/swap submissions."""
    #make SQL request
    game_archive = GameArchive.query.filter_by(game_name=game_name)
    #rebuild class instance
    game = unpack_game(game_archive)
    #make submit form
    player_form = PlayerForm()
    #make swap letter form
    swap_form = SwapLettersForm()
    #validation for play submission
    if player_form.validate_on_submit():
        #parse tile positions: rows/cols arrive as comma-separated strings
        #NOTE(review): no error handling if counts mismatch or ints fail to parse
        tile_pos = [(int(r),int(c)) for r,c in zip(player_form.rows.data.split(','), player_form.cols.data.split(','))]
        #play word
        played_word = game.players[player_name].play_word(word=player_form.word_play.data, tile_pos=tile_pos)
        #place tiles
        game.board.place_tiles(played_word)
        #draw letters to refill the player's rack
        game.tilebag.draw_letters(game.players[player_name])
        #update database
        update_game(game,game_archive)
    elif swap_form.validate_on_submit():
        #swap a single letter back into the tile bag
        game.tilebag.swap_letter(game.players[player_name],swap_form.letter.data)
        #update database
        update_game(game,game_archive)
    return render_template('player.html', submit_form=player_form, swap_form=swap_form, letter_rack=game.players[player_name].letter_rack, num_letters=len(game.players[player_name].letter_rack), name=player_name, square=2*s)
| {
"content_hash": "72fbcc0695b21904443b6ca45932847c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 266,
"avg_line_length": 42.02439024390244,
"alnum_prop": 0.6869800735151866,
"repo_name": "MG-Barnes/micro-scrabble",
"id": "e00f912a053b783887c7c9fb8bb875757bfd6501",
"size": "5169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "micro_scrabble/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95"
},
{
"name": "HTML",
"bytes": "9284"
},
{
"name": "Python",
"bytes": "15994"
}
],
"symlink_target": ""
} |
"""Python wrapper for the Block GRU Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rnn.ops import gen_gru_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_gru_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_gru_ops.so"))
@ops.RegisterGradient("GRUBlockCell")
def _GRUBlockCellGrad(op, *grad):
r"""Gradient for GRUBlockCell.
Args:
op: Op for which the gradient is defined.
*grad: Gradients of the optimization function wrt output
for the Op.
Returns:
d_x: Gradients wrt to x
d_h: Gradients wrt to h
d_w_ru: Gradients wrt to w_ru
d_w_c: Gradients wrt to w_c
d_b_ru: Gradients wrt to b_ru
d_b_c: Gradients wrt to b_c
Mathematics behind the Gradients below:
```
d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
d_r_bar_u_bar = [d_r_bar d_u_bar]
[d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
[d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
d_x = d_x_component_1 + d_x_component_2
d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + u
```
Below calculation is performed in the python wrapper for the Gradients
(not in the gradient kernel.)
```
d_w_ru = x_h_prevr^T * d_c_bar
d_w_c = x_h_prev^T * d_r_bar_u_bar
d_b_ru = sum of d_r_bar_u_bar along axis = 0
d_b_c = sum of d_c_bar along axis = 0
```
"""
x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
r, u, c, _ = op.outputs
_, _, _, d_h = grad
d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = gen_gru_ops.gru_block_cell_grad(
x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)
x_h_prev = array_ops.concat([x, h_prev], 1)
d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)
x_h_prevr = array_ops.concat([x, h_prev * r], 1)
d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
d_b_c = nn_ops.bias_add_grad(d_c_bar)
return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
class GRUBlockCell(rnn_cell_impl.RNNCell):
  r"""Block GRU cell implementation.
  Deprecated: use GRUBlockCellV2 instead.
  The implementation is based on: http://arxiv.org/abs/1406.1078
  Computes the GRU cell forward propagation for 1 time step.
  This kernel op implements the following mathematical equations:
  Biases are initialized with:
  * `b_ru` - constant_initializer(1.0)
  * `b_c` - constant_initializer(0.0)
  ```
  x_h_prev = [x, h_prev]
  [r_bar u_bar] = x_h_prev * w_ru + b_ru
  r = sigmoid(r_bar)
  u = sigmoid(u_bar)
  h_prevr = h_prev \circ r
  x_h_prevr = [x h_prevr]
  c_bar = x_h_prevr * w_c + b_c
  c = tanh(c_bar)
  h = (1-u) \circ c + u \circ h_prev
  ```
  """
  def __init__(self, cell_size):
    """Initialize the Block GRU cell.
    Args:
      cell_size: int, GRU cell size.
    """
    self._cell_size = cell_size
  @property
  def state_size(self):
    # GRU state is a single tensor of cell_size units.
    return self._cell_size
  @property
  def output_size(self):
    # Output and state share the same size in a GRU.
    return self._cell_size
  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]
      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")
      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      # NOTE(review): when cell_size is None this first check fires and
      # raises the mismatch error; the dedicated None check below is
      # effectively unreachable. Confirm intended ordering.
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")
      # Gate weights/bias: [r_bar u_bar] = [x, h_prev] * w_ru + b_ru
      w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
                                      self._cell_size * 2])
      b_ru = vs.get_variable(
          "b_ru", [self._cell_size * 2],
          initializer=init_ops.constant_initializer(1.0))
      # Candidate weights/bias: c_bar = [x, h_prev \circ r] * w_c + b_c
      w_c = vs.get_variable("w_c",
                            [input_size + self._cell_size, self._cell_size])
      b_c = vs.get_variable(
          "b_c", [self._cell_size],
          initializer=init_ops.constant_initializer(0.0))
      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
      # RNNCell contract: return (output, new_state); identical for GRU.
      return new_h, new_h
class GRUBlockCellV2(GRUBlockCell):
  """Temporary GRUBlockCell impl with a different variable naming scheme.
  Only differs from GRUBlockCell by variable names: weights/biases live in
  "gates"/"candidate" sub-scopes as "kernel"/"bias", matching GRUCell so
  checkpoints are interchangeable.
  """
  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]
      # Check if the input size exist.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")
      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      # NOTE(review): same ordering caveat as GRUBlockCell — the None check
      # below is shadowed by the inequality check above.
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))
      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")
      # Gate parameters under "gates" scope (GRUCell-compatible names).
      with vs.variable_scope("gates"):
        w_ru = vs.get_variable("kernel", [input_size + self._cell_size,
                                          self._cell_size * 2])
        b_ru = vs.get_variable(
            "bias", [self._cell_size * 2],
            initializer=init_ops.constant_initializer(1.0))
      # Candidate parameters under "candidate" scope.
      with vs.variable_scope("candidate"):
        w_c = vs.get_variable("kernel",
                              [input_size + self._cell_size, self._cell_size])
        b_c = vs.get_variable(
            "bias", [self._cell_size],
            initializer=init_ops.constant_initializer(0.0))
      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)
      return new_h, new_h
| {
"content_hash": "bdaf286318a035b7fded20e5500adc30",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 82,
"avg_line_length": 30.829383886255926,
"alnum_prop": 0.6038431975403535,
"repo_name": "tillahoffmann/tensorflow",
"id": "bf74fd7544b8eb64fd19390b3a2af73aeb03f060",
"size": "7194",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/rnn/python/ops/gru_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29666741"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412108"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26418399"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373800"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform_v1beta1
async def sample_list_contexts():
    """List metadata contexts with the async Metadata Service client."""
    # Create a client
    client = aiplatform_v1beta1.MetadataServiceAsyncClient()

    # Initialize request argument(s)
    request = aiplatform_v1beta1.ListContextsRequest(
        parent="parent_value",
    )

    # Make the request. Fix: the async client's list_contexts returns a
    # coroutine resolving to an async pager, so it must be awaited before
    # iterating.
    page_result = await client.list_contexts(request=request)

    # Handle the response
    async for response in page_result:
        print(response)

# [END aiplatform_v1beta1_generated_MetadataService_ListContexts_async]
| {
"content_hash": "e055df3ea7e278f0a95dc813348e8c13",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 26.75,
"alnum_prop": 0.7233644859813084,
"repo_name": "googleapis/python-aiplatform",
"id": "2c51a86f2194602249009643f42895c290eed70b",
"size": "1932",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Packaging metadata for the stoQ "gcs" plugin (Google Cloud Storage I/O).
setup(
    name="gcs",
    version="3.0.0",
    author="Marcus LaFerrera (@mlaferrera)",
    url="https://github.com/PUNCH-Cyber/stoq-plugins-public",
    license="Apache License 2.0",
    description="Read and write data to Google Cloud Storage",
    packages=find_packages(),
    # Ship non-Python files listed in MANIFEST.in / package data.
    include_package_data=True,
)
| {
"content_hash": "18d0e1326427fff78b4a1c44c3bf786c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.6882022471910112,
"repo_name": "PUNCH-Cyber/stoq-plugins-public",
"id": "fa77b4f44ee756e4e7c4b35e78f8bcba2a8eea0d",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcs/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "706"
},
{
"name": "Python",
"bytes": "199236"
},
{
"name": "Smarty",
"bytes": "1224"
},
{
"name": "YARA",
"bytes": "103573"
}
],
"symlink_target": ""
} |
"""
size tupes are always (height, width) so that image.shape == (height, width, :)
coordinates are always (row, column) so that `image[row, column]` where `0 < row < height`
"""
import warnings
import time
from skimage import img_as_float, io, transform
# Plotting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
import logging
def center_roi_around(center_rc, size_hw):
    """
    Return a rectangular region of interest (ROI) of size `size_hw` around
    the given center coordinates, as

        (start_row, start_column, end_row, end_column)

    where start_row/column are *included* and end_row/column are excluded.

    - `center_rc`: `(row, column)` tuple; values are rounded to the nearest
      integer (note: `row` is the height axis, `column` the width axis).
    - `size_hw`: `(height, width)` of the resulting ROI. Even values trigger
      a UserWarning since such a ROI cannot be exactly centered.
    """
    height, width = size_hw
    if height % 2 == 0 or width % 2 == 0:
        warnings.warn(f"ROI with even height and width cannot be exactly "
                      f"centered. (height, width)=({height}, {width})")
    row = int(round(center_rc[0]))
    col = int(round(center_rc[1]))
    half_h = height // 2
    half_w = width // 2
    return (row - half_h, col - half_w, row + half_h + 1, col + half_w + 1)
def rotation_around(degrees, around_rc):
    """Return a `degrees` counter-clockwise rotation around `around_rc`.

    - `degrees`: rotation angle in degrees (counter-clockwise).
    - `around_rc`: center of the rotation in (row, column) image coordinates.

    Returns a `skimage.transform.AffineTransform` object.
    Note: you can apply the transformation with
    `skimage.transform.warp(image, rotation)`.
    """
    # skimage transforms use xy notation, i.e. [col, row], so flip rc -> xy.
    center_xy = np.asarray(around_rc[::-1])  # reverse
    shift_to_origin = transform.AffineTransform(translation=-center_xy)
    # Bug fix: np.deg2rad replaces sp.deg2rad -- SciPy's NumPy-alias
    # functions were deprecated and removed, so sp.deg2rad fails on
    # modern SciPy; numpy is already imported at the top of this file.
    rotate = transform.AffineTransform(rotation=np.deg2rad(degrees))
    shift_back = transform.AffineTransform(translation=center_xy)
    # Compose: move center to origin, rotate, move back.
    return shift_to_origin + rotate + shift_back
def find_pattern_rotated(PF, pattern, image, rescale=1.0, rotations=(0,),
                         roi_center_rc=None, roi_size_hw=(41,41), plot=False, progress=None,
                         log_level=logging.DEBUG):
    """Search `image` for the best position and rotation of `pattern`.

    - `PF`: PatternFinder-like object providing `set_image()` and
      `find(pattern, roi=...)` (project type; interface inferred from use here).
    - `pattern`: RGBA template; the alpha channel is zeroed at the borders
      and used to normalize the match score.
    - `image`: image to search; single- or multi-channel (same rank as pattern).
    - `rescale`: scale factor applied to both image and pattern before search.
    - `rotations`: Iterable over all rotations that should be tried. In degree.
    - `roi_center_rc`: ROI center (row, col) in *unscaled* image coordinates;
      defaults to the image center.
    - `roi_size_hw`: ROI size (height, width) in the *scaled* image.
    - `plot`: truthy plots error-vs-angle; 'all' additionally plots heat maps.
    - `progress`: optional wrapper around the rotations iterable (e.g. tqdm).

    Returns `(T, best_value)`: `T` is the AffineTransform mapping the image
    onto the pattern, `best_value` the normalized error of the best match.
    """
    if progress is None:
        # Identity wrapper when no progress reporter is supplied.
        def progress(x):
            return x
    logger = logging.getLogger('find_pattern_rotated')
    logger.setLevel(log_level)
    # Get current time to determine runtime of search
    start_time = time.time()
    # Initialize values needed later on
    result = []
    vmax = 0.0
    vmin = sp.Inf
    if len(image.shape) > 2:
        multichannel = True
    else:
        multichannel = False
    assert len(image.shape) == len(pattern.shape)
    # Set region of interest
    if roi_center_rc is None:
        roi_center_rc = sp.array(image.shape[:2])/2.0 - 0.5
    else:
        roi_center_rc = sp.asarray(roi_center_rc)
    # ROI is given in scaled-image coordinates, hence the *rescale.
    roi = center_roi_around(roi_center_rc*rescale, roi_size_hw)
    # Give user some feedback on what is happening
    logger.info(f"Rescaling image and target by scale={rescale}.\n"
                f"  image (row, columns): {image.shape[0:2]} px --> {sp.asarray(image.shape[:2])*rescale} px.")
    logger.info(f"ROI center_rc={roi_center_rc}, in unscaled image.\n"
                f"  (height, width) = {roi_size_hw} in scaled image.")
    if len(rotations) > 1:
        logger.info(f"Trying rotations: {rotations}.")
    # Create rescaled copies of image and pattern, determine center coordinates
    pattern_scaled = transform.rescale(pattern, rescale, anti_aliasing=False, multichannel=multichannel, mode='constant')
    image_scaled = transform.rescale(image, rescale, anti_aliasing=False, multichannel=multichannel, mode='constant')
    PF.set_image(image_scaled)
    pattern_scaled_center = sp.array(pattern_scaled.shape[:2])/2. - 0.5
    pattern_center = sp.array(pattern.shape[:2])/2. - 0.5
    # Launch PatternFinder for all rotations defined in function input
    for r in progress(rotations):
        # Calculate transformation matrix for rotation around center of scaled pattern
        rotation_matrix = rotation_around(r, around_rc=pattern_scaled_center)
        # Launch Patternfinder
        pattern_scaled_rotated = transform.warp(pattern_scaled, rotation_matrix, mode='constant')
        # Make sure that the pixel at the image border are transparent, so that
        # pixel that are outside of the pattern are also transparent. This is because
        # we use the closest (border) pixel for getting the value of the pattern.
        pattern_scaled_rotated[0,:,3] = 0
        pattern_scaled_rotated[-1,:,3] = 0
        pattern_scaled_rotated[:,0,3] = 0
        pattern_scaled_rotated[:,-1,3] = 0
        out, min_coords, value = PF.find(pattern_scaled_rotated, roi=roi)
        opaque_pixel = pattern_scaled_rotated[...,-1].sum()  # the last number in RGBA
        # Normalize by the opaque-pixel count so that scores for different
        # rotations (with different opaque areas) are comparable.
        out /= opaque_pixel
        value /= opaque_pixel
        # logger.info(f"r={r} opaque_pixel={opaque_pixel}")
        # min_ccords are (row, col)
        # Collect Min and Max values for plotting later on
        outmax = out.max()
        outmin = out.min()
        if outmax > vmax:
            vmax = outmax
        if outmin < vmin:
            vmin = outmin
        # undo the rescale for the coordinates
        min_coords = min_coords.astype(sp.float64) / rescale
        # create a list of results for all rotations
        result.append([r, min_coords, value, out])
    logger.info(f"took {time.time()-start_time} seconds.")
    # Select the best result from the result list and extract its parameters
    # The rotation angle is the 0-th element in result
    # The coordinates are in the 2-nd element
    # The actual value is the 3-rd element
    best_angle, best_coord, best_value, _ = result[sp.argmin([r[2] for r in result])]
    logger.info(f"best_angle: {best_angle} deg, best_coord (row,column): {best_coord} in input image")
    # Calculate transformation to transform image onto pattern
    # (note, PF.find did transform the pattern and NOT the image)
    translation = transform.AffineTransform(translation=(best_coord-pattern_center)[::-1])
    rotation = rotation_around(-best_angle, best_coord)
    T = translation + rotation
    # Create a plot showing error over angle
    if plot and len(rotations) > 1:
        fig, ax = plt.subplots(1)
        ax.plot([a[0] for a in result], [a[2] for a in result])
        ax.set_xlabel('Angle (rotation)')
        ax.set_ylabel('difference image-target')
        plt.show()
        plt.close()
    # Create heat plot of where target is in image
    if plot == 'all':
        fig, ax = plt.subplots()
        ax.imshow(image_scaled)
        # Outline the ROI rectangle (scaled-image coordinates).
        ax.plot(sp.array([roi[1], roi[3], roi[3], roi[1], roi[1]]),
                sp.array([roi[2], roi[2], roi[0], roi[0], roi[2]]), "yellow")
        n_rows = int(sp.sqrt(len(result)))
        n_cols = int(sp.ceil(len(result)/n_rows))
        fig, ax = plt.subplots(n_rows, n_cols, squeeze=False, figsize = (2 * n_cols, 2 * n_rows))
        fig.tight_layout(rect=[0, 0.03, 1, 0.97])
        fig.suptitle("Correlation map of where target is in image\n", size=16)
        n = 0
        for i in range(n_rows):
            for j in range(n_cols):
                ax[i,j].axis("off")
                if n < len(result):
                    ax[i,j].imshow(result[n][3], interpolation="nearest", cmap='cubehelix', vmin=vmin, vmax=vmax)
                    ax[i,j].annotate('Angle:{0:.1f}\nValue:{1:.3f}'
                                     .format(result[n][0],result[n][2]),[0,0])
                    # Mark the best match position within this ROI heat map.
                    ax[i,j].plot(*(result[n][1]*rescale-sp.array(roi[:2]))[::-1], "rx")
                    n += 1
        plt.show()
        plt.close()
    return T, best_value
| {
"content_hash": "6ae73909a06040aad4c11124d50c13e1",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 121,
"avg_line_length": 41.42786069651741,
"alnum_prop": 0.6273567911612826,
"repo_name": "HearSys/pattern_finder_gpu",
"id": "9631fa9503365aa60d475fbc1403d5bcfff6a336",
"size": "8327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pattern_finder_gpu/find_pattern_rotated.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4102"
},
{
"name": "Jupyter Notebook",
"bytes": "4557042"
},
{
"name": "Python",
"bytes": "22991"
}
],
"symlink_target": ""
} |
# web2py controller scaffolding: auth forms, file downloads and RPC services.
def user(): return dict(form=auth())
def download(): return response.download(request,db)
def call(): return service()
import time
### end requires
def index():
    # Landing action: forward straight to the default controller's index.
    redirect(URL('default', 'index'))
@auth.requires_membership('admin')
def add():
    """Admin-only action: ask for a NOAA station URL, validate it, stash the
    parsed station info in the session and redirect to confirm().

    NOTE(review): the form is handled twice below (form.accepts() and then
    form.process().accepted) -- verify this double processing is intended.
    """
    session.msg = ''
    form = SQLFORM.factory(Field('url', requires=IS_NOT_EMPTY())
                           #Field('extra_ases', 'boolean'),
                           #Field('extra_emy', 'boolean'),
                           #Field('ASES', 'upload', uploadfolder="./"),
                           #Field('EMY', 'upload', uploadfolder="./")
                           )
    form.add_button('Back to Admin Page', URL('admin', 'panel'))
    #print form. form[0][1]
    #form.element('#no_table_ASES__row')['_style']="display:none;"
    #form.element('#no_table_EMY__row')['_style']="display:none;"
    #form.element('#no_table_extra_ases')['_onchange'] = "if(jQuery('#no_table_extra_ases').prop('checked')) jQuery('#no_table_ASES__row').show(); else jQuery('#no_table_ASES__row').hide();"
    #form.element('#no_table_extra_emy')['_onchange'] = "if(jQuery('#no_table_extra_emy').prop('checked')) jQuery('#no_table_EMY__row').show(); else jQuery('#no_table_EMY__row').hide();"
    if form.accepts(request.vars, session):
        print "accepted"
        from actions import isnoaurl
        if isnoaurl(form.vars.url):
            print "ok"
            import noainfo
            # Parse the remote NOAA page and keep the result in the session
            # (with a timestamp) for the confirm() step.
            mystation = noainfo.station(form.vars.pop('url'))
            session.tmpstation = mystation
            session.laststation = time.time()
            session.confirm = 1
            redirect(URL('confirm.html'), client_side=True)
            #response.js = "window.open('"+ URL('stations', 'confirm.html') +"', '_blank', 'toolbar=0,location=0,menubar=0,width=800,height=560');"
        else:
            print "ok"
            response.flash = T('Please enter a valid url')
            form.errors.url = 'please enter a valid url'
    if form.process().accepted:
        print "here"
        from actions import isnoaurl
        if isnoaurl(form.vars.url):
            print "ok"
            import noainfo
            mystation = noainfo.station(form.vars.pop('url'))
            session.tmpstation = mystation
            session.laststation = time.time()
            session.confirm = 1
            redirect(URL('confirm'))
        else:
            print "ok"
            response.flash = T('Please enter a valid url')
            form.errors.url = 'please enter a valid url'
    return dict(form=form)
@auth.requires_membership('admin')
def confirm():
    """Second step of station creation: pre-fill a stations form from the
    NOAA data stashed in session.tmpstation by add(). The stashed data
    expires after TIMEOUT seconds, in which case the user is sent back."""
    print db.stations.name.length
    TIMEOUT=60*2
    button = BUTTON("go back!", _onclick='history.back()')
    if session.tmpstation:
        # Only use the stashed station while it is younger than TIMEOUT.
        if not session.laststation<time.time()-TIMEOUT:
            print vars(session.tmpstation)['_data']
            # Pre-populate form defaults from the parsed station; lock the
            # fields that must not be edited by hand.
            db.stations.emy_file.writable = db.stations.emy_file.readable = False
            db.stations.noa_url.default = vars(session.tmpstation)['_info'].get('urlbase')
            db.stations.noa_url.writable = False
            db.stations.name.default = vars(session.tmpstation)['_info'].get('NAME')
            #db.stations.name.writable = False
            db.stations.city.default = vars(session.tmpstation)['_info'].get('CITY')
            #db.stations.city.writable = False
            db.stations.state.default = vars(session.tmpstation)['_info'].get('STATE')
            #db.stations.state.writable = False
            db.stations.lat.default = vars(session.tmpstation)['_info'].get('LAT')
            #db.stations.lat.writable = False
            db.stations.long.default = vars(session.tmpstation)['_info'].get('LONG')
            #db.stations.long.writable = False
            db.stations.elev.default = vars(session.tmpstation)['_info'].get('ELEV')
            #db.stations.elev.writable = False
            db.stations.data_from.default = vars(session.tmpstation)['_info'].get('from')
            db.stations.data_from.writable = False
            db.stations.data_to.default = vars(session.tmpstation)['_info'].get('to')
            db.stations.data_to.writable = False
            db.stations.data_list.default = vars(session.tmpstation)['_data'].get('RAIN')
            db.stations.data_list.writable = False
            db.stations.data_list.readable = False
            db.stations.temp_data_list.default = vars(session.tmpstation)['_data'].get('TEMP')
            db.stations.temp_data_list.writable = False
            db.stations.temp_data_list.readable = False
            db.stations.noa_url.requires = IS_NOT_IN_DB(db, 'stations.noa_url')
        else: # delete vars...
            del session.tmpstation
            del session.laststation
            redirect(URL('add')) #go back to add
    else:
        redirect(URL('add'))
    print db.stations.name.length
    form = SQLFORM(db.stations)
    form.element(_type='submit')['_onclick'] = "msg=''"
    # Client-side guard: once the session data is about to expire, warn the
    # user before they navigate away from the form.
    script=SCRIPT("setTimeout(function(){var msg='Form will be erased and you will be prompted at add page';window.onbeforeunload = function() "
                  "{if (msg!='') {return msg;}};},"+str(TIMEOUT*1000+session.laststation*1000-int(time.time()*1000))+");")
    if form.process().accepted:
        print "ohai"
        import actions
        session.lastid = form.vars.id
        # Derive the per-station data tables and charts from the new record.
        actions.populate(db, session, redirect, URL)
        actions.calc_data(db, session, redirect, URL, request)
        session.flash = T('Station ' + form.vars.name + ' created!')
        redirect(URL('stations', 'add'))
    elif form.errors:
        print "ohoi"
        response.flash = T('Failed')
    return dict(form=form+script, button=button)
@auth.requires_membership('admin')
def show():
    """Display the stored rainfall/temperature tables for a chosen station."""
    session.flash = ''
    print request.vars.station
    print request.args(0)
    form = SQLFORM.factory(Field('station', requires=IS_IN_DB(db, db.stations.id,'%(name)s'),
                                 default=request.vars.station or request.args(0)),
                           Field('var', requires=IS_IN_SET(['Rainfall', 'Temp']), default=request.vars.var,
                                 readable=True if request.vars.station else False, writable=True if request.vars.station else False))
    # Auto-submit when the station selection changes.
    form.element(_id="no_table_station")['_onchange'] = "this.form.submit();"
    results = ''
    info = ''
    if request.vars.station:
        this_station = db.stations(db.stations.id == request.vars.station)
        if request.vars.var == 'Rainfall':
            data = db(db.alldata.station_id == this_station.id).select()
        else:
            data = db(db.temp_alldata.station_id == this_station.id).select()
        # NOTE(review): in the second TR below, the closing parenthesis of
        # TD(this_station['long'] ...) nests the following cells inside that
        # cell -- looks like a misplaced bracket; verify the rendered table.
        info = TABLE(TR(TD('Name:'),TD(this_station['name']), TD(''), TD(''), TD('City:'),TD(this_station['city']), TD(''), TD(''), TD('State:'),TD(this_station['state'])),
                     TR(TD('Elev'),TD(this_station['elev']), TD(''), TD(''), TD('Lang:'),TD(this_station['long'], TD(''), TD(''), TD('Lat:'), TD(this_station['lat']))),)
        results = TABLE(TR(TH('Year'),TH('Jan'), TH('Feb'), TH('Mar'), TH('Apr'), TH('May'), TH('Jun'), TH('Jul'), TH('Aug'), TH('Sep'), TH('Oct'), TH('Nov'), TH('Dec')), [TR(TD(record['year']), TD(record['jan']), TD(record['feb']), TD(record['mar']), TD(record['apr']), TD(record['may']), TD(record['jun']), TD(record['jul']), TD(record['aug']), TD(record['sep']), TD(record['oct']), TD(record['nov']), TD(record['dec'])) for record in data])
    return dict(form=form, info=info, results=results)
@auth.requires_membership('admin')
def edit():
    """Edit an existing station; without an id argument, first ask which
    station to edit and then redirect back here with its id."""
    this_station = db.stations(db.stations.id==request.args(0))
    if request.args:
        # Derived data lists are never edited directly through the form.
        db.stations.data_list.writable = db.stations.temp_data_list.writable = False
        db.stations.data_list.readable = db.stations.temp_data_list.readable = False
        #db.stations.data_from.writable = db.stations.data_to.writable = False
        form = crud.update(db.stations, this_station.id)#, onaccept=)
        if form.accepted:
            import actions, os
            if form.vars.delete_this_record=='on':
                import os
                actions.delStationCharts(os, request.folder, this_station.name)
            elif form.vars.emy_file != None and form.vars.emy_file != '':
                # A newly uploaded data file invalidates all derived rows,
                # so wipe and recompute them.
                db(db.emydata.station_id==form.record_id).delete()
                db(db.temp_emydata.station_id==form.record_id).delete()
                db(db.alldata.station_id==form.record_id).delete()
                db(db.temp_alldata.station_id==form.record_id).delete()
                actions.read_file(db, session, redirect, URL, request, os, form.record_id)
                actions.calc_data(db, session, redirect, URL, request)
    else:
        form = SQLFORM.factory(Field('station', requires=IS_IN_DB(db, db.stations.name, '%(name)s')))
        if form.process().accepted:
            this_station = db.stations(db.stations.name==request.vars.station)
            redirect(URL('stations', 'edit', args=this_station.id))
    return dict(form=form)
@auth.requires_login()
def charts():
    """Show a pre-rendered chart image for a chosen station and time span.

    Chart files are expected under static/charts as
    '<station_name_with_underscores>_<timespan>.png'.
    """
    session.forget(response)
    chart = ''
    # Time-span choices only make sense once a station has been picked.
    if request.vars.station:
        requires = IS_IN_SET(['1 month', '3 month', '6 month', '12 month'])
    else:
        requires = IS_IN_SET([])
    if request.vars.station != None and request.vars.chart != '':
        url = URL('static', 'charts', args='_'.join(request.vars.station.split()) + '_' + ''.join(request.vars.chart.split()) +'.png')
        chart = IMG(_src=url, width=500, height=500)
    form = SQLFORM.factory(Field('station', requires=IS_IN_DB(db, 'stations.name'), default=request.vars.station),#writable=False if request.args(0)!=None else True),
                           Field('chart', requires=requires,default=request.vars.chart, label="Select Time Span", required=False))
    form.element(_id="no_table_station__label")['_style'] = "font-weight:bold;"
    form.element(_id="no_table_chart__label")['_style'] = "font-weight:bold;"
    # Changing the station resets the time span and resubmits the form.
    form.element(_id="no_table_station")['_onchange'] = "jQuery('#no_table_chart').val('');this.form.submit()"
    return dict(form=form, chart=chart)
@auth.requires_login()
def userAdd():
    """Let a logged-in user upload their own data file and generate charts.

    Each user keeps at most one upload; its charts live for a short window
    (2 minutes, see the update below) after which they are purged here.
    """
    import useractions, os
    data = False
    station = None
    res = db.userfiles(db.userfiles.owner==auth.user.id)
    form = SQLFORM(db.userfiles)
    if (res is not None) and (float(res.time) <= time.time()):
        # Previous upload expired: remove its row and generated charts.
        print "deleting old data.."
        db(db.userfiles.owner == auth.user.id).delete()
        useractions.delUserCharts(os, request.folder+"/", auth.user.id)
        station = ''
    elif res is None:
        print "no data to show.."
        data = False
    else:
        print "has data to show!"
        data = True
        station = res.station_name
    if form.process().accepted:
        import UserSPI
        # Replace any previous upload/charts for this user before processing
        # the new file.
        db(db.userfiles.owner == auth.user.id).delete()
        useractions.delUserCharts(os, request.folder+"/", auth.user.id)
        response.flash = 'form accepted'
        data = useractions.read_file_user(db, form.vars.id, request.folder, os)
        UserSPI.run(request.folder, '_'.join(request.vars.station_name.split())+"_", data, auth.user.id)
        # Stamp the new row with its expiry time (now + 2 minutes).
        db(db.userfiles.id == form.vars.id).update(time=time.time() + 60*2, owner=auth.user.id)
        redirect(URL('userAdd'))
    elif form.errors:
        response.flash = 'form has errors'
    return dict(form=form, data=data, station=station)
| {
"content_hash": "69248ab2eb45729eeb8ffab9625f7384",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 443,
"avg_line_length": 50.306666666666665,
"alnum_prop": 0.5982860676738228,
"repo_name": "atzorvas/droughtmeteo",
"id": "96186d59c05c56c9fe6c48f01ce6a797994e2d7e",
"size": "11371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/stations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23474"
},
{
"name": "JavaScript",
"bytes": "45831"
},
{
"name": "Python",
"bytes": "368843"
}
],
"symlink_target": ""
} |
"""
An extension to retry failed requests that are potentially caused by temporary
problems such as a connection timeout or HTTP 500 error.
You can change the behaviour of this middleware by modifying the scraping settings:
RETRY_TIMES - how many times to retry a failed page
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
once the spider has finished crawling all regular (non failed) pages. Once
there is no more failed pages to retry this middleware sends a signal
(retry_complete), so other extensions could connect to that signal.
About HTTP errors to consider:
- You may want to remove 400 from RETRY_HTTP_CODES, if you stick to the HTTP
protocol. It's included by default because it's a common code used to
indicate server overload, which would be something we want to retry
"""
from twisted.internet import defer
from twisted.internet.error import TimeoutError, DNSLookupError, \
ConnectionRefusedError, ConnectionDone, ConnectError, \
ConnectionLost, TCPTimedOutError
from pyrake import log
from pyrake.exceptions import NotConfigured
from pyrake.utils.response import response_status_message
from pyrake.xlib.tx import ResponseFailed
class RetryMiddleware(object):
    """Downloader middleware that retries failed requests.

    Retries responses whose HTTP status is in RETRY_HTTP_CODES and requests
    that failed with one of EXCEPTIONS_TO_RETRY, up to RETRY_TIMES attempts
    per request (tracked in request.meta['retry_times']).
    """

    # IOError is raised by the HttpCompression middleware when trying to
    # decompress an empty response
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError)

    def __init__(self, settings):
        # Disable the whole middleware when retries are turned off.
        if not settings.getbool('RETRY_ENABLED'):
            raise NotConfigured
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
        # Added to the retried request's priority (typically negative, so
        # retries are scheduled later).
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')

    @classmethod
    def from_crawler(cls, crawler):
        # Standard factory hook: build the middleware from crawler settings.
        return cls(crawler.settings)

    def process_response(self, request, response, spider):
        # Per-request opt-out via meta; otherwise retry on a matching status.
        if request.meta.get('dont_retry', False):
            return response
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            # _retry returns None once retries are exhausted; then pass the
            # failed response through unchanged.
            return self._retry(request, reason, spider) or response
        return response

    def process_exception(self, request, exception, spider):
        # Retry on known transient network errors unless opted out.
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            return self._retry(request, exception, spider)

    def _retry(self, request, reason, spider):
        """Return a retry copy of `request`, or None when retries are exhausted."""
        retries = request.meta.get('retry_times', 0) + 1
        if retries <= self.max_retry_times:
            log.msg(format="Retrying %(request)s (failed %(retries)d times): %(reason)s",
                    level=log.DEBUG, spider=spider, request=request, retries=retries, reason=reason)
            retryreq = request.copy()
            retryreq.meta['retry_times'] = retries
            # dont_filter bypasses the dupe filter so the copy is re-scheduled.
            retryreq.dont_filter = True
            retryreq.priority = request.priority + self.priority_adjust
            return retryreq
        else:
            log.msg(format="Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
                    level=log.DEBUG, spider=spider, request=request, retries=retries, reason=reason)
| {
"content_hash": "6a7daee90b93f0ef1aa9070fe4acefcc",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 100,
"avg_line_length": 44.53846153846154,
"alnum_prop": 0.6911341393206678,
"repo_name": "elkingtowa/pyrake",
"id": "def6a5f2e6b02275272cefd8d4189787903fcec6",
"size": "3474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyrake/contrib/downloadermiddleware/retry.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9681"
},
{
"name": "Perl",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "1950905"
},
{
"name": "Shell",
"bytes": "3209"
}
],
"symlink_target": ""
} |
import unittest
import sys
import logging
from unittest.mock import patch
from data_mapper.data_mapper_interface import DataMapperInterface
from data_mapper.exceptions import DataMapperError
class TestDataMapper(unittest.TestCase):
    """Unit tests for DataMapperInterface using mocked mapper backends."""

    def setUp(self):
        # Mirror log output to stdout so failures show the mapper's logging.
        self.logger = logging.getLogger()
        self.logger.level = logging.DEBUG
        self.stream_handler = logging.StreamHandler(sys.stdout)
        self.logger.addHandler(self.stream_handler)

    def test_new_throws_exception(self):
        """Creating an interface for an unknown mapper type must raise."""
        self.logger.info('test_new_throws_exception: Create undefined data mapper.')
        with self.assertRaises(DataMapperError) as context:
            DataMapperInterface('undefined')
        self.assertEqual(context.exception.message, 'Data mapper not implemented! undefined')

    @patch('data_mapper.data_mapper_tarom.DataMapperTarom')
    def test_new_data_mapper(self, mock_tarom):
        """The interface reports the type of its underlying (mocked) mapper."""
        self.logger.info('test_new_data_mapper: Create new data mapper - mocked object.')
        mt = mock_tarom.return_value
        mt.MAPPER_TYPE = 'tarom'
        dmi = DataMapperInterface('tarom')
        dmi._mapper = mt
        # Fix: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(dmi.get_type(), 'tarom')

    @patch('data_mapper.data_mapper_tarom.DataMapperTarom')
    def test_data_mapper_map(self, mock_tarom):
        """map() delegates to the underlying mapper and returns its dict."""
        self.logger.info('test_data_mapper_map: Call method map on mocked object.')
        mt = mock_tarom.return_value
        mt.map.return_value = {'key1' : 'value1', 'key2': 1, 'key3' : 1.0}
        dmi = DataMapperInterface('tarom')
        dmi._mapper = mt
        self.assertEqual(dmi.map('value1,value2,value3'), {'key1' : 'value1', 'key2': 1, 'key3' : 1.0})

    def tearDown(self):
        # Undo the handler added in setUp so other tests are unaffected.
        self.logger.removeHandler(self.stream_handler)
        self.stream_handler.close()
| {
"content_hash": "37c52f6f3dfd114b1d492e077e182d51",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 104,
"avg_line_length": 41.02325581395349,
"alnum_prop": 0.6808390022675737,
"repo_name": "lo100/MyRaspiHome",
"id": "abdae80cbd8775903174619a4cbe51474703be23",
"size": "1764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "framework/tests/data_mapper/data_mapper_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55782"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
# Compatibility shim: expose `get_model` uniformly across Django versions.
try:
    # Django >= 1.7: the app registry lives at django.apps.apps.
    # Bug fix: the original `from django.apps import django_apps` always
    # raised ImportError (django.apps exposes `apps`, not `django_apps`),
    # silently forcing the legacy path on every Django version.
    from django.apps import apps as django_apps
    get_model = django_apps.get_model
except ImportError:
    # Django < 1.7
    from django.db.models import loading
    get_model = loading.get_model
| {
"content_hash": "7cc7e846cce334799ee12aec3c10ae11",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 40,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.700507614213198,
"repo_name": "miraculixx/plata",
"id": "b6cd1b8b79815d1952b36fff242f6e4ba53407f5",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "plata/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import logging
from django.core.serializers import serialize, deserialize
from django.conf import settings
from tictactoelib import compete
from tictactoe.celery_app import app
from .models import Fight, Entry, LatestEntry
from .logic import winner
logger = logging.getLogger(__name__)
def fight(e1, e2):
    """Play two rounds between entries `e1` and `e2` (each side starts once),
    persist both rounds as Fight rows, and return the raw compete results."""
    logmsg = "Entry %d (%s) vs entry %d (%s) complete: %s, %s"
    e1log = e1.id, e1.user
    e2log = e2.id, e2.user
    # Sandbox limits for the competing code, taken from project settings.
    kwargs = dict(
        cgroup=settings.FIGHT_CGROUP,
        memlimit=settings.FIGHT_MEMORY_LIMIT,
        timeout=settings.FIGHT_TIMEOUT,
    )
    round1 = compete(e1.code, e2.code, **kwargs)
    logger.debug(logmsg % (e1log + e2log + tuple(round1[:2])))
    # Second round with sides swapped, so neither entry always moves first.
    round2 = compete(e2.code, e1.code, **kwargs)
    logger.debug(logmsg % (e2log + e1log + tuple(round2[:2])))
    Fight.from_compete(e1, e2, round1).save()
    Fight.from_compete(e2, e1, round2).save()
    return round1, round2
@app.task
def schedule_qualification(e1_json):
    """Celery task: qualify a freshly submitted entry against the built-in
    qualification ('dumb') entry; on success, schedule fights against all
    other users' latest entries."""
    e1 = next(deserialize('json', e1_json)).object
    args1 = e1.id, e1.user
    logger.debug("Executing qualification for entry %d (%s)" % args1)
    dumb = Entry.qualification_entry()
    round1, round2 = fight(e1, dumb)
    if winner(round1, round2) == 'e1':
        e1.add_latest()  # this entry is able to compete now
        # Schedule pvp
        # NOTE(review): `exclude(user=dumb)` filters by an Entry instance,
        # not a user -- confirm this correctly excludes the qualification
        # entry's owner.
        les = LatestEntry.objects.exclude(user=e1.user).exclude(user=dumb)
        for latestentry in les.select_related('entry').all():
            e2 = latestentry.entry
            args2 = e1.id, e1.user, e2.id, e2.user
            logger.debug(("Scheduling competition between entries %d (%s) "
                          "and %d (%s)") % args2)
            # Entries are passed as JSON so the task arguments are serializable.
            e1_json = serialize('json', [e1])
            e2_json = serialize('json', [e2])
            schedule_compete.delay(e1_json, e2_json)
    else:
        logger.info("Entry %d (%s) did not qualify" % (e1.id, e1.user))
@app.task
def schedule_compete(e1_json, e2_json):
    """Celery task: deserialize two JSON-encoded entries and run their fight."""
    first, second = (next(deserialize('json', payload)).object
                     for payload in (e1_json, e2_json))
    fight(first, second)
| {
"content_hash": "b32bcfbd56dcecd7a83b28293ce607d3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 31.348484848484848,
"alnum_prop": 0.6283228612856452,
"repo_name": "Motiejus/tictactoe",
"id": "ca3fd8a6d2403b90e71f43a3453e321edbe3c547",
"size": "2069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tictactoe/contest/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2248"
},
{
"name": "JavaScript",
"bytes": "3137"
},
{
"name": "Python",
"bytes": "35588"
}
],
"symlink_target": ""
} |
"""Create sample images given a font and text."""
__author__ = "roozbeh@google.com (Roozbeh Pournader)"
import argparse
import codecs
import os
from os import path
import string
from nototools import notoconfig
from nototools.py23 import basestring
from nototools.py23 import unichr
import cairo
import pango
import pangocairo
_fonts_conf_template = """<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
${font_dirs}
<include>/etc/fonts/conf.d</include>
<match target="scan">
<test name="family">
<string>Noto Color Emoji</string>
</test>
<edit name="scalable" mode="assign"><bool>true</bool></edit>
</match>
<cachedir>${cache_dir}</cachedir>
</fontconfig>
"""
def setup_fonts_conf():
    """We first look for fonts.conf under the root nototools, and if we don't
    find it we write it. The fontconfig cache also goes there. This of course
    requires nototools to be writable.

    Side effects: may create fonts.conf on disk, and always points the
    FONTCONFIG_FILE environment variable at it for this process.
    """
    # We require notoconfig because we don't know where this code is located,
    # nor whether the font directories might be relative to it.
    TOOLS_DIR = notoconfig.noto_tools()
    fonts_conf = path.join(TOOLS_DIR, "fonts.conf")
    if not path.isfile(fonts_conf):
        # Collect every Noto font directory that notoconfig can locate.
        noto_font_dirs = []
        FONTS_DIR = notoconfig.noto_fonts()
        if FONTS_DIR:
            noto_font_dirs.extend(
                [path.join(FONTS_DIR, "hinted"), path.join(FONTS_DIR, "unhinted")]
            )
        CJK_DIR = notoconfig.noto_cjk()
        if CJK_DIR:
            noto_font_dirs.append(CJK_DIR)
        EMOJI_DIR = notoconfig.noto_emoji()
        if EMOJI_DIR:
            noto_font_dirs.append(path.join(EMOJI_DIR, "fonts"))
        font_dirs = "\n  ".join("<dir>%s</dir>" % d for d in noto_font_dirs)
        cache_dir = path.join(TOOLS_DIR, "fontconfig")
        # Fill the module-level XML template with the dirs we found.
        template = string.Template(_fonts_conf_template)
        conf_text = template.substitute(font_dirs=font_dirs, cache_dir=cache_dir)
        try:
            with open(fonts_conf, "w") as f:
                f.write(conf_text)
        except IOError as e:
            raise Exception("unable to write %s: %s" % (fonts_conf, e))
    # Note: ensure /etc/fonts/conf.d/10-scale-bitmap-fonts.conf is
    # in sync with fontconfig to make sure color emoji font scales properly.
    os.putenv("FONTCONFIG_FILE", fonts_conf)
class DrawParams:
    """Parameters used for rendering text in draw_on_surface and its callers"""

    def __init__(
        self,
        family="Noto Sans",                 # font family name handed to pango
        language=None,                      # language tag for pango.Language, or None
        rtl=False,                          # right-to-left base direction
        vertical=False,                     # top-to-bottom layout
        width=1370,                         # image width in pixels (incl. margins)
        font_size=32,                       # font size in points (pango units applied later)
        line_spacing=50,                    # baseline-to-baseline spacing
        weight=pango.WEIGHT_NORMAL,         # pango weight constant
        style=pango.STYLE_NORMAL,           # pango style constant
        stretch=pango.STRETCH_NORMAL,       # pango stretch constant
        maxheight=0,                        # 0 = unlimited; see draw_on_surface for <0 / >0 semantics
        horiz_margin=0,                     # left/right margin in pixels
    ):
        self.family = family
        self.language = language
        self.rtl = rtl
        self.vertical = vertical
        self.width = width
        self.font_size = font_size
        self.line_spacing = line_spacing
        self.weight = weight
        self.style = style
        self.stretch = stretch
        self.maxheight = maxheight
        self.horiz_margin = horiz_margin

    def __repr__(self):
        # Debug-friendly dump of all parameters.
        return str(self.__dict__)
def make_drawparams(**kwargs):
    """Create a DrawParams from kwargs, but converting weight, style, and stretch
    from values from string to the pango value types if needed."""
    # _get_weight/_get_style/_get_stretch are defined elsewhere in this
    # module; presumably they map string names (e.g. "bold", "italic") to
    # the corresponding pango constants -- confirm against their definitions.
    dp = DrawParams(**kwargs)
    dp.weight = _get_weight(kwargs.get("weight", "normal"))
    dp.style = _get_style(kwargs.get("style", "normal"))
    dp.stretch = _get_stretch(kwargs.get("stretch", "normal"))
    return dp
def draw_on_surface(surface, text, params):
    """Draw the string on a pre-created surface and return height.

    - `surface`: cairo surface to render onto.
    - `text`: the string to lay out and draw.
    - `params`: DrawParams controlling font, direction, size, margins and
      the optional maxheight truncation.

    Returns the vertical extent (in pixels) actually used by the text.
    """
    pangocairo_ctx = pangocairo.CairoContext(cairo.Context(surface))
    layout = pangocairo_ctx.create_layout()
    pango_ctx = layout.get_context()
    if params.language is not None:
        pango_ctx.set_language(pango.Language(params.language))
    # Choose base direction and default alignment from rtl/vertical flags.
    if params.rtl:
        if params.vertical:
            base_dir = pango.DIRECTION_TTB_RTL
        else:
            base_dir = pango.DIRECTION_RTL
        alignment = pango.ALIGN_RIGHT
    else:
        if params.vertical:
            base_dir = pango.DIRECTION_TTB_LTR
        else:
            base_dir = pango.DIRECTION_LTR
        alignment = pango.ALIGN_LEFT
    # The actual meaning of alignment is confusing.
    #
    # In an RTL context, RTL text aligns to the right by default. So
    # setting right alignment and an RTL context means asking for
    # 'default alignment' (just as does setting left alignment and an
    # LTR context).
    #
    # What actually happens depends on the directionality of the actual
    # text in the paragraph. If the text is Arabic this will be RTL, so
    # it is aligned to the right, the default alignment for RTL text.
    # And if the text is English this will be LTR, so it is aligned to
    # the left, the default alignment for LTR text.
    #
    # This is reversed when the context and the alignment disagree:
    # setting left alignment in an RTL context (or right alignment in an
    # LTR context) means asking for 'opposite alignment'. Arabic text
    # is aligned to the left, and English text to the right.
    #
    # pango layout set_auto_dir controls whether the text direction
    # is based on the text itself, or influenced by the context. By
    # default it is off so the text direction is completely independent
    # of the setting of the context: Arabic text is RTL and English text
    # is LTR. However, the algorithm depends on the first 'strongly
    # directional' character encountered in a paragraph. If you have
    # text that is largly Arabic but happens to start with English
    # (e.g. brand names) it will be assigned LTR, the wrong direction.
    # Either you force the correct direction by munging the text or you
    # tell pango to use the context.
    #
    # The text will be reordered based on the unicode bidi attributes
    # of the characters, and this is only as good as your unicode data.
    # Newly-encoded scripts can be newer than your libraries and will
    # likely order LTR if you implementation doesn't know about them.
    # The width is the desired width of the image. The layout uses this
    # width minus the margin.
    width = params.width - 2 * params.horiz_margin
    font = pango.FontDescription()
    font.set_family(params.family)
    font.set_size(params.font_size * pango.SCALE)
    font.set_style(params.style)
    font.set_weight(params.weight)
    font.set_stretch(params.stretch)
    layout.set_font_description(font)
    layout.set_alignment(alignment)
    layout.set_width(width * pango.SCALE)
    layout.set_wrap(pango.WRAP_WORD_CHAR)
    layout.set_spacing((params.line_spacing - params.font_size) * pango.SCALE)
    pango_ctx.set_base_dir(base_dir)
    layout.context_changed()
    layout.set_text(text)
    # maxheight < 0 truncates to that many lines; maxheight > 0 truncates
    # once the accumulated line heights exceed that many pixels.
    if params.maxheight:
        numlines = layout.get_line_count()
        if params.maxheight < 0:
            if -params.maxheight < numlines:
                startindex = layout.get_line_readonly(-params.maxheight).start_index
                layout.set_text(text[:startindex])
        else:
            ht = 0
            for i in range(numlines):
                line = layout.get_line_readonly(i)
                lrect = line.get_extents()[1]  # logical bounds
                lh = (-lrect[1] + lrect[3]) / pango.SCALE
                ht += lh
                # Always keep at least the first line (i > 0).
                if ht > params.maxheight and i > 0:
                    layout.set_text(text[: line.start_index])
                    break
    extents = layout.get_pixel_extents()
    # Warn when ink spills outside the allotted width (after margins).
    ovl = -extents[0][0] > params.horiz_margin
    ovr = extents[0][2] > width + params.horiz_margin
    if ovl or ovr:
        if ovl:
            print("Error: image overflows left bounds")
        if ovr:
            print("Error: image overflows right bounds")
        print(
            "extents: %s, width: %s, margin: %s"
            % (extents, params.width, params.horiz_margin)
        )
    top_usage = min(extents[0][1], extents[1][1], 0)
    bottom_usage = max(extents[0][3], extents[1][3])
    pangocairo_ctx.set_antialias(cairo.ANTIALIAS_GRAY)
    pangocairo_ctx.set_source_rgb(1, 1, 1)  # White background
    pangocairo_ctx.paint()
    # Shift by the margin and by any ink above the origin before drawing.
    pangocairo_ctx.translate(params.horiz_margin, -top_usage)
    pangocairo_ctx.set_source_rgb(0, 0, 0)  # Black text color
    pangocairo_ctx.show_layout(layout)
    return bottom_usage - top_usage
def create_svg(text, output_path, **kwargs):
    """Render *text* to an SVG file at *output_path*.

    A first pass on a throwaway surface measures the height the laid-out
    text needs; the real surface is then created at exactly that height
    and drawn a second time.
    """
    setup_fonts_conf()
    params = make_drawparams(**kwargs)
    # Measuring pass: the surface is never written, dimensions are dummies.
    probe_surface = cairo.SVGSurface(None, 0, 0)
    needed_height = draw_on_surface(probe_surface, text, params)
    # Drawing pass at the measured height.
    target_surface = cairo.SVGSurface(output_path, params.width, needed_height)
    print("writing", output_path)
    draw_on_surface(target_surface, text, params)
    target_surface.flush()
    target_surface.finish()
def create_png(text, output_path, **kwargs):
    """Render *text* to a PNG file at *output_path*.

    Like create_svg, this does a measuring pass on a throwaway surface
    first, then draws on a correctly-sized surface and writes it out.
    """
    setup_fonts_conf()
    params = make_drawparams(**kwargs)
    # Measuring pass only; nothing is kept from this surface.
    probe_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0)
    needed_height = draw_on_surface(probe_surface, text, params)
    # Drawing pass at the measured height.
    target_surface = cairo.ImageSurface(
        cairo.FORMAT_ARGB32, params.width, needed_height
    )
    draw_on_surface(target_surface, text, params)
    print("writing", output_path)
    target_surface.write_to_png(output_path)
def create_img(text, output_path, **kwargs):
    """Create a PNG or SVG image from the given text.

    The output format is chosen from the extension of *output_path*
    (".png" or ".svg", case-insensitive); any other extension is
    reported as unsupported.
    """
    ext = (path.splitext(output_path)[1]).lower()
    if ext == ".png":
        create_png(text, output_path, **kwargs)
    elif ext == ".svg":
        create_svg(text, output_path, **kwargs)
    else:
        # Bug fix: the message was "extension % not supported", whose bare
        # "%" is an invalid conversion specifier and raises ValueError at
        # runtime; "%s" is required to interpolate ext.
        print("extension %s not supported" % ext)
def test():
    """Test sample Hindi and Arabic texts."""

    def render_sample(text_file, output_file, **kwargs):
        # Read a UDHR sample from the shared sample_texts directory and
        # render it with the supplied drawing options.  (Renamed from
        # "test" so it no longer shadows the enclosing function's name.)
        sample_path = "../sample_texts/" + text_file
        with codecs.open(sample_path, "r", encoding="UTF-8") as sample_file:
            sample_text = sample_file.read().strip()
        create_img(sample_text, output_file, **kwargs)

    render_sample(
        "en-Latn_udhr.txt",
        "en_latn_udhr.svg",
        family="Noto Serif Display",
        maxheight=-2,
        font_size=80,
        line_spacing=96,
        style="italic",
        horiz_margin=16,
    )
    # Disabled sample renderings, kept for reference.
    """
    test('hi-Deva_udhr.txt', 'hindi.png', family='Noto Sans',
         language='hi-Deva')
    test('ar-Arab_udhr.txt', 'arabic.svg', family='Noto Naskh Arabic',
         language='ar', rtl=True)
    test('mn-Mong_udhr.txt', 'mong.png', family='Noto Sans',
         language='mn', vertical=True)
    test('sr-Cyrl_udhr.txt', 'sr_cyrl.png', family='Noto Sans',
         language='sr-Cyrl')
    test('und-Adlm_chars.txt', 'und-adlm.png', family='Noto Sans',
         rtl=True)
    test('en-Latn_udhr.txt', 'en_latn_udhr_semcond.svg', family='Noto Sans',
         stretch='semi-condensed')
    test('en-Latn_udhr.txt', 'en_latn_udhr_cond.svg', family='Noto Sans',
         stretch='condensed')
    test('en-Latn_udhr.txt', 'en_latn_udhr_extcond.svg', family='Noto Sans',
         stretch=pango.STRETCH_EXTRA_CONDENSED)
    """
# test('en-Latn_udhr.txt', 'en_latn_rtl.png', family='Noto Sans', rtl=True)
# bidi_txt = u'First ضميرً Second'
# create_img(bidi_txt, 'bidi.png', family='Noto Sans', rtl=True)
# Map from user-facing weight names to pango weight constants.
_weight_map = {
    "ultralight": pango.WEIGHT_ULTRALIGHT,
    "light": pango.WEIGHT_LIGHT,
    "normal": pango.WEIGHT_NORMAL,
    "bold": pango.WEIGHT_BOLD,
    "ultrabold": pango.WEIGHT_ULTRABOLD,
    "heavy": pango.WEIGHT_HEAVY,
}


def _get_weight(weight_name):
    """Resolve *weight_name* to a pango weight constant.

    Accepts a pango.Weight or int (returned unchanged), a name from
    _weight_map, or a falsy value (defaults to WEIGHT_NORMAL).
    Raises ValueError for unknown names or unexpected types.
    """
    if not weight_name:
        return pango.WEIGHT_NORMAL
    if isinstance(weight_name, pango.Weight) or isinstance(weight_name, int):
        return weight_name
    if not isinstance(weight_name, basestring):
        # Bug fix: the format string and its argument were passed as two
        # separate ValueError arguments, so "%s" was never interpolated.
        raise ValueError("unexpected weight name type (%s)" % type(weight_name))
    if weight_name not in _weight_map:
        raise ValueError(
            "could not recognize weight '%s'\naccepted values are %s"
            % (weight_name, ", ".join(sorted(_weight_map.keys())))
        )
    return _weight_map.get(weight_name)
# Map from user-facing style names to pango style constants.
_italic_map = {
    "italic": pango.STYLE_ITALIC,
    "oblique": pango.STYLE_OBLIQUE,
    "normal": pango.STYLE_NORMAL,
}


def _get_style(style_name):
    """Resolve *style_name* to a pango style constant.

    Accepts a pango.Style (returned unchanged), a name from _italic_map,
    or a falsy value (defaults to STYLE_NORMAL).  Raises ValueError for
    unknown names or unexpected types.
    """
    if not style_name:
        return pango.STYLE_NORMAL
    if isinstance(style_name, pango.Style):
        return style_name
    if not isinstance(style_name, basestring):
        # Bug fix: the format string and its argument were passed as two
        # separate ValueError arguments, so "%s" was never interpolated.
        raise ValueError("unexpected style name type (%s)" % type(style_name))
    if style_name not in _italic_map:
        raise ValueError(
            "could not recognize style '%s'\naccepted values are %s"
            % (style_name, ", ".join(sorted(_italic_map.keys())))
        )
    return _italic_map.get(style_name)
# Map from user-facing stretch names to pango stretch constants.
_stretch_map = {
    "ultra-condensed": pango.STRETCH_ULTRA_CONDENSED,
    "extra-condensed": pango.STRETCH_EXTRA_CONDENSED,
    "condensed": pango.STRETCH_CONDENSED,
    "semi-condensed": pango.STRETCH_SEMI_CONDENSED,
    "normal": pango.STRETCH_NORMAL,
    "semi-expanded": pango.STRETCH_SEMI_EXPANDED,
    "expanded": pango.STRETCH_EXPANDED,
    "extra-expanded": pango.STRETCH_EXTRA_EXPANDED,
    "ultra-expanded": pango.STRETCH_ULTRA_EXPANDED,
}


def _get_stretch(stretch_name):
    """Resolve *stretch_name* to a pango stretch constant.

    Accepts a pango.Stretch (returned unchanged), a name from
    _stretch_map, or a falsy value (defaults to STRETCH_NORMAL).
    Raises ValueError for unknown names or unexpected types.
    """
    if not stretch_name:
        return pango.STRETCH_NORMAL
    if isinstance(stretch_name, pango.Stretch):
        return stretch_name
    if not isinstance(stretch_name, basestring):
        # Bug fix: the format string and its argument were passed as two
        # separate ValueError arguments, so "%s" was never interpolated.
        raise ValueError("unexpected stretch name type (%s)" % type(stretch_name))
    if stretch_name not in _stretch_map:
        raise ValueError(
            "could not recognize stretch '%s'\naccepted values are %s"
            % (stretch_name, ", ".join(sorted(_stretch_map.keys())))
        )
    return _stretch_map.get(stretch_name)
def render_codes(
    file_name,
    code_list,
    font_name,
    weight_name,
    style_name,
    stretch_name,
    font_size,
    lang,
    ext,
    maxheight=0,
    horiz_margin=0,
):
    """Render a sequence of hex codepoint strings as a single image.

    *code_list* holds hexadecimal codepoint strings (e.g. "1f600"); they
    are converted to characters and handed to render_text.  The new
    optional maxheight/horiz_margin parameters default to render_text's
    own defaults, so existing callers are unaffected, but callers can now
    pass the layout options render_text already supports.
    """
    text = "".join([unichr(int(s, 16)) for s in code_list])
    render_text(
        file_name,
        text,
        font_name,
        weight_name,
        style_name,
        stretch_name,
        font_size,
        lang,
        ext,
        maxheight,
        horiz_margin,
    )
def render_text(
    file_name,
    text,
    font_name,
    weight_name,
    style_name,
    stretch_name,
    font_size,
    lang,
    ext,
    maxheight=0,
    horiz_margin=0,
):
    """Render *text* to an image file, generating a name when none given.

    The generated name encodes the font, the hex codepoints of the text,
    any weight/style/stretch options, the point size and the language.
    """
    font = font_name or "Noto Sans"
    font_size = font_size or 32
    if not file_name:
        # Build a descriptive file name from the rendering parameters.
        name_parts = [font.replace(" ", "")]
        name_parts.extend("%x" % ord(cp) for cp in text)
        for optional in (weight_name, style_name, stretch_name):
            if optional:
                name_parts.append(optional)
        name_parts.append(str(font_size))
        if lang:
            name_parts.append(lang)
        file_name = "_".join(name_parts) + "." + ext
    create_img(
        text,
        file_name,
        family=font,
        weight=weight_name,
        style=style_name,
        stretch=stretch_name,
        language=lang,
        font_size=font_size,
        maxheight=maxheight,
        horiz_margin=horiz_margin,
    )
    print("generated " + file_name)
def main():
    """Command-line entry point: render test images, codepoints or text."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--test", action="store_true", help="generate test images")
    parser.add_argument(
        "--codes", metavar="hex", nargs="+", help="list of hex codepoints to render"
    )
    parser.add_argument(
        "--text", metavar="str", help="text to render, can include unicode escapes"
    )
    parser.add_argument(
        "--out",
        metavar="name",
        help="name of output file, leave empty to generate a name",
        default=None,
    )
    parser.add_argument("-f", "--font", metavar="name", help="name of noto font to use")
    parser.add_argument(
        "-b", "--bold", metavar="wt", help="pango weight name", default=None
    )
    parser.add_argument(
        "-i", "--italic", metavar="it", help="pango style name", default=None
    )
    parser.add_argument(
        "-st", "--stretch", metavar="st", help="stretch name", default=None
    )
    parser.add_argument(
        "-s",
        "--size",
        metavar="int",
        type=int,
        help="point size (default 32)",
        default=32,
    )
    parser.add_argument("-l", "--lang", metavar="lang", help="language code")
    parser.add_argument(
        "-t", "--type", metavar="ext", help="svg (default) or png", default="svg"
    )
    # Bug fix: type=int added to the two numeric options below; they were
    # parsed as strings, breaking the numeric comparisons downstream.
    parser.add_argument(
        "-mh",
        "--maxheight",
        metavar="ht",
        type=int,
        help="0 ignore, <0 for num lines, " "else max height",
        default=0,
    )
    parser.add_argument(
        "-hm",
        "--horiz_margin",
        metavar="mar",
        type=int,
        help="left and right margin, " "to handle large italic side bearings",
        default=0,
    )
    args = parser.parse_args()
    if args.test:
        test()
        return
    if args.codes and args.text:
        print("choose either codes or text")
        return
    if args.codes:
        # Bug fix: args.stretch was omitted, which both shifted every later
        # positional argument by one and passed too many arguments for
        # render_codes' signature (a guaranteed TypeError).
        render_codes(
            args.out,
            args.codes,
            args.font,
            args.bold,
            args.italic,
            args.stretch,
            args.size,
            args.lang,
            args.type,
        )
    elif args.text:
        if args.text[0] == "@":
            # "@file" means: read the text to render from that file.
            if not args.out:
                args.out = path.splitext(args.text[1:])[0] + "." + args.type
            with open(args.text[1:], "r") as f:
                args.text = f.read()
        else:
            args.text = args.text.decode("unicode-escape")
        print("text length %d" % len(args.text))
        # Bug fix: args.stretch was omitted here as well, so size, lang,
        # type, maxheight and horiz_margin all landed in the wrong
        # render_text parameters.
        render_text(
            args.out,
            args.text,
            args.font,
            args.bold,
            args.italic,
            args.stretch,
            args.size,
            args.lang,
            args.type,
            args.maxheight,
            args.horiz_margin,
        )
    else:
        print("nothing to do")


if __name__ == "__main__":
    main()
| {
"content_hash": "9f08aef0980c52e7f0413e4777c2a0df",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 88,
"avg_line_length": 32.01573426573427,
"alnum_prop": 0.6073827335772403,
"repo_name": "googlefonts/nototools",
"id": "b5980a6b41f47ceca3b2921f9f68adfb1be88dd3",
"size": "18968",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "nototools/create_image.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4871"
},
{
"name": "CSS",
"bytes": "895"
},
{
"name": "Dockerfile",
"bytes": "665"
},
{
"name": "HTML",
"bytes": "86714"
},
{
"name": "JavaScript",
"bytes": "6032"
},
{
"name": "Makefile",
"bytes": "3745"
},
{
"name": "Python",
"bytes": "1277966"
},
{
"name": "Shell",
"bytes": "15353"
}
],
"symlink_target": ""
} |
import math
import scipy.signal
import numpy
def result():
    # Evaluation window (seconds) around the echo maximum used for the
    # amplitude average.
    start = 0.5e-3 - 10e-5 # part used for amplitude calculation
    end = 0.5e-3 + 10e-5 #
    # ------------------------------------------------------------------------------
    # NOTE(review): `results`, `data`, ADC_Result, MeasurementResult,
    # Accumulation and AccumulatedValue are not defined in this file --
    # presumably injected by the surrounding measurement framework; verify.
    tauold=None
    for timesignal in results:
        # Skip anything that is not an ADC acquisition result.
        if not isinstance(timesignal, ADC_Result):
            print "No result: ",timesignal
            continue
        # Publish the raw time signal ("+0" yields a fresh copy rather
        # than a reference -- TODO confirm against the result class).
        data["Timesignal"]=timesignal+0
        # Experiment parameters carried in the result's description.
        run = int(timesignal.get_description("run"))+1
        accumulations = int(timesignal.get_description("accumulations"))
        gradient = float(timesignal.get_description("gradient"))
        dac = int(timesignal.get_description("dac"))
        delta = float(timesignal.get_description("delta"))
        tau = float(timesignal.get_description("tau"))
        # A new tau value starts a fresh amplitude curve.
        if tauold != tau:
            #print "new result"
            mag = MeasurementResult("Amplitude %.1e" % tau)
            tauold = tau
        #print timesignal.get_description_dictionary()
        if run <= accumulations:
            # First run of a block starts a fresh accumulation.
            if run == 1:
                accu = Accumulation(error=True)
            # Alternating add/subtract of successive runs (phase cycling).
            if (((run-1)%2)==0):
                accu += timesignal
            elif (((run-1)%2)==1):
                accu -= timesignal
            if run == accumulations:
                # Convert the time window to sample indices, clipped to
                # the valid range of the signal.
                r_start=max(0,int((start-timesignal.x[0])*timesignal.get_sampling_rate()))
                r_end=min(int((end-timesignal.x[0])*timesignal.get_sampling_rate()), len(timesignal))
                out = numpy.sqrt((accu.y[0]**2+accu.y[1]**2))[r_start:r_end].mean()
                # NOTE(review): the assignment above is immediately
                # overwritten -- the baseline-corrected magnitude below is
                # what is actually used; the first line looks like dead code.
                out = accu.magnitude().y[0][r_start:r_end].mean() - accu.magnitude().y[0][-128:].mean()
                out_err=numpy.sqrt((accu.get_yerr(0)[r_start:r_end]**2).sum())/numpy.sqrt(r_end-r_start)
                #mag[tau] = out
                mag[dac]=AccumulatedValue(out, out_err, run)
                mag.write_to_csv("Amplitude_%.0fe-3_delta_%.1fms.dat" %(tau/1e-3, delta/1e-3))
                data["Amplitude %.2e" %tau] = mag
                data["Hahn-Echo %.2e %i"%(tau,dac)]=accu
    pass
"content_hash": "91d9f2d83c09c0847653b04557284a19",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 104,
"avg_line_length": 40.943396226415096,
"alnum_prop": 0.5069124423963134,
"repo_name": "mrosenstihl/PulsePrograms",
"id": "84b2dfc4a9a5c839f19418a23157e8834a294a77",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoPFGEcho/HahnAuto_res.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "160192"
}
],
"symlink_target": ""
} |
import re
import urllib
import time
import os
def getHtml(url):
    """Download *url* and return the raw response body."""
    response = urllib.urlopen(url)
    contents = response.read()
    return contents
def getnexturl(url):
    # Recursively follows the pagination links starting at *url*, appending
    # each discovered absolute URL to the module-level `pagelists` list
    # (pagelists[0] is assumed to be the site root -- defined in the
    # __main__ guard below; TODO confirm for other callers).
    html = getHtml(url)
    # Matches relative pagination hrefs of the form '?page=...&t=<digits>'.
    reg = r'<a href="(\?page=.+?\&t=\d.*?)"'
    pageturl = re.compile(reg)
    page = re.findall(pageturl, html)
    if page:
        # The last matching link on the page is taken to be "next".
        nexturl = pagelists[0] + page[(len(page)-1)]
        pagelists.append(nexturl)
        print nexturl,"adding page %s" % len(pagelists)
        getnexturl(nexturl)
        # Returns the accumulated list once recursion bottoms out.
        return pagelists
    else:
        # No pagination link found: end of the site; returns False (note
        # the asymmetric return type: list vs False).
        print 'There is no more page'
        return False
if __name__ == '__main__':
    url = 'http://sexvvip.lofter.com/'
    # Module-level list read and extended by getnexturl(); seeded with the
    # root URL, which getnexturl() uses as the prefix for relative links.
    pagelists = [url]
    print getnexturl(url)
| {
"content_hash": "d9ecb951eefb3246c4330634325e28b8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 49,
"avg_line_length": 18.636363636363637,
"alnum_prop": 0.6617886178861788,
"repo_name": "sparrow629/Lofter-image-Crawler",
"id": "3034b323c96caa460b399284e7fc54f8fb71af9c",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TestFile/crawlepage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Python",
"bytes": "17103"
}
],
"symlink_target": ""
} |
from jinja2 import Template
from IPython.display import IFrame, HTML
import os
import json
from .base_plotter import IPlotter
class C3Plotter(IPlotter):
    """Class for creating c3.js charts in ipython notebook."""

    head = """
    <!-- Load c3.css -->
    <link href='https://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min.css' rel='stylesheet' type='text/css'/>
    <!-- Load d3.js and c3.js -->
    <script src='http://d3js.org/d3.v3.min.js' charset='utf-8'></script>
    <script src='http://cdnjs.cloudflare.com/ajax/libs/c3/0.4.10/c3.min.js'></script>
    """

    template = """
    <div id={{div_id}} style='width: 100%; height: 100%'></div>
    <script>
    var {{div_id}} = document.getElementById('{{div_id}}');
    var data = {{data}};
    data['bindto']='#{{div_id}}'
    c3.generate(data);
    </script>
    """

    def __init__(self):
        super(C3Plotter, self).__init__()

    def render(self, data, div_id="chart", head=""):
        """Render the data in HTML template."""
        if not self.is_valid_name(div_id):
            raise ValueError(
                "Name {} is invalid. Only letters, numbers, '_', and '-' are permitted ".format(
                    div_id))
        # Spaces are not legal in DOM ids; swap quote styles so the JSON
        # payload can sit inside the single-quoted JS context.
        safe_id = div_id.replace(" ", "_")
        payload = json.dumps(data, indent=4).replace("'", "\\'").replace('"', "'")
        return Template(head + self.template).render(div_id=safe_id, data=payload)

    def plot_and_save(self,
                      data,
                      w=800,
                      h=420,
                      filename='chart',
                      overwrite=True):
        """Save the rendered html to a file and returns an IFrame to display the plot in the notebook."""
        self.save(data, filename, overwrite)
        return IFrame(filename + '.html', w, h)

    def plot(self, data, w=800, h=420):
        """Output an iframe containing the plot in the notebook without saving."""
        rendered = self.render(data=data, div_id="chart", head=self.head)
        return HTML(self.iframe.format(source=rendered, w=w, h=h))

    def save(self, data, filename='chart', overwrite=True):
        """Save the rendered html to a file in the same directory as the notebook."""
        html = self.render(data=data, div_id=filename, head=self.head)
        target = filename.replace(" ", "_") + '.html'
        # Refuse to clobber an existing file unless overwriting was requested.
        if not overwrite and os.path.exists(target):
            raise IOError('File Already Exists!')
        with open(target, 'w') as f:
            f.write(html)
| {
"content_hash": "cce4e0bd3a0dd985a67c11e70cceb323",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 115,
"avg_line_length": 36.68421052631579,
"alnum_prop": 0.5161406025824964,
"repo_name": "niloch/iplotter",
"id": "cb34cb9be92cf4b006cff468f38cf2db5baf4dd3",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iplotter/c3_plotter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20250"
}
],
"symlink_target": ""
} |
"""
Objects relating to sourcing connections from environment variables
"""
import os
from typing import Optional
from airflow.secrets import BaseSecretsBackend
# Environment-variable name prefixes for connection URIs and Airflow
# Variables respectively, e.g. AIRFLOW_CONN_MY_DB / AIRFLOW_VAR_FOO.
CONN_ENV_PREFIX = "AIRFLOW_CONN_"
VAR_ENV_PREFIX = "AIRFLOW_VAR_"
class EnvironmentVariablesBackend(BaseSecretsBackend):
    """
    Retrieves Connection object from environment variable.
    """

    # pylint: disable=missing-docstring
    def get_conn_uri(self, conn_id):
        # type: (str) -> Optional[str]
        # Connection ids are case-insensitive: the lookup key is always the
        # upper-cased id behind the AIRFLOW_CONN_ prefix.
        return os.environ.get(CONN_ENV_PREFIX + conn_id.upper())

    def get_variable(self, key):
        # type: (str) -> Optional[str]
        """
        Get Airflow Variable from Environment Variable

        :param key: Variable Key
        :return: Variable Value
        """
        return os.environ.get(VAR_ENV_PREFIX + key.upper())
| {
"content_hash": "46a241fae4c63ab8a037119824afe118",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 26.181818181818183,
"alnum_prop": 0.6643518518518519,
"repo_name": "owlabs/incubator-airflow",
"id": "c4d5754a857a5291613eec76a5252e78d9662596",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/secrets/environment_variables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
} |
import csv
from django.db import models
from django.conf import settings
import eveapi
from core.models import Type, Location
from API.models import CorpAPIKey
from core.models import Corporation, Alliance
from Map.models import System, MapSystem, Map
from API import cache_handler as handler
User = settings.AUTH_USER_MODEL
class POS(models.Model):
    """Represents a POS somewhere in space."""
    system = models.ForeignKey(System, related_name="poses")
    planet = models.IntegerField()
    moon = models.IntegerField()
    towertype = models.ForeignKey(Type, related_name="inspace")
    corporation = models.ForeignKey(Corporation, related_name="poses")
    posname = models.CharField(max_length=100, blank=True, null=True)
    fitting = models.TextField(blank=True, null=True)
    # Using CCP's status codes here for sanity with API checks
    status = models.IntegerField(choices=((0, 'Unanchored'),
                                          (1, 'Anchored'),
                                          (2, 'Onlining'),
                                          (3, 'Reinforced'),
                                          (4, 'Online')))
    # This should be the time the tower exits RF
    # TODO: add a validator to make sure this is only set
    # if status = 3 (Reinforced)
    rftime = models.DateTimeField(null=True, blank=True)
    updated = models.DateTimeField()
    # These values will be set by the TSV parser from d-scan data if available
    guns = models.IntegerField(null=True, blank=True)
    ewar = models.IntegerField(null=True, blank=True)
    sma = models.IntegerField(null=True, blank=True)
    hardener = models.IntegerField(null=True, blank=True)
    # This is a short comment that is displayed as a warning
    warpin_notice = models.CharField(blank=True, null=True, max_length=64)

    class Meta:
        ordering = ['system__name', 'planet', 'moon']

    @classmethod
    def update_from_import_list(cls, system, import_list):
        """
        Imports starbases from YAML importer.

        Each entry of import_list is a dict shaped like POS.as_dict().
        Existing records (same system/planet/moon/owner) are updated in
        place; otherwise a new POS is created.
        """
        for pos in import_list:
            planet = pos['planet']
            moon = pos['moon']
            warpin = pos['warpin']
            status = pos['status']
            rftime = pos['rftime']
            name = pos['name']
            tower = Type.objects.get(name=pos['tower'])
            try:
                owner = Corporation.objects.get(name=pos['owner'])
            except Corporation.DoesNotExist:
                # Unknown corp: resolve its id via the EVE API and pull
                # its details into the local database.
                from core import tasks
                api = eveapi.EVEAPIConnection(cacheHandler=handler)
                corp_id = api.eve.CharacterID(
                    names=pos['owner']).characters[0].characterID
                owner = tasks.update_corporation(corp_id, True)
            if POS.objects.filter(system=system, planet=planet,
                                  moon=moon, corporation=owner).exists():
                # Update first existing record
                starbase = POS.objects.filter(system=system, planet=planet,
                                              moon=moon,
                                              corporation=owner).all()[0]
                starbase.status = status
                # Bug fix: the model's name field is "posname"; assigning
                # "starbase.name" only set a throwaway attribute.
                starbase.posname = name
                starbase.towertype = tower
                if status == 3:
                    starbase.rftime = rftime
                starbase.warpin_notice = warpin
                # Bug fix: the updated record was never persisted.
                starbase.save()
            else:
                # Bug fix: the imported name was previously dropped for new
                # records (save() would default posname to the tower type).
                new_pos = POS(system=system, planet=planet, moon=moon,
                              corporation=owner, towertype=tower,
                              posname=name, warpin_notice=warpin,
                              status=status)
                if status == 3:
                    new_pos.rftime = rftime
                new_pos.save()

    def as_dict(self):
        """Return the starbase as a plain dict (inverse of the importer)."""
        data = {
            'planet': self.planet, 'moon': self.moon,
            'tower': self.towertype.name, 'owner': self.corporation.name,
            'status': self.status, 'name': self.posname,
            'rftime': self.rftime, 'warpin': self.warpin_notice,
        }
        return data

    def clean(self):
        """Model validation: rftime only makes sense while reinforced."""
        from django.core.exceptions import ValidationError
        if self.rftime and self.status != 3:
            raise ValidationError("A POS cannot have an rftime unless "
                                  "it is reinforced")

    def __unicode__(self):
        return self.posname

    # override save to implement posname defaulting to towertype.name
    def save(self, *args, **kwargs):
        if not self.posname:
            self.posname = self.towertype.name
        # Mark tower as having been updated
        from datetime import datetime
        import pytz
        self.updated = datetime.now(pytz.utc)
        super(POS, self).save(*args, **kwargs)

    def log(self, user, action, map_system):
        """
        Records a log entry for POS updates and additions.
        """
        map_system.map.add_log(
            user,
            "%s POS (Planet %s Moon %s, owner %s) in %s (%s), %s jumps out from root system."
            % (action, self.planet, self.moon, self.corporation,
               map_system.system.name, map_system.friendlyname,
               map_system.distance_from_root()))

    def size(self):
        """
        Returns the size of the tower, Small Medium or Large.
        """
        if u'Small' in self.towertype.name:
            return u'Small'
        if u'Medium' in self.towertype.name:
            return u'Medium'
        return u'Large'

    def fit_from_dscan(self, dscan):
        """
        Fills in a POS's fitting from a copy / paste of d-scan results.
        """
        return self.fit_from_iterable(csv.reader(dscan.splitlines(),
                                                 delimiter="\t"))

    def fit_from_iterable(self, fit):
        """
        Fills in a POS's fitting from an iterable (normally parsed d-scan)

        Rows are (name, typename, ...) tuples.  Counts guns / ewar / smas /
        hardeners, detects the tower itself, and raises AttributeError when
        zero or multiple towers are found where one was expected.
        """
        from core.models import Type
        item_dict = dict()
        # marketGroupIDs to consider guns, ewar, hardeners, and smas
        guns_groups = [480, 479, 594, 595, 596]
        ewar_groups = [481, 1009]
        sma_groups = [484]
        hardener_groups = [485]
        towers = 0
        self.sma = 0
        self.hardener = 0
        self.guns = 0
        self.ewar = 0
        for row in fit:
            try:
                item_type = Type.objects.get(name=row[1])
            # odd bug where invalid items get into dscan
            except Type.DoesNotExist:
                continue
            if item_type.marketgroup:
                # Collect the full chain of market group ids up the tree.
                group_tree = []
                parent = item_type.marketgroup
                while parent:
                    group_tree.append(parent.id)
                    parent = parent.parentgroup
                if item_type.marketgroup.id in guns_groups:
                    self.guns += 1
                if item_type.marketgroup.id in ewar_groups:
                    self.ewar += 1
                if item_type.marketgroup.id in sma_groups:
                    self.sma += 1
                if item_type.marketgroup.id in hardener_groups:
                    self.hardener += 1
                # Market group 478 is the control tower itself.
                if item_type.marketgroup.id == 478:
                    towers += 1
                    towertype = item_type
                    posname = row[0]
                if item_type.name in item_dict:
                    item_dict[item_type.name] += 1
                # 1285 = starbase structures; exclude the tower (478).
                elif 1285 in group_tree and 478 not in group_tree:
                    item_dict.update({item_type.name: 1})
        self.fitting = "Imported from D-Scan:\n"
        for itemtype in item_dict:
            self.fitting += "\n%s : %s" % (itemtype, item_dict[itemtype])
        if towers == 1 and self.towertype_id is None and self.posname is None:
            self.towertype = towertype
            self.posname = posname
        if towers == 0 and self.towertype_id is None:
            raise AttributeError('No POS in the D-Scan!')
        elif towers <= 1:
            self.save()
        else:
            raise AttributeError('Too many towers detected in the D-Scan!')
class CorpPOS(POS):
    """A corp-controlled POS with manager and password data."""
    # Optional member responsible for fueling / maintaining this tower.
    manager = models.ForeignKey(User, null=True, blank=True,
                                related_name='poses')
    password = models.CharField(max_length=100)
    description = models.TextField(null=True, blank=True)
    # Let's store the CCP Item ID for the tower here to make API lookup easier
    # If it is null, then we are not tracking this POS via API
    apiitemid = models.BigIntegerField(null=True, blank=True)
    # Corp API key used for API-based tracking of this tower.
    apikey = models.ForeignKey(CorpAPIKey, null=True, blank=True,
                               related_name='poses')

    class Meta:
        permissions = (('can_see_pos_pw', 'Can see corp POS passwords.'),
                       ('can_see_all_pos', 'Sees all corp POSes '
                        'regardless of manager.'),)
class POSApplication(models.Model):
    """Represents an application for a personal POS."""
    applicant = models.ForeignKey(User, null=True, blank=True,
                                  related_name='posapps')
    towertype = models.ForeignKey(Type, null=True, blank=True,
                                  related_name='posapps')
    # Other users who will share the tower if the application is approved.
    residents = models.ManyToManyField(User)
    # Requested fittings, free-form text (normal and reinforced/siege setups).
    normalfit = models.TextField()
    siegefit = models.TextField()
    # Once it is approved, we will fill in these two to tie the records together
    approved = models.DateTimeField(blank=True, null=True)
    posrecord = models.ForeignKey(CorpPOS, blank=True, null=True,
                                  related_name='application')

    class Meta:
        permissions = (('can_close_pos_app',
                        'Can dispose of corp POS applications.'),)

    def __unicode__(self):
        return 'Applicant: %s Tower: %s' % (self.applicant.username,
                                            self.towertype.name)
class POSVote(models.Model):
    """Represents a vote on a personal POS application."""
    application = models.ForeignKey(POSApplication, related_name='votes')
    voter = models.ForeignKey(User, related_name='posvotes')
    # Numeric ballot: 0 = Deny, 1 = Approve, 2 = Abstain.
    vote = models.IntegerField(choices=((0, 'Deny'),
                                        (1, 'Approve'),
                                        (2, 'Abstain')))
| {
"content_hash": "5915c0543623bd92a85a8162edb40ded",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 93,
"avg_line_length": 40.82539682539682,
"alnum_prop": 0.5541407465007776,
"repo_name": "Zumochi/eve-wspace",
"id": "247df7f3320ceebbb9cda233d2ee596ecc0f4a15",
"size": "10918",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "evewspace/POS/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45009"
},
{
"name": "HTML",
"bytes": "152549"
},
{
"name": "JavaScript",
"bytes": "88761"
},
{
"name": "Nginx",
"bytes": "109"
},
{
"name": "Puppet",
"bytes": "6781"
},
{
"name": "Python",
"bytes": "1100699"
},
{
"name": "Shell",
"bytes": "2632"
}
],
"symlink_target": ""
} |
'''
Configure waf for worch.
It is expected that any external tools that add worch features have already been loaded.
'''
import os
from glob import glob
import waflib.Logs as msg
from waflib import Context
from . import util
from . import pkgconf
from . import envmunge
def locate_config_files(pat):
    """Resolve *pat* to a list of existing configuration files.

    A path that exists literally is returned as-is; an absolute pattern
    is globbed directly (possibly yielding an empty list); a relative
    pattern is tried against each WORCH_CONFIG_PATH entry and finally
    the waf installation directory.  Returns None when nothing matches
    a relative pattern.
    """
    if os.path.exists(pat):
        return [pat]
    if pat.startswith('/'):
        return glob(pat)
    search_dirs = os.environ.get('WORCH_CONFIG_PATH', '').split(':')
    search_dirs.append(Context.waf_dir)
    for config_dir in search_dirs:
        matches = glob(os.path.join(config_dir, pat))
        if matches:
            return matches
    return None
def get_orch_config_files(cfg):
    """Expand cfg.options.orch_config into a list of config files.

    Raises RuntimeError when no configuration option was supplied at
    all, and ValueError after reporting every pattern that failed to
    match any file.
    """
    if not cfg.options.orch_config:
        raise RuntimeError('No Orchestration configuration file given (--orch-config)')
    found = []
    missing = False
    for pat in util.string2list(cfg.options.orch_config):
        matches = locate_config_files(pat)
        if matches:
            found += matches
        else:
            # Report every miss before bailing out so the user sees them all.
            msg.error('File not found: "%s"' % pat)
            missing = True
    if missing:
        raise ValueError('no configuration files')
    return found
def configure(cfg):
    """Waf configure hook: load worch features, read the suite
    configuration and decompose it into per-package waf environments."""
    msg.debug('orch: CONFIG CALLED')
    from . import features
    features.load()
    orch_config = get_orch_config_files(cfg)
    cfg.msg('Orch configuration files', '"%s"' % '", "'.join(orch_config))
    # Seed the template-interpolation context with the current waf env
    # plus the standard top/out/DESTDIR locations.
    extra = dict(cfg.env)
    extra['top'] = cfg.path.abspath()
    out = cfg.bldnode.abspath() # usually {top}/tmp
    assert out, 'No out dir defined'
    extra['out'] = out
    extra['DESTDIR'] = getattr(cfg.options, 'destdir', '')
    msg.debug('top="{top}" out="{out}" DESTDIR="{DESTDIR}"'.format(**extra))
    suite = pkgconf.load(orch_config, start = cfg.options.orch_start, **extra)
    # load in any external tools in this configuration context that
    # may be referenced in the configuration
    for group in suite['groups']:
        for package in group['packages']:
            tools = package.get('tools')
            if not tools: continue
            for tool in util.string2list(tools):
                msg.debug('orch: loading tool: "%s" for package "%s"' % (tool, package['package']))
                cfg.load(tool)
    # Second pass: fold interpolation variables back into the suite.
    suite = pkgconf.fold_in(suite, **extra)
    #pkgconf.dump_suite(suite)
    # decompose the hierarchy of dicts into waf data
    envmunge.decompose(cfg, suite)
    cfg.msg('Orch configure envs', '"%s"' % '", "'.join(cfg.all_envs.keys()))
    msg.debug('orch: CONFIG CALLED [done]')
    return
| {
"content_hash": "3e36432a6ab6f248daeabe99491a595a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 100,
"avg_line_length": 29.423529411764704,
"alnum_prop": 0.6205517792882846,
"repo_name": "hwaf/hwaf",
"id": "9a1a6d14db9b9ed74a7e24ce2188ae765cadfbbf",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-hwaftools/orch/configure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Go",
"bytes": "255531"
},
{
"name": "Python",
"bytes": "345723"
}
],
"symlink_target": ""
} |
from .vectors import *
| {
"content_hash": "c5bd195e0f16ebb7d3ac56ab3c30d4c1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "cnlohr/bridgesim",
"id": "f7556184ca5adbc935c9782a84f9c4ddeab2d577",
"size": "23",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/physics/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1912782"
},
{
"name": "C++",
"bytes": "7683"
},
{
"name": "CSS",
"bytes": "990"
},
{
"name": "JavaScript",
"bytes": "945033"
},
{
"name": "Python",
"bytes": "64299"
}
],
"symlink_target": ""
} |
"""LHN elements."""
# pylint: disable=not-callable
# pylint: disable=not-an-iterable
from selenium.common import exceptions as selenium_exception
from lib import base, exception
from lib.constants import locator
from lib.page import extended_info
from lib.utils import selenium_utils
class _Tab(base.Tab):
  """Base tab element; subclasses supply the locator."""
  # Locator of the tab element; set by concrete subclasses.
  locator_element = None

  def __init__(self, driver):
    """
    Args: driver (base.CustomDriver)
    """
    super(_Tab, self).__init__(driver, self.locator_element)
class MyObjectsTab(_Tab):
  """In LHN my objects tab."""
  # Locator for the "My Objects" tab in the LHN menu.
  locator_element = locator.LhnMenu.MY_OBJECTS
class AllObjectsTab(_Tab):
  """In LHN all objects tab."""
  # Locator for the "All Objects" tab in the LHN menu.
  locator_element = locator.LhnMenu.ALL_OBJECTS
class Toggle(base.Toggle):
  """Button in LHN."""

  def __init__(self, driver, locator_element, locator_count):
    super(Toggle, self).__init__(driver, locator_element)
    # The element at locator_count displays the number of members in the
    # section; wait for it to be visible and cache the parsed count.
    count_element = selenium_utils.get_when_visible(driver, locator_count)
    self.members_count = int(count_element.text)
class DropdownStatic(base.Dropdown):
  """Dropdown in LHN."""
  # Locator of the dropdown element; set by concrete subclasses.
  _locator_element = None

  def __init__(self, driver):
    super(DropdownStatic, self).__init__(driver, self._locator_element)
class AccordionGroup(base.DropdownDynamic):
  """Model for LHN's accordion group."""
  # Locators for the group's spinner, its "create new" button and its
  # member rows; all supplied by concrete subclasses.
  _locator_spinny = None
  _locator_button_create_new = None
  _locator_accordion_members = None
  # modal class which is used when creating a new object
  _create_new_modal_cls = None

  def __init__(self, driver):
    """
    Args: driver (base.CustomDriver)
    """
    super(AccordionGroup, self).__init__(
        driver, [self._locator_spinny], wait_until_visible=False)
    self.button_create_new = base.Button(
        self._driver, self._locator_button_create_new)
    self._update_loaded_members()
    self._set_visible_members()

  def _update_loaded_members(self):
    # Re-query the DOM for all member elements currently loaded in the
    # accordion, caching them on the instance.
    self.members_loaded = self._driver.find_elements(
        *self._locator_accordion_members)

  def _set_visible_members(self):
    # Filter loaded members down to the ones actually displayed.  If the
    # DOM mutates underneath us the elements go stale, in which case we
    # re-fetch and retry recursively.
    try:
      for element in self.members_loaded:
        selenium_utils.wait_until_stops_moving(element)
      self.members_visible = [
          element for element in self.members_loaded if element.is_displayed()]
    except selenium_exception.StaleElementReferenceException:
      self._update_loaded_members()
      self._set_visible_members()

  def _get_visible_member_by_title(self, member_title):
    """Return the visible member element with (unique) title "member_title".
    Args: member_title (basestring): (unique) title of member
    Return: selenium.webdriver.remote.webelement.WebElement
    Raises: exception.ElementNotFound when no visible member matches.
    """
    try:
      for element in self.members_visible:
        if element.text == member_title:
          break
      else:
        raise exception.ElementNotFound
      return element
    except selenium_exception.StaleElementReferenceException:
      # the elements can go stale, here we refresh them
      self._update_loaded_members()
      self._set_visible_members()
      return self._get_visible_member_by_title(member_title)

  def scroll_down(self):
    # Required by the DropdownDynamic interface; no-op here.
    pass

  def scroll_up(self):
    # Required by the DropdownDynamic interface; no-op here.
    pass

  def create_new(self):
    """Open the "create new object" modal for this accordion group.
    Return: lib.page.modal.create_new_object.CreateNewObjectModal
    """
    self.button_create_new.click()
    return self._create_new_modal_cls(self._driver)

  def hover_over_visible_member(self, member_title):
    """Hovers over visible member with (unique) title "member_title" and
    returns the extended-info popup that appears.
    Args: member_title (basestring): (unique) title of member
    """
    try:
      el = self._get_visible_member_by_title(member_title)
      selenium_utils.hover_over_element(self._driver, el)
      selenium_utils.get_when_visible(
          self._driver, locator.LhnMenu.EXTENDED_INFO)
      return extended_info.ExtendedInfo(self._driver)
    except selenium_exception.StaleElementReferenceException:
      # Stale element: retry the whole hover from scratch.
      return self.hover_over_visible_member(member_title)
| {
"content_hash": "35dd4a5a437ce734e2e84da10911f59a",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.6898572884811417,
"repo_name": "plamut/ggrc-core",
"id": "ad2358a36f38f8e79b32fc05c74e521a307fb11a",
"size": "4036",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "test/selenium/src/lib/element/lhn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "229800"
},
{
"name": "HTML",
"bytes": "1060475"
},
{
"name": "JavaScript",
"bytes": "1951072"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2839040"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
} |
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts. # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import grafeas
from grafeas.models.api_alias_context import ApiAliasContext # noqa: E501
from grafeas.rest import ApiException
class TestApiAliasContext(unittest.TestCase):
    """Unit test stubs for the generated ApiAliasContext model."""

    def setUp(self):
        """No fixtures are needed for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testApiAliasContext(self):
        """Test ApiAliasContext"""
        # FIXME: construct object with mandatory attributes with example values
        # model = grafeas.models.api_alias_context.ApiAliasContext()  # noqa: E501
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "52105dfd6075dff25655d8e44190f48e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 22.657894736842106,
"alnum_prop": 0.6829268292682927,
"repo_name": "grafeas/client-python",
"id": "0e53cbb4b2cae3b34c9fc789c49abcef2941d36f",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_api_alias_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "558375"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
} |
import asyncio
import copy
import discord
import sys
import time
import datetime
import traceback
from discord.ext import commands
from sys import argv
class Loop:
    """Loop events."""

    # Class-level defaults; __unload() shadows is_active on the instance.
    is_active = True
    last_hour = datetime.datetime.now().hour

    def __init__(self, bot):
        self.bot = bot
        loaded_msg = 'Addon "{}" loaded'.format(type(self).__name__)
        print(loaded_msg)

    def __unload(self):
        # Called by the framework on unload; stop any looping work.
        self.is_active = False
def setup(bot):
    # discord.py extension entry point: register the Loop cog on the bot.
    bot.add_cog(Loop(bot))
| {
"content_hash": "aaa5fb64305f85dccf7e7a70f089dc43",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 16.266666666666666,
"alnum_prop": 0.6290983606557377,
"repo_name": "916253/Kurisu-Reswitched",
"id": "e7972237ab0565ef7910f009d4b3e066729df94d",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addons/loop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "125780"
}
],
"symlink_target": ""
} |
from Adafruit_AMG88xx import Adafruit_AMG88xx
from time import sleep
#import Adafruit_AMG88xx.Adafruit_AMG88xx as AMG88
# Default constructor will pick a default I2C bus.
#
# For the Raspberry Pi this means you should hook up to the only exposed I2C bus
# from the main GPIO header and the library will figure out the bus number based
# on the Pi's revision.
#
# For the Beaglebone Black the library will assume bus 1 by default, which is
# exposed with SCL = P9_19 and SDA = P9_20.
# Instantiate the sensor on the default I2C bus (see notes above).
sensor = Adafruit_AMG88xx()

# Optionally you can override the bus number:
#sensor = AMG88.Adafruit_AMG88xx(busnum=2)

# wait for it to boot
sleep(.1)

# Poll the sensor's pixel readings forever, once per second.
while(1):
    print(sensor.readPixels())
    sleep(1)
| {
"content_hash": "4ca0be1e3c11f08bf6ca8a465476b084",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 27.4,
"alnum_prop": 0.7562043795620438,
"repo_name": "Quantonomist/Kitchen_Pi",
"id": "3ac3228f5f48aed09cfc2cce0f1e3cdb097ba5bd",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sensor_Tests/IR_Camera/pixels_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15990"
}
],
"symlink_target": ""
} |
"""
Base class for computation procedures
"""
import abc
from ..interface.models import KVStore
class BaseTasks(abc.ABC):
    """Abstract base class for computation procedures.

    Subclasses implement parsing/verification of input and handling of
    completed outputs; this base provides task submission bookkeeping and
    output retrieval/compression handling.
    """

    def __init__(self, storage, logger):
        # storage: database interface (must provide add_kvstore());
        # logger: logger-like object (only .warning is used here).
        self.storage = storage
        self.logger = logger

    def submit_tasks(self, data):
        """
        Creates results/procedures and tasks in the database
        """
        results_ids, existing_ids = self.parse_input(data)

        # Ids newly created this call: not pre-existing and not failed (None).
        submitted_ids = [x for x in results_ids if x not in existing_ids and x is not None]

        n_inserted = 0
        missing = []
        # NOTE(review): 'missing' collects the positions of failed (None) ids
        # but is never returned or logged -- confirm whether it should be.
        for num, x in enumerate(results_ids):
            if x is None:
                missing.append(num)
            else:
                n_inserted += 1

        results = {
            "meta": {
                "n_inserted": n_inserted,
                "duplicates": [],
                "validation_errors": [],
                "success": True,
                # NOTE(review): False (not None) is the "no error" marker
                # here -- kept as-is for response-shape compatibility.
                "error_description": False,
                "errors": [],
            },
            "data": {"ids": results_ids, "submitted": submitted_ids, "existing": existing_ids},
        }

        return results

    def retrieve_outputs(self, rdata):
        """
        Retrieves (possibly compressed) outputs from an AtomicResult (that has been converted to a dictionary)

        This function modifies the rdata dictionary in-place
        """

        # Get the compressed outputs if they exist
        stdout = rdata["extras"].pop("_qcfractal_compressed_stdout", None)
        stderr = rdata["extras"].pop("_qcfractal_compressed_stderr", None)
        error = rdata["extras"].pop("_qcfractal_compressed_error", None)

        # Create KVStore objects from these
        if stdout is not None:
            stdout = KVStore(**stdout)
        if stderr is not None:
            stderr = KVStore(**stderr)
        if error is not None:
            error = KVStore(**error)

        # This shouldn't happen, but if they aren't compressed, check for
        # uncompressed
        if stdout is None and rdata.get("stdout", None) is not None:
            self.logger.warning(f"Found uncompressed stdout for result id {rdata['id']}")
            stdout = KVStore(data=rdata["stdout"])
        if stderr is None and rdata.get("stderr", None) is not None:
            self.logger.warning(f"Found uncompressed stderr for result id {rdata['id']}")
            stderr = KVStore(data=rdata["stderr"])
        if error is None and rdata.get("error", None) is not None:
            self.logger.warning(f"Found uncompressed error for result id {rdata['id']}")
            error = KVStore(data=rdata["error"])

        # Now add to the database and set the ids in the dictionary
        outputs = [stdout, stderr, error]
        stdout_id, stderr_id, error_id = self.storage.add_kvstore(outputs)["data"]
        rdata["stdout"] = stdout_id
        rdata["stderr"] = stderr_id
        rdata["error"] = error_id

    @abc.abstractmethod
    def verify_input(self, data):
        pass

    @abc.abstractmethod
    def parse_input(self, data):
        pass

    @abc.abstractmethod
    def handle_completed_output(self, data):
        pass
| {
"content_hash": "ad24a449a96efd942c65dcc8cd93c9d9",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 110,
"avg_line_length": 33.58064516129032,
"alnum_prop": 0.5766890810118476,
"repo_name": "psi4/DatenQM",
"id": "fb49a36f2301dba19c315ce082a2107ad079cb09",
"size": "3123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qcfractal/procedures/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8266"
},
{
"name": "Python",
"bytes": "145536"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
} |
# Version tuple: (major, minor, micro, release level, serial); presumably
# read by django.utils.version.get_version -- see get_version() below.
VERSION = (1, 7, 0, 'alpha', 0)
def get_version(*args, **kwargs):
    """Delegate to django.utils.version.get_version (lazily imported)."""
    # Don't litter django/__init__.py with all the get_version stuff.
    # Only import if it's actually called.
    from django.utils.version import get_version
    return get_version(*args, **kwargs)
def setup():
    """
    Configure the settings (this happens as a side effect of accessing the
    first setting), configure logging and populate the app registry.
    """
    # Imports are local so merely importing django does not trigger
    # settings access.
    from django.apps import apps
    from django.conf import settings
    from django.utils.log import configure_logging

    configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
    apps.populate(settings.INSTALLED_APPS)
| {
"content_hash": "033e08cdd80fcd85619df87f4d18e06a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 32.142857142857146,
"alnum_prop": 0.7037037037037037,
"repo_name": "Beeblio/django",
"id": "14c941601a148964a8925508e79d1fbe7e7e6537",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42830"
},
{
"name": "HTML",
"bytes": "173915"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Makefile",
"bytes": "239"
},
{
"name": "Python",
"bytes": "9172420"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.models import MPPTestCase
from mpp.lib.gppkg.gppkg import Gppkg
# Module-level helper and the package under test for this module.
gppkg = Gppkg()
pkgname = 'plperl'
class CompatiblityMPPTestCase(MPPTestCase):
    # NOTE(review): class name misspells "Compatibility"; kept as-is since
    # renaming could break test discovery and external references.

    def __init__(self, methodName):
        super(CompatiblityMPPTestCase, self).__init__(methodName)

    @classmethod
    def setUpClass(self):
        # 'self' here is actually the class object (classmethod receives the
        # class); conventional name would be 'cls' -- kept byte-identical.
        super(CompatiblityMPPTestCase, self).setUpClass()
        # Start from a clean slate: remove the package if already installed.
        gppkg.run_gppkg_uninstall(pkgname)

    def test_install_should_fail(self):
        """@product_version gpdb: [4.3.5.0 -]"""
        # The bare string below is a no-op statement, not part of the
        # docstring -- likely kept separate so the tinc framework only sees
        # the @product_version tag above.
        "Old package on the new database which is above the version of 4.3.5.0 should fail"
        # Local 'gppkg' shadows the module-level Gppkg instance.
        gppkg = Gppkg()
        build_type = None
        if os.environ.get("BUILD_TYPE"):
            build_type = os.environ["BUILD_TYPE"]
        # Force an RC-style build so the version check kicks in.
        os.environ["BUILD_TYPE"] = 'rc'
        with self.assertRaisesRegexp(Exception, 'Failed to install'):
            gppkg.gppkg_install(product_version='4.3.4.0', gppkg=pkgname)
        # Restore the original BUILD_TYPE if one was set.
        if build_type is not None:
            os.environ["BUILD_TYPE"] = build_type
        existed, _ = gppkg.check_pkg_exists(pkgname)
        self.assertFalse(existed)
| {
"content_hash": "9c9fa16cc5434b879c7a74778d049efe",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 91,
"avg_line_length": 37.1875,
"alnum_prop": 0.7002801120448179,
"repo_name": "Quikling/gpdb",
"id": "d2061f5580678e001a82ababea9bf2079b1a9841",
"size": "1785",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/package/compat/test_compatibility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35104900"
},
{
"name": "C++",
"bytes": "3826418"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "731336"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268348"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "105042"
},
{
"name": "Makefile",
"bytes": "428681"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5487194"
},
{
"name": "Perl",
"bytes": "3894496"
},
{
"name": "Perl 6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "8656525"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "541518"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488297"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import
import logging
import re
import urllib
from flexget import plugin, validator
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('search_cpasbien')
session = requests.Session()
class SearchCPASBIEN(object):
    # Config schema: a single required 'category' key from a fixed enum.
    schema = {
        'type': 'object',
        'properties':
            {
                'category': {
                    'type': 'string',
                    'enum': ['films', 'series', 'musique', 'films-french',
                             '720p', 'series-francaise', 'films-dvdrip', 'all',
                             'films-vostfr', '1080p', 'series-vostfr', 'ebook']
                },
            },
        'required': ['category'],
        'additionalProperties': False
    }

    @plugin.internet(log)
    def search(self, task, entry, config):
        """CPASBIEN search plugin

        Config example:

        tv_search_cpasbien:
            discover:
              what:
                 - trakt_list:
                   username: xxxxxxx
                   api_key: xxxxxxx
                   series: watchlist
              from:
                - cpasbien:
                    category: "series-vostfr"
              interval: 1 day
              ignore_estimations: yes

        Category is ONE of:
            all
            films
            series
            musique
            films-french
            1080p
            720p
            series-francaise
            films-dvdrip
            films-vostfr
            series-vostfr
            ebook

        Return: set of Entry objects with at least one seeder.
        """
        base_url = 'http://www.cpasbien.pe'
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            # Site URLs use lowercase, dash-separated queries without parens.
            search_string = search_string.replace(' ', '-').lower()
            search_string = search_string.replace('(', '')
            search_string = search_string.replace(')', '')
            query = normalize_unicode(search_string)
            query_url_fragment = urllib.quote_plus(query.encode('utf-8'))
            # http://www.cpasbien.pe/recherche/ncis.html
            if config['category'] == 'all':
                str_url = (base_url, 'recherche', query_url_fragment)
                url = '/'.join(str_url)
            else:
                category_url_fragment = '%s' % config['category']
                str_url = (base_url, 'recherche', category_url_fragment, query_url_fragment)
                url = '/'.join(str_url)
            log.debug('search url: %s' % url + '.html')
            # GET URL
            f = task.requests.get(url + '.html').content
            soup = get_soup(f)
            if soup.findAll(text=re.compile(' 0 torrents')):
                log.debug('search returned no results')
            else:
                # Paginate: nextpage 0 reuses the soup already fetched above;
                # nextpage > 0 fetches '/page-N'; -1 terminates the loop.
                nextpage = 0
                while (nextpage >= 0):
                    if (nextpage > 0):
                        newurl = url + '/page-' + str(nextpage)
                        log.debug('-----> NEXT PAGE : %s' % newurl)
                        f1 = task.requests.get(newurl).content
                        soup = get_soup(f1)
                    for result in soup.findAll('div', attrs={'class': re.compile('ligne')}):
                        entry = Entry()
                        link = result.find('a', attrs={'href': re.compile('dl-torrent')})
                        entry['title'] = link.contents[0]
                        # REWRITE URL
                        page_link = link.get('href')
                        link_rewrite = page_link.split('/')
                        # get last value in array remove .html and replace by .torrent
                        endlink = link_rewrite[-1]
                        str_url = (base_url, '/telechargement/', endlink[:-5], '.torrent')
                        entry['url'] = ''.join(str_url)
                        log.debug('Title: %s | DL LINK: %s' % (entry['title'], entry['url']))
                        entry['torrent_seeds'] = (int(result.find('span', attrs={'class': re.compile('seed')}).text))
                        entry['torrent_leeches'] = (int(result.find('div', attrs={'class': re.compile('down')}).text))
                        # Size text ends in a unit suffix, e.g. "1.4 GB";
                        # normalize content_size to megabytes.
                        sizefull = (result.find('div', attrs={'class': re.compile('poid')}).text)
                        size = sizefull[:-3]
                        unit = sizefull[-2:]
                        if unit == 'GB':
                            entry['content_size'] = int(float(size) * 1024)
                        elif unit == 'MB':
                            entry['content_size'] = int(float(size))
                        elif unit == 'KB':
                            entry['content_size'] = int(float(size) / 1024)
                        if(entry['torrent_seeds'] > 0):
                            entries.add(entry)
                        else:
                            log.debug('0 SEED, not adding entry')
                    # 'Suiv' ("next" in French) marks a next-page link.
                    if soup.find(text=re.compile('Suiv')):
                        nextpage += 1
                    else:
                        nextpage = -1
        return entries
@event('plugin.register')
def register_plugin():
    # Expose the class as the 'cpasbien' search plugin (plugin API v2).
    plugin.register(SearchCPASBIEN, 'cpasbien', groups=['search'], api_ver=2)
| {
"content_hash": "5efc94073b1090ca160e83bc71c6821f",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 118,
"avg_line_length": 39.05882352941177,
"alnum_prop": 0.47270331325301207,
"repo_name": "spencerjanssen/Flexget",
"id": "6b7fbbc0e524c45b063e0800add851e4b464aa17",
"size": "5312",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "flexget/plugins/search_cpasbien.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2178397"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Wrapper script name -- presumably used to run commands with elevated
# privileges; verify against the agent code that invokes it.
AMBARI_SUDO_BINARY = "ambari-sudo.sh"

# Identifiers for the stack-upgrade strategies.
UPGRADE_TYPE_ROLLING = "rolling"
UPGRADE_TYPE_NON_ROLLING = "nonrolling"
UPGRADE_TYPE_HOST_ORDERED = "host_ordered"

# Filesystem locations used by the agent and logfeeder.
AGENT_TMP_DIR = "/var/lib/ambari-agent/tmp"
LOGFEEDER_CONF_DIR = "/usr/lib/ambari-logsearch-logfeeder/conf"
class SERVICE:
    """
    Constants for service names to avoid hardcoding strings.

    Each attribute value equals its attribute name. Kept in alphabetical
    order; HBASE was previously appended out of order at the end.
    """

    ATLAS = "ATLAS"
    FALCON = "FALCON"
    FLUME = "FLUME"
    HAWQ = "HAWQ"
    HBASE = "HBASE"
    HDFS = "HDFS"
    HIVE = "HIVE"
    KAFKA = "KAFKA"
    KNOX = "KNOX"
    MAHOUT = "MAHOUT"
    OOZIE = "OOZIE"
    PIG = "PIG"
    PXF = "PXF"
    RANGER = "RANGER"
    SLIDER = "SLIDER"
    SPARK = "SPARK"
    SQOOP = "SQOOP"
    STORM = "STORM"
    TEZ = "TEZ"
    YARN = "YARN"
    ZEPPELIN = "ZEPPELIN"
    ZOOKEEPER = "ZOOKEEPER"
"content_hash": "a74d3a262cc9149efb31efa4e6ac2211",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 27.035714285714285,
"alnum_prop": 0.7166446499339498,
"repo_name": "sekikn/ambari",
"id": "abb07dae7707fe65d19cfbea25a0f367211498af",
"size": "1536",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "ambari-common/src/main/python/ambari_commons/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "22734"
},
{
"name": "C",
"bytes": "109499"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "CSS",
"bytes": "616806"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "Dockerfile",
"bytes": "8117"
},
{
"name": "HTML",
"bytes": "3725781"
},
{
"name": "Handlebars",
"bytes": "1594385"
},
{
"name": "Java",
"bytes": "26670585"
},
{
"name": "JavaScript",
"bytes": "14647486"
},
{
"name": "Jinja",
"bytes": "147938"
},
{
"name": "Less",
"bytes": "303080"
},
{
"name": "Makefile",
"bytes": "2407"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "298247"
},
{
"name": "PowerShell",
"bytes": "2047735"
},
{
"name": "Python",
"bytes": "7226684"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Shell",
"bytes": "350773"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim Script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "1133"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Make 'tag' the primary key of blog.Tag, dropping the auto 'id'."""

    dependencies = [
        ('blog', '0008_remove_tag_blogpost'),
    ]

    operations = [
        # Remove the implicit auto-increment 'id' column...
        migrations.RemoveField(
            model_name='tag',
            name='id',
        ),
        # ...and promote the 'tag' char field to primary key.
        migrations.AlterField(
            model_name='tag',
            name='tag',
            field=models.CharField(max_length=50, serialize=False, primary_key=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "f42422d2cc27aa6867a99048a2e9622c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 85,
"avg_line_length": 23.304347826086957,
"alnum_prop": 0.5615671641791045,
"repo_name": "GMadorell/djagolb",
"id": "73200f5e9b2b363755cff32ff18d509e61f6bcd4",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/migrations/0009_auto_20141213_1821.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "382159"
},
{
"name": "JavaScript",
"bytes": "175"
},
{
"name": "Python",
"bytes": "30896"
},
{
"name": "Ruby",
"bytes": "905"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
import glob, importlib, inspect, os
import peewee, playhouse
from playhouse.migrate import MySQLMigrator, PostgresqlMigrator, SqliteMigrator
from importlib import import_module
from keydom.util import parse_uri
from malibu.util import log
# All sibling .py files in this package; used to build __all__ so that
# init_database_from_config can discover every model module.
modules = glob.glob(os.path.dirname(__file__) + "/*.py")
__all__ = [os.path.basename(f)[:-3] for f in modules
           if not os.path.basename(f).startswith('_') and
           not f.endswith('__init__.py') and os.path.isfile(f)]

# Deferred database handle: models bind to this proxy now, and the real
# database is attached later in init_database_from_config().
database_proxy = peewee.Proxy()
database_migrator = None
class FKSqliteDatabase(peewee.SqliteDatabase):
    """ A simple wrapper around peewee's SqliteDatabase that
        enables foreign keys with a pragma when the connection
        is initialized.
    """

    def initialize_connection(self, conn):
        # SQLite does not enforce foreign keys unless this pragma is set
        # on each new connection.
        self.execute_sql("PRAGMA foreign_keys=ON;")
class BaseModel(peewee.Model):
    """ Simple base model with the database set as a peewee
        database proxy so we can dynamically initialize the
        database connection with information from the config
        file.
    """

    class Meta:
        # All subclasses share the proxy; the real database is attached in
        # init_database_from_config().
        database = database_proxy
def init_database_from_config(db_config):
    """ Takes a malibu ConfigurationSection object to create
        the database connection accordingly.

        Supports sqlite, postgres and mysql URIs; sets the module-level
        database_migrator and binds database_proxy, then ensures tables
        exist for every BaseModel subclass found in this package.

        Raises ValueError when the section is missing, the URI is empty,
        or the URI protocol is unknown.
    """

    LOG = log.LoggingDriver.find_logger()

    global database_migrator

    if db_config is None:
        raise ValueError("Config section 'database' does not exist!")

    db_uri = db_config.get_string("uri", None)
    if db_uri is None:
        raise ValueError("Config value database.uri can not be empty!")

    db_uri = parse_uri(db_uri)

    if db_uri["protocol"] == "sqlite":
        database = FKSqliteDatabase(db_uri["resource"])
        database_migrator = SqliteMigrator(database)
    elif db_uri["protocol"] == "postgres":
        # BUG FIX: 'import playhouse' at module level does not load the
        # postgres_ext submodule, so the attribute access below could fail
        # with AttributeError. Import the submodule explicitly.
        import playhouse.postgres_ext
        database = playhouse.postgres_ext.PostgresqlExtDatabase(
            db_uri["database"],
            user = db_uri["username"],
            password = db_uri["password"],
            host = db_uri["host"],
            port = db_uri["port"])
        database_migrator = PostgresqlMigrator(database)
    elif db_uri["protocol"] == "mysql":
        database = peewee.MySQLDatabase(
            db_uri["database"],
            user = db_uri["username"],
            password = db_uri["password"],
            host = db_uri["host"],
            port = db_uri["port"])
        database_migrator = MySQLMigrator(database)
    else:
        raise ValueError("Unknown DB protocol: %s" % (db_uri["protocol"]))

    database_proxy.initialize(database)
    database.connect()

    # Import all BaseModels and run create_tables(...)
    tables = []
    for module in __all__:
        mod = import_module("{}.{}".format(__package__, module))
        for member in dir(mod):
            member_obj = getattr(mod, member)
            if not inspect.isclass(member_obj):
                continue
            if member_obj.__name__ == 'BaseModel':
                continue
            if issubclass(member_obj, BaseModel):
                LOG.debug("Loading database model: %s.%s.%s" % (
                    __package__, module, member))
                tables.append(member_obj)

    LOG.debug("Ensuring tables are created (safe=True)")
    # Best-effort: with safe=True existing tables are skipped, but some
    # failures are still possible; log instead of silently swallowing.
    # (Previously a bare 'except: pass', which also caught KeyboardInterrupt.)
    try:
        database.create_tables(tables, safe = True)
    except Exception as e:
        LOG.debug("create_tables failed, continuing: %s" % (e))
| {
"content_hash": "f5ea50f1c895fb611351c534ecfbe34e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 33.16,
"alnum_prop": 0.6188178528347407,
"repo_name": "pirogoeth/keydom",
"id": "f9ecb1f92e1e00544a15dc13688a570036bb7e5a",
"size": "3316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keydom/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47471"
}
],
"symlink_target": ""
} |
import boto3
import os
import copy
import time
import json
import datetime
from boto3.dynamodb.conditions import Key, Attr
def handler(event, context):
    """AWS Lambda handler: record a voter's vote on a bill and enqueue it
    to the 'voting_queue' SQS queue.

    Returns "Success" when the vote is cast; otherwise "Already Voted"
    (note: this is also returned when required event fields are missing).
    """
    # DynamoDB access
    dynamodb = boto3.resource('dynamodb')
    bill_table = dynamodb.Table(os.environ['DB_BILL_TABLE'])
    voter_table = dynamodb.Table(os.environ['DB_VOTER_TABLE'])
    # Attempt to vote
    if 'voterId' in event and event['voterId'] and 'billId' in event and event['billId'] and 'congress' in event and event['congress']:
        voter = voter_table.get_item(Key={'voterId': event['voterId']})["Item"]
        # NOTE(review): 'bill' is fetched but never used -- confirm intent.
        bill = bill_table.get_item(Key={'bill_id': event['billId'], 'congress': event['congress']})
        billId = event['billId']
        if 'vote' in event and event['vote'] and billId not in voter['votes']:
            # User has not voted, cast vote
            dct = voter['votes']
            # Voter records may store 'votes' as a str placeholder; reset
            # to an empty dict before writing the first vote.
            if type(voter['votes']) == str:
                dct = dict()
            # NOTE(review): only the first element/character of
            # event['vote'] is stored -- confirm expected payload shape.
            dct[billId] = event['vote'][0]
            voter_table.update_item(
                Key={
                    'voterId': event['voterId']
                },
                UpdateExpression="SET votes = :r",
                ExpressionAttributeValues={
                    ':r': dct,
                }
            )
            ts=time.time()
            timestamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
            data = {
                "voterId": event['voterId'],
                "billId": event['billId'],
                "component": event['component'],
                "vote": event['vote'],
                'state': voter['state'],
                "timestamp": timestamp,
                "ts": ts
            }
            # SQS resource
            sqs = boto3.resource('sqs')
            # Get the queue
            queue = sqs.get_queue_by_name(QueueName='voting_queue')
            queue.send_message(MessageBody=json.dumps(data))
            return "Success"
    return "Already Voted"
"content_hash": "76c12d831ee46bc78b67f7bee49204a2",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 135,
"avg_line_length": 34.9,
"alnum_prop": 0.4990448901623687,
"repo_name": "peter765/power-polls",
"id": "cd18a6a2a88eea2d1f40cb64684b381f3aa1f9f5",
"size": "2094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lambdas/vote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "58567"
},
{
"name": "JavaScript",
"bytes": "7370"
},
{
"name": "Python",
"bytes": "22988"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and process one synthetic daily series: N=1024 points, moving-median
# trend, 5-step cycle, relative-difference transform, 20 exogenous series.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);
"content_hash": "a9dc9893694a0f0fcb067d81d2fe9f5e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 177,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.7184115523465704,
"repo_name": "antoinecarme/pyaf",
"id": "f76a3928e315a59961e2a7bb0a3d65a171719223",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_RelativeDifference/trend_MovingMedian/cycle_5/ar_/test_artificial_1024_RelativeDifference_MovingMedian_5__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import sys
usage = "compare_multiple_sam.py <annotated SAM file1> <annotated SAM file2> <annotated SAM filen> <output file>"
if len(sys.argv) < 3:
    print usage
    sys.exit(0)

# Last argument is the output path; everything between the script name and
# it is an input SAM file (Python 2 script).
outputFile = sys.argv[len(sys.argv)-1]
#print outputFile
fileObjList = []
for n in range(0, len(sys.argv)-2):
    fileName = sys.argv[n+1]
    fileObj = open(fileName, "r")
    fileObjList.append(fileObj)

#file1 = open(SAMfile1, "r")
outputFile = open(outputFile, "w")

# Read the files in lockstep, one line from each per iteration; assumes all
# inputs have the same number of lines in the same record order.
lineList = []
for file in fileObjList:
    lineList.append(file.readline())

cur_snapd = 0
best_snapd = 999
best_line = ""

while lineList[0] != '':
    # print lineList
    for line in lineList:
        data = line.split()
        if (data[0][0]!="@"): #line is a SAM data line
            if (len(data)==14): #there is a match for that line
                # Field 14 looks like tag:type:value; the value is compared
                # numerically (lower is better) -- presumably a SNAP distance.
                cur_snapd = data[13].split(":")[2]
                if (int(cur_snapd) <= int(best_snapd)): #current match is better than best so far
                    best_snapd = cur_snapd
                    best_line = line
    if (best_line==""): #no match at all
        best_line = lineList[0]
    outputFile.write(best_line)
    # Reset per-record state and advance every input one line.
    cur_snapd = 0
    best_snapd = 999
    best_line = ""
    lineList = []
    for file in fileObjList:
        lineList.append(file.readline())

for file in fileObjList:
    file.close()
outputFile.close()
| {
"content_hash": "cff4da14f9d1dfeea10b7b2c94275ef5",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 113,
"avg_line_length": 22.37037037037037,
"alnum_prop": 0.6655629139072847,
"repo_name": "chiulab/surpi",
"id": "1af36c812a4824108e61284c3ac952086e07fff0",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compare_multiple_sam.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "1091"
},
{
"name": "Perl",
"bytes": "19088"
},
{
"name": "Python",
"bytes": "13619"
},
{
"name": "Shell",
"bytes": "181790"
}
],
"symlink_target": ""
} |
import sys, os, datetime, requests, json, uuid, glob, argparse, copy, logging, time, io
from time import sleep
from sqlalchemy import Column, Integer, String, ForeignKey, Table, create_engine, MetaData, Date, DateTime, Float, cast, or_, and_, asc
from src.common.env_loader import *
from src.common.shellprinting import *
from src.logger.logger import Logger
from src.pycore import PyCore
import pandas as pd
import numpy as np
# Docker containers need a different backend for plotting:
# http://stackoverflow.com/questions/3453188/matplotlib-display-plot-on-a-remote-machine
# Headless plotting backend must be selected before pyplot is imported.
if str(os.getenv("ENV_SHOW_PLOTS", "0")) == "0":
    import matplotlib
    matplotlib.use("Agg")
import matplotlib.pyplot as plt
from pandas.io.json import json_normalize

# Set up the ENV
default_env = "Local"
use_env = os.getenv("ENV_DEPLOYMENT_TYPE", default_env)
os.environ["ENV_DEPLOYMENT_TYPE"] = use_env

# Presence of the EC2 metadata tool means we run on AWS; force the Test env.
if os.path.exists("/opt/aws/bin/ec2-metadata"):
    os.environ["ENV_DEPLOYMENT_TYPE"] = "Test"

debug = False

# syslog is unavailable on Windows; fall back to Windows-specific defaults.
try:
    import syslog
except Exception,b:
    """
    New Release supports running on Windows 10 Home (untested on Professional or Enterprise)
    """
    print "Not Running on Linux"
    os.environ["ENV_SYSLOG_ENABLED"] = "0"
    os.environ["ENV_DBS_ENABLED"] = "1"
    os.environ["ENV_RDS_ENABLED"] = "1"
    os.environ["ENV_CORE_CONFIG_FILE"] = "C:\Users\YOURUSER\dev\scipype\configs\windows-jupyter.json"
    os.environ["ENV_DATA_SRC_DIR"] = "C:\Users\YOURUSER\dev"
    os.environ["ENV_DEPLOYMENT_TYPE"] = "Windows"

if debug:
    lg("Start common python initialization Env(" + str(os.getenv("ENV_DEPLOYMENT_TYPE")) + ")", 6)

# Feature toggles and paths, all overridable via environment variables.
core_config = os.getenv("ENV_CORE_CONFIG_FILE", "/opt/work/configs/jupyter.json")
data_dir = os.getenv("ENV_DATA_SRC_DIR", "/opt/scipype/data/src")
db_enabled = os.getenv("ENV_DBS_ENABLED", "0")
rd_enabled = os.getenv("ENV_RDS_ENABLED", "1")
lg_enabled = os.getenv("ENV_SYSLOG_ENABLED", "1")
logger_name = os.getenv("ENV_PY_LOGGER_NAME", "ds")
env_name = os.getenv("ENV_DEPLOYMENT_TYPE", "Local")

"""
If the environment variable ENV_IN_DOCKER=0 then you can load a
specific env runtime mapped by the environment variable: ENV_DEPLOYMENT_TYPE
where the ["Core"]["Envs"][ENV_DEPLOYMENT_TYPE] is defined in:
https://github.com/jay-johnson/sci-pype/blob/master/configs/jupyter.json
and also mapped to a specific env file on disk or in the container:
https://github.com/jay-johnson/sci-pype/blob/master/src/common/env_loader.py
"""
load_env_for_specific_runtime()

core = PyCore(core_config)
now = datetime.datetime.now()
ra_name = "CACHE"
ra_key = "NODB_PERFORM_THIS_WORK"
logger = None
if str(lg_enabled) == "1" or str(lg_enabled).lower() == "true":
    logger = Logger(logger_name, "/dev/log", logging.DEBUG)
if debug:
    lg("Loading Redis Apps", 6)
if str(rd_enabled) == "1" or str(rd_enabled).lower() == "true":
    core.load_redis_apps()
# NOTE(review): original indentation was lost in this dump; m_log/m_name are
# assumed to be set unconditionally -- confirm against upstream source.
core.m_log = logger
core.m_name = logger_name
if str(db_enabled) == "1" or str(db_enabled).lower() == "true":
    if debug:
        lg("Loading Database Apps", 6)
    core.load_db_apps()
if debug:
    lg("End common python initialization Env(" + str(os.getenv("ENV_DEPLOYMENT_TYPE")) + ")", 6)
#
#
################################################################
#
#
################################################################
| {
"content_hash": "7a805f8af250d7d598ef86b5f6b0b388",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 135,
"avg_line_length": 36.54736842105263,
"alnum_prop": 0.6414170506912442,
"repo_name": "jay-johnson/sci-pype",
"id": "c28d003fceb30cb5f0f26eb8f8f1f918efe885fb",
"size": "4128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/common/inits_for_python.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "700602"
},
{
"name": "Python",
"bytes": "753297"
},
{
"name": "Ruby",
"bytes": "49233"
},
{
"name": "Shell",
"bytes": "90148"
},
{
"name": "Vim Script",
"bytes": "1253"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `ide` app.

    Replaces the unique constraint on SourceFile (project, file_name) with a
    wider one on (project, file_name, target), so the same file name may
    exist once per target within a project.
    """

    def forwards(self, orm):
        # Removing unique constraint on 'SourceFile', fields ['project', 'file_name']
        db.delete_unique(u'ide_sourcefile', ['project_id', 'file_name'])
        # Adding unique constraint on 'SourceFile', fields ['project', 'file_name', 'target']
        db.create_unique(u'ide_sourcefile', ['project_id', 'file_name', 'target'])

    def backwards(self, orm):
        # Exact inverse of forwards(): restore the narrower constraint.
        # Removing unique constraint on 'SourceFile', fields ['project', 'file_name', 'target']
        db.delete_unique(u'ide_sourcefile', ['project_id', 'file_name', 'target'])
        # Adding unique constraint on 'SourceFile', fields ['project', 'file_name']
        db.create_unique(u'ide_sourcefile', ['project_id', 'file_name'])

    # Frozen ORM model state auto-generated by South; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'ide.buildresult': {
            'Meta': {'object_name': 'BuildResult'},
            'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': "orm['ide.Project']"}),
            'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "'236d8712-410b-4a77-8f3d-2c31f1cfc1e7'", 'max_length': '36'})
        },
        'ide.buildsize': {
            'Meta': {'object_name': 'BuildSize'},
            'binary_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sizes'", 'to': "orm['ide.BuildResult']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'resource_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'total_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'worker_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'ide.dependency': {
            'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'Dependency'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies'", 'to': "orm['ide.Project']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
        },
        'ide.project': {
            'Meta': {'object_name': 'Project'},
            'app_capabilities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'app_company_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'app_is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'app_is_shown_on_communication': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'app_is_watchface': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'app_jshint': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'app_keys': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'app_keywords': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'app_long_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'app_modern_multi_js': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'app_platforms': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'app_short_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'app_uuid': ('django.db.models.fields.CharField', [], {'default': "'6db8e5fa-37b9-41ee-91f4-014fc59aa2e6'", 'max_length': '36', 'null': 'True', 'blank': 'True'}),
            'app_version_label': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'github_branch': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'github_hook_build': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'github_hook_uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
            'github_last_commit': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'github_last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'github_repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'optimisation': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'project_dependencies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['ide.Project']", 'symmetrical': 'False'}),
            'project_type': ('django.db.models.fields.CharField', [], {'default': "'native'", 'max_length': '10'}),
            'sdk_version': ('django.db.models.fields.CharField', [], {'default': "'2'", 'max_length': '6'})
        },
        'ide.resourcefile': {
            'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
            'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_menu_icon': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['ide.Project']"})
        },
        'ide.resourceidentifier': {
            'Meta': {'object_name': 'ResourceIdentifier'},
            'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'compatibility': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'memory_format': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': "orm['ide.ResourceFile']"}),
            'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'space_optimisation': ('django.db.models.fields.CharField', [], {'max_length': '7', 'null': 'True', 'blank': 'True'}),
            'storage_format': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'target_platforms': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'tracking': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'ide.resourcevariant': {
            'Meta': {'unique_together': "(('resource_file', 'tags'),)", 'object_name': 'ResourceVariant'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_legacy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variants'", 'to': "orm['ide.ResourceFile']"}),
            'tags': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '50', 'blank': 'True'})
        },
        'ide.sourcefile': {
            'Meta': {'unique_together': "(('project', 'file_name', 'target'),)", 'object_name': 'SourceFile'},
            'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'folded_lines': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': "orm['ide.Project']"}),
            'target': ('django.db.models.fields.CharField', [], {'default': "'app'", 'max_length': '10'})
        },
        'ide.templateproject': {
            'Meta': {'object_name': 'TemplateProject', '_ormbases': ['ide.Project']},
            u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
            'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
        },
        'ide.usergithub': {
            'Meta': {'object_name': 'UserGithub'},
            'avatar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'nonce': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'github'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['auth.User']"}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
        },
        'ide.usersettings': {
            'Meta': {'object_name': 'UserSettings'},
            'accepted_terms': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'autocomplete': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'keybinds': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
            'tab_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
            'theme': ('django.db.models.fields.CharField', [], {'default': "'cloudpebble'", 'max_length': '50'}),
            'use_spaces': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'whats_new': ('django.db.models.fields.PositiveIntegerField', [], {'default': '22'})
        }
    }
complete_apps = ['ide'] | {
"content_hash": "2f068e2da05891f65721e2648456d8ea",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 195,
"avg_line_length": 82.67032967032966,
"alnum_prop": 0.5531038149674332,
"repo_name": "pebble/cloudpebble",
"id": "2fa453df8c348d8691c65d97a1106b741a7a1071",
"size": "15070",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ide/migrations/0050_auto__del_unique_sourcefile_project_file_name__add_unique_sourcefile_p.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4664"
},
{
"name": "CSS",
"bytes": "70652"
},
{
"name": "HTML",
"bytes": "122226"
},
{
"name": "JavaScript",
"bytes": "508689"
},
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "Python",
"bytes": "950740"
},
{
"name": "Shell",
"bytes": "7895"
}
],
"symlink_target": ""
} |
import gc
import logging
import os.path
# Flag: True when the Linux thermal-zone sysfs file is present on this host.
# CPUTemp.get_sensordef() uses it to hide the sensor on unsupported systems.
temp = True
if not os.path.exists("/sys/class/thermal/thermal_zone0/temp"):
    temp = False
class CPUTemp(object):
    """PRTG mini-probe sensor reporting the CPU temperature.

    Reads the temperature from the Linux sysfs thermal-zone file
    ``/sys/class/thermal/thermal_zone0/temp`` (millidegrees Celsius) and
    reports it in Celsius or Fahrenheit as configured in the PRTG WebGUI.
    """

    def __init__(self):
        # NOTE(review): gc is enabled by default in CPython; this mirrors the
        # other mini-probe sensors' behavior.
        gc.enable()

    @staticmethod
    def get_kind():
        """Return the sensor kind identifier used by the probe framework."""
        return "mpcputemp"

    @staticmethod
    def get_sensordef(testing=False):
        """Return the sensor definition shown in the PRTG WebGUI.

        Returns an empty string (sensor hidden) when the thermal sysfs file
        is missing (module-level ``temp`` flag), unless ``testing`` is True.
        """
        sensordefinition = {
            "kind": CPUTemp.get_kind(),
            "name": "CPU Temperature",
            "description": "Returns the CPU temperature",
            "default": "yes",
            "help": "Returns the CPU temperature",
            "tag": "mpcputempsensor",
            "groups": [
                {
                    "name": "Group",
                    "caption": "Temperature settings",
                    "fields": [
                        {
                            "type": "radio",
                            "name": "celfar",
                            "caption": "Choose between Celsius or Fahrenheit display",
                            "help": "Choose wether you want to return the value in Celsius or Fahrenheit",
                            "options": {
                                "C": "Celsius",
                                "F": "Fahrenheit"
                            },
                            "default": "C"
                        },
                    ]
                }
            ]
        }
        # Hide the sensor when the host exposes no thermal-zone file.
        if not temp and not testing:
            sensordefinition = ""
        return sensordefinition

    @staticmethod
    def get_data(data, out_queue):
        """Read the temperature and put a PRTG result dict on ``out_queue``.

        On failure an error payload is queued instead, so one broken sensor
        cannot kill the probe loop. Always returns 1.
        """
        temperature = CPUTemp()
        try:
            tmp = temperature.read_temp(data)
        except Exception as e:
            # Broad catch is deliberate: report the failure through the
            # queue and keep the probe running.
            logging.error("Ooops Something went wrong with '%s' sensor %s. Error: %s" % (temperature.get_kind(),
                                                                                         data['sensorid'], e))
            data = {
                "sensorid": int(data['sensorid']),
                "error": "Exception",
                "code": 1,
                "message": "CPUTemp sensor failed. See log for details"
            }
            out_queue.put(data)
            return 1
        data = {
            "sensorid": int(data['sensorid']),
            "message": "OK",
            "channel": list(tmp)
        }
        del temperature
        gc.collect()
        out_queue.put(data)
        return 1

    @staticmethod
    def read_temp(config):
        """Return the PRTG channel list holding the current CPU temperature.

        ``config['celfar']`` selects Celsius ("C") or Fahrenheit ("F").
        """
        # Use a context manager so the sysfs handle is always closed (the
        # previous version leaked the handle if readlines() raised).
        with open("/sys/class/thermal/thermal_zone0/temp", "r") as tmp:
            temp_string = tmp.readlines()[0]
        logging.debug("CPUTemp Debug message: Temperature from file: %s" % temp_string)
        # sysfs reports millidegrees Celsius.
        temp_c = float(temp_string) / 1000.0
        temp_f = temp_c * 9.0 / 5.0 + 32.0
        logging.debug("CPUTemp Debug message: Temperature after calculations: %s" % temp_c)
        value = temp_c if config['celfar'] == "C" else temp_f
        return [{"name": "CPU Temperature",
                 "mode": "float",
                 "unit": "Custom",
                 "customunit": config['celfar'],
                 "LimitMode": 1,
                 "LimitMaxError": 40,
                 "LimitMaxWarning": 35,
                 "value": float(value)}]
| {
"content_hash": "63d0e16d2da6e498e57c420dd43129bb",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 112,
"avg_line_length": 33.8,
"alnum_prop": 0.4359870898332437,
"repo_name": "MyPhate/PythonMiniProbe",
"id": "6e9f836b419e935803cbb592ad4daa6b224bf515",
"size": "5278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniprobe/sensors/cputemp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "206631"
},
{
"name": "Shell",
"bytes": "3052"
},
{
"name": "Smarty",
"bytes": "105"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import time
import glob
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../"))
from pyAudioAnalysis import utilities
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import ShortTermFeatures
# Small constant added to denominators to guard against division by zero
# (used e.g. in beat_extraction's confidence ratio).
eps = 0.00000001
""" Time-domain audio features """
def beat_extraction(short_features, window_size, plot=False):
    """Estimate the beat rate (tempo) of a musical signal.

    For each beat-related short-term feature, local maxima are detected and
    the distribution of distances between successive maxima is accumulated
    into a shared histogram; the histogram's peak gives the beat period.

    ARGUMENTS:
     - short_features:  a np array (n_feats x numOfShortTermWindows)
     - window_size:     short-term window step in seconds (converts frame
                        distances to time)
     - plot:            if True, show per-feature peak plots and the final
                        BPM histogram
    RETURNS:
     - bpm:    estimate of beats per minute
     - ratio:  a confidence measure (peak mass / total histogram mass)
    """
    # Features that are related to the beat tracking task
    # (indices into the short-term feature matrix; feature 2 is excluded):
    selected_features = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10,
                         11, 12, 13, 14, 15, 16, 17, 18]
    # Longest beat period considered: 2 seconds worth of frames (>= 30 BPM).
    max_beat_time = int(round(2.0 / window_size))
    hist_all = np.zeros((max_beat_time,))
    # for each feature
    for ii, i in enumerate(selected_features):
        # peak-detection threshold: 2 x mean absolute first difference
        dif_threshold = 2.0 * (np.abs(short_features[i, 0:-1] -
                                      short_features[i, 1::])).mean()
        if dif_threshold <= 0:
            # keep threshold strictly positive for constant feature tracks
            dif_threshold = 0.0000000000000001
        # detect local maxima
        [pos1, _] = utilities.peakdet(short_features[i, :], dif_threshold)
        position_diffs = []
        # distances (in frames) between successive local maxima
        for j in range(len(pos1)-1):
            position_diffs.append(pos1[j+1]-pos1[j])
        # histogram of inter-peak distances over 1..max_beat_time frames
        histogram_times, histogram_edges = \
            np.histogram(position_diffs, np.arange(0.5, max_beat_time + 1.5))
        hist_centers = (histogram_edges[0:-1] + histogram_edges[1::]) / 2.0
        # normalize by the number of short-term frames before accumulating
        histogram_times = \
            histogram_times.astype(float) / short_features.shape[1]
        hist_all += histogram_times
        if plot:
            plt.subplot(9, 2, ii + 1)
            plt.plot(short_features[i, :], 'k')
            for k in pos1:
                plt.plot(k, short_features[i, k], 'k*')
            f1 = plt.gca()
            f1.axes.get_xaxis().set_ticks([])
            f1.axes.get_yaxis().set_ticks([])
    if plot:
        plt.show(block=False)
        plt.figure()
    # Beat period = argmax of the aggregated histogram:
    max_indices = np.argmax(hist_all)
    bpms = 60 / (hist_centers * window_size)
    bpm = bpms[max_indices]
    # ... and the beat ratio (confidence):
    ratio = hist_all[max_indices] / (hist_all.sum() + eps)
    if plot:
        # filter out >500 beats from plotting:
        hist_all = hist_all[bpms < 500]
        bpms = bpms[bpms < 500]
        plt.plot(bpms, hist_all, 'k')
        plt.xlabel('Beats per minute')
        plt.ylabel('Freq Count')
        plt.show(block=True)
    return bpm, ratio
def mid_feature_extraction(signal, sampling_rate, mid_window, mid_step,
                           short_window, short_step):
    """Mid-term feature extraction.

    Runs short-term feature extraction and then aggregates each short-term
    feature sequence over sliding mid-term windows, producing one mean and
    one std statistic per short-term feature.

    ARGUMENTS:
     - signal:         the audio samples
     - sampling_rate:  sampling frequency (Hz)
     - mid_window, mid_step:      mid-term window / step (in samples)
     - short_window, short_step:  short-term window / step (in samples)
    RETURNS:
     - mid_features:       np array (2 * n_feats x n_mid_windows)
     - short_features:     the underlying short-term feature matrix
     - mid_feature_names:  names ("<feat>_mean", then "<feat>_std")
    """
    short_features, short_feature_names = \
        ShortTermFeatures.feature_extraction(signal, sampling_rate,
                                             short_window, short_step)
    n_stats = 2  # mean and std
    n_feats = len(short_features)
    # number of short-term frames covered by one mid-term window
    mid_window_ratio = round((mid_window -
                              (short_window - short_step)) / short_step)
    mt_step_ratio = int(round(mid_step / short_step))

    mid_features = [[] for _ in range(n_stats * n_feats)]
    mid_feature_names = [""] * (n_stats * n_feats)

    for feat in range(n_feats):
        mid_feature_names[feat] = short_feature_names[feat] + "_mean"
        mid_feature_names[feat + n_feats] = short_feature_names[feat] + "_std"
        num_frames = len(short_features[feat])
        start = 0
        # slide the mid-term window over the short-term frame sequence
        while start < num_frames:
            end = min(start + mid_window_ratio, num_frames)
            window = short_features[feat][start:end]
            mid_features[feat].append(np.mean(window))
            mid_features[feat + n_feats].append(np.std(window))
            start += mt_step_ratio

    return np.nan_to_num(np.array(mid_features)), short_features, \
        mid_feature_names
""" Feature Extraction Wrappers
- The first two feature extraction wrappers are used to extract
long-term averaged audio features for a list of WAV files stored in a
given category.
It is important to note that, one single feature is extracted per WAV
file (not the whole sequence of feature vectors)
"""
def directory_feature_extraction(folder_path, mid_window, mid_step,
                                 short_window, short_step,
                                 compute_beat=True):
    """Extract one long-term averaged feature vector per audio file in a folder.

    Each supported audio file is loaded, mid-term features are extracted and
    then averaged over time, so every file contributes exactly ONE feature
    vector to the returned matrix.

    ARGUMENTS:
     - folder_path:   the path of the audio directory
     - mid_window, mid_step:      mid-term window and step (in seconds)
     - short_window, short_step:  short-term window and step (in seconds)
     - compute_beat:  if True, append BPM and beat-confidence features
    RETURNS:
     - mid_term_features:  np matrix (one row per successfully processed file)
     - wav_file_list2:     list of the files actually processed
     - mid_feature_names:  feature names (plus "bpm"/"ratio" when beat is on)
    """
    mid_term_features = np.array([])
    process_times = []
    # supported audio extensions
    types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg')
    wav_file_list = []
    for files in types:
        wav_file_list.extend(glob.glob(os.path.join(folder_path, files)))
    wav_file_list = sorted(wav_file_list)
    wav_file_list2, mid_feature_names = [], []
    for i, file_path in enumerate(wav_file_list):
        print("Analyzing file {0:d} of {1:d}: {2:s}".format(i + 1,
                                                            len(wav_file_list),
                                                            file_path))
        # skip zero-byte files without attempting to decode them
        if os.stat(file_path).st_size == 0:
            print(" (EMPTY FILE -- SKIPPING)")
            continue
        sampling_rate, signal = audioBasicIO.read_audio_file(file_path)
        if sampling_rate == 0:
            # read failure is signalled by a zero sampling rate
            continue
        t1 = time.time()
        signal = audioBasicIO.stereo_to_mono(signal)
        # require at least 1/5 of a second of audio
        if signal.shape[0] < float(sampling_rate)/5:
            print(" (AUDIO FILE TOO SMALL - SKIPPING)")
            continue
        wav_file_list2.append(file_path)
        if compute_beat:
            mid_features, short_features, mid_feature_names = \
                mid_feature_extraction(signal, sampling_rate,
                                       round(mid_window * sampling_rate),
                                       round(mid_step * sampling_rate),
                                       round(sampling_rate * short_window),
                                       round(sampling_rate * short_step))
            # NOTE(review): beat rate is estimated from the short-term
            # feature sequence using short_step as the time resolution
            beat, beat_conf = beat_extraction(short_features, short_step)
        else:
            mid_features, _, mid_feature_names = \
                mid_feature_extraction(signal, sampling_rate,
                                       round(mid_window * sampling_rate),
                                       round(mid_step * sampling_rate),
                                       round(sampling_rate * short_window),
                                       round(sampling_rate * short_step))
        mid_features = np.transpose(mid_features)
        # long term averaging of mid-term statistics
        mid_features = mid_features.mean(axis=0)
        if (not np.isnan(mid_features).any()) and \
                (not np.isinf(mid_features).any()):
            if compute_beat:
                mid_features = np.append(mid_features, beat)
                mid_features = np.append(mid_features, beat_conf)
                mid_feature_names += ["bpm","ratio"]
            if len(mid_term_features) == 0:
                # first file: start the feature matrix
                mid_term_features = mid_features
            else:
                mid_term_features = np.vstack((mid_term_features, mid_features))
            t2 = time.time()
            duration = float(len(signal)) / sampling_rate
            # processing-time / audio-duration ratio for the summary below
            process_times.append((t2 - t1) / duration)
    if len(process_times) > 0:
        print("Feature extraction complexity ratio: "
              "{0:.1f} x realtime".format((1.0 /
                                           np.mean(np.array(process_times)))))
    return mid_term_features, wav_file_list2, mid_feature_names
def multiple_directory_feature_extraction(path_list, mid_window, mid_step,
                                          short_window, short_step,
                                          compute_beat=False):
    """Extract long-term averaged features for several directories at once.

    Each entry of path_list is treated as one audio class: the directory is
    processed with directory_feature_extraction() and the class name is
    derived from the last path component. Useful for building the feature
    matrices of a classification training set (one matrix per class).

    ARGUMENTS:
     - path_list:     list of directory paths, one per audio class
     - mid_window, mid_step:      mid-term window and step (in seconds)
     - short_window, short_step:  short-term window and step (in seconds)
     - compute_beat:  forwarded to directory_feature_extraction
    RETURNS:
     - features:     list of feature matrices (one per non-empty directory)
     - class_names:  list of class names
     - file_names:   list of per-class file lists
    """
    features = []
    class_names = []
    file_names = []
    for folder in path_list:
        feature_matrix, folder_files, _ = \
            directory_feature_extraction(folder, mid_window, mid_step,
                                         short_window, short_step,
                                         compute_beat=compute_beat)
        # skip directories in which no audio file was found
        if not feature_matrix.shape[0] > 0:
            continue
        features.append(feature_matrix)
        file_names.append(folder_files)
        path_parts = folder.split(os.sep)
        # a trailing separator yields an empty last component
        if folder[-1] == os.sep:
            class_names.append(path_parts[-2])
        else:
            class_names.append(path_parts[-1])
    return features, class_names, file_names
def directory_feature_extraction_no_avg(folder_path, mid_window, mid_step,
                                        short_window, short_step):
    """Extract mid-term features for every file in a folder, WITHOUT averaging.

    Unlike directory_feature_extraction(), each file contributes its whole
    sequence of mid-term feature vectors, stacked into one matrix.

    ARGUMENTS:
     - folder_path:   the path of the audio directory
     - mid_window, mid_step:      mid-term window and step (in seconds)
     - short_window, short_step:  short-term window and step (in seconds)
    RETURNS:
     - mid_features:   stacked feature matrix (one row per mid-term window)
     - signal_idx:     per-row index of the originating file
     - wav_file_list:  the sorted list of matched files
    """
    patterns = ('*.wav', '*.aif', '*.aiff', '*.ogg')
    wav_file_list = []
    for pattern in patterns:
        wav_file_list.extend(glob.glob(os.path.join(folder_path, pattern)))
    wav_file_list = sorted(wav_file_list)

    signal_idx = np.array([])
    mid_features = np.array([])
    for file_idx, file_path in enumerate(wav_file_list):
        sampling_rate, signal = audioBasicIO.read_audio_file(file_path)
        if sampling_rate == 0:
            # unreadable file: skip (it stays in wav_file_list, though)
            continue
        signal = audioBasicIO.stereo_to_mono(signal)
        feature_matrix, _, _ = \
            mid_feature_extraction(signal, sampling_rate,
                                   round(mid_window * sampling_rate),
                                   round(mid_step * sampling_rate),
                                   round(sampling_rate * short_window),
                                   round(sampling_rate * short_step))
        feature_matrix = np.transpose(feature_matrix)
        n_rows = feature_matrix.shape[0]
        if len(mid_features) == 0:
            # first processed file starts the matrix
            mid_features = feature_matrix
            signal_idx = np.zeros((n_rows, ))
        else:
            mid_features = np.vstack((mid_features, feature_matrix))
            signal_idx = np.append(signal_idx,
                                   file_idx * np.ones((n_rows, )))
    return mid_features, signal_idx, wav_file_list
"""
The following two feature extraction wrappers extract features for given audio
files, however NO LONG-TERM AVERAGING is performed. Therefore, the output for
each audio file is NOT A SINGLE FEATURE VECTOR but a whole feature matrix.
Also, another difference between the following two wrappers and the previous
is that they NO LONG-TERM AVERAGING IS PERFORMED. In other words, the WAV
files in these functions are not used as uniform samples that need to be
averaged but as sequences
"""
def mid_feature_extraction_to_file(file_path, mid_window, mid_step,
                                   short_window, short_step, output_file,
                                   store_short_features=False, store_csv=False,
                                   plot=False):
    """Extract mid-term features from one audio file and persist them.

    Steps:
      a) read the WAV file and convert it to mono
      b) run mid-term feature extraction
      c) save the mid-term feature matrix as <output_file>_mt.npy
      d) optionally also save a CSV copy (store_csv)
      e) optionally save the short-term features too (store_short_features)

    The ``plot`` flag only controls the progress print-outs.
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(file_path)
    signal = audioBasicIO.stereo_to_mono(signal)
    mid_features, short_features, _ = \
        mid_feature_extraction(signal, sampling_rate,
                               round(sampling_rate * mid_window),
                               round(sampling_rate * mid_step),
                               round(sampling_rate * short_window),
                               round(sampling_rate * short_step))

    def _dump(matrix, suffix, label):
        # persist one feature matrix as .npy and, optionally, .csv
        np.save(output_file + suffix, matrix)
        if plot:
            print(label + " np file: " + output_file + suffix + ".npy saved")
        if store_csv:
            np.savetxt(output_file + suffix + ".csv", matrix.T, delimiter=",")
            if plot:
                print(label + " CSV file: " + output_file + suffix +
                      ".csv saved")

    if store_short_features:
        _dump(short_features, "_st", "Short-term")
    _dump(mid_features, "_mt", "Mid-term")
def mid_feature_extraction_file_dir(folder_path, mid_window, mid_step,
                                    short_window, short_step,
                                    store_short_features=False, store_csv=False,
                                    plot=False):
    """Run mid_feature_extraction_to_file() on every WAV file in a folder.

    Each file's features are written next to the file itself (the audio
    path doubles as the output prefix).
    """
    wav_files = glob.glob(folder_path + os.sep + '*.wav')
    for wav_path in wav_files:
        mid_feature_extraction_to_file(wav_path, mid_window, mid_step,
                                       short_window, short_step, wav_path,
                                       store_short_features, store_csv, plot)
| {
"content_hash": "7f8fbfbf1f3bf652b16e0ea1b22681bb",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 80,
"avg_line_length": 41.281167108753316,
"alnum_prop": 0.5576045749534152,
"repo_name": "tyiannak/pyAudioAnalysis",
"id": "f764816f53ed9c33107e5b5ab19b9565af93858a",
"size": "15563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyAudioAnalysis/MidTermFeatures.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2167"
},
{
"name": "HTML",
"bytes": "4082"
},
{
"name": "MATLAB",
"bytes": "1758"
},
{
"name": "Python",
"bytes": "207596"
},
{
"name": "Shell",
"bytes": "8796"
}
],
"symlink_target": ""
} |
"""Module to provide implicit behavior based on enviroment.
Allows the datastore package to infer the current dataset ID and
connection from the enviroment.
"""
import os
from gcloud._helpers import _app_engine_id
from gcloud._helpers import _compute_engine_id
from gcloud._helpers import _lazy_property_deco
from gcloud.datastore.connection import Connection
# Environment variables consulted when inferring the default dataset ID.
# The GCD variable is used by the `gcd` local development/testing tool.
_DATASET_ENV_VAR_NAME = 'GCLOUD_DATASET_ID'
_GCD_DATASET_ENV_VAR_NAME = 'DATASTORE_DATASET'
def _get_production_dataset_id():
    """Return the dataset ID from the production env variable, or ``None``."""
    return os.environ.get(_DATASET_ENV_VAR_NAME)
def _get_gcd_dataset_id():
    """Return the dataset ID from the ``gcd`` testing env variable, or ``None``."""
    return os.environ.get(_GCD_DATASET_ENV_VAR_NAME)
def _determine_default_dataset_id(dataset_id=None):
"""Determine default dataset ID explicitly or implicitly as fall-back.
In implicit case, supports four environments. In order of precedence, the
implicit environments are:
* GCLOUD_DATASET_ID environment variable
* DATASTORE_DATASET environment variable (for ``gcd`` testing)
* Google App Engine application ID
* Google Compute Engine project ID (from metadata server)
:type dataset_id: string
:param dataset_id: Optional. The dataset ID to use as default.
:rtype: string or ``NoneType``
:returns: Default dataset ID if it can be determined.
"""
if dataset_id is None:
dataset_id = _get_production_dataset_id()
if dataset_id is None:
dataset_id = _get_gcd_dataset_id()
if dataset_id is None:
dataset_id = _app_engine_id()
if dataset_id is None:
dataset_id = _compute_engine_id()
return dataset_id
def set_default_dataset_id(dataset_id=None):
    """Set the default dataset ID, explicitly or inferred from the environment.

    Delegates resolution to :func:`_determine_default_dataset_id` (explicit
    value, then GCLOUD_DATASET_ID, DATASTORE_DATASET, App Engine app ID, and
    finally the Compute Engine project ID).

    :type dataset_id: string
    :param dataset_id: Optional. The dataset ID to use as default.

    :raises: :class:`EnvironmentError` if no dataset ID was implied.
    """
    resolved = _determine_default_dataset_id(dataset_id=dataset_id)
    if resolved is None:
        raise EnvironmentError('No dataset ID could be inferred.')
    _DEFAULTS.dataset_id = resolved
def get_default_dataset_id():
    """Return the dataset ID currently stored as the default.

    :rtype: string or ``NoneType``
    :returns: The default dataset ID if one has been set.
    """
    return getattr(_DEFAULTS, 'dataset_id')
def get_connection():
    """Shortcut method to establish a connection to the Cloud Datastore.

    Use this if you are going to access several datasets
    with the same set of credentials (unlikely):

    >>> from gcloud import datastore
    >>> connection = datastore.get_connection()
    >>> key1 = datastore.Key('Kind', 1234, dataset_id='dataset1')
    >>> key2 = datastore.Key('Kind', 1234, dataset_id='dataset2')
    >>> entity1 = datastore.get(key1, connection=connection)
    >>> entity2 = datastore.get(key2, connection=connection)

    :rtype: :class:`gcloud.datastore.connection.Connection`
    :returns: A connection defined with the proper credentials.
    """
    # Credentials are discovered from the ambient environment.
    connection = Connection.from_environment()
    return connection
def set_default_connection(connection=None):
    """Set default connection either explicitly or implicitly as fall-back.

    :type connection: :class:`gcloud.datastore.connection.Connection`
    :param connection: A connection provided to be the default; when omitted
                       (or falsy) one is built from the environment.
    """
    if not connection:
        connection = get_connection()
    _DEFAULTS.connection = connection
def get_default_connection():
    """Return the connection currently registered as the default.

    :rtype: :class:`gcloud.datastore.connection.Connection` or ``NoneType``
    :returns: The default connection if one has been set.
    """
    return _DEFAULTS.connection
class _DefaultsContainer(object):
    """Container for defaults.

    :type connection: :class:`gcloud.datastore.connection.Connection`
    :param connection: Persistent implied connection from environment.

    :type dataset_id: string
    :param dataset_id: Persistent implied dataset ID from environment.
    """

    # NOTE(review): ``_lazy_property_deco`` is defined elsewhere in this
    # module; it appears to turn these staticmethods into lazily-computed
    # class-level properties (computed on first access) -- confirm against
    # its definition before relying on that.
    @_lazy_property_deco
    @staticmethod
    def dataset_id():
        """Return the implicit default dataset ID."""
        return _determine_default_dataset_id()

    @_lazy_property_deco
    @staticmethod
    def connection():
        """Return the implicit default connection."""
        return get_connection()

    def __init__(self, connection=None, dataset_id=None, implicit=False):
        # With implicit=True, only shadow the lazy class-level values when
        # explicit ones were passed; otherwise always assign (even None).
        if connection is not None or not implicit:
            self.connection = connection
        if dataset_id is not None or not implicit:
            self.dataset_id = dataset_id
_DEFAULTS = _DefaultsContainer(implicit=True)
| {
"content_hash": "f00d22b6f035d249995ae7185d36093e",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 77,
"avg_line_length": 31.024844720496894,
"alnum_prop": 0.6972972972972973,
"repo_name": "blowmage/gcloud-python",
"id": "fd5ba9e5b8f9623c99eed2fccfbd024d9d5096e2",
"size": "5592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gcloud/datastore/_implicit_environ.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "20396"
},
{
"name": "Python",
"bytes": "861630"
},
{
"name": "Shell",
"bytes": "8257"
}
],
"symlink_target": ""
} |
from mock import Mock, patch
from nose.tools import with_setup, raises
from pretend import stub
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Model as DjangoModel
from neo4django import utils
from neo4django.neo4jclient import EnhancedGraphDatabase
from neo4django.db.models import NodeModel
# Nose copies its asserts from unittest, and assert_list_equal only moved
# from unittest2 into unittest in Python 2.7. The fallback below keeps
# Python 2.6 working.
try:
    from nose.tools import assert_list_equal
except ImportError:
    from itertools import starmap, izip
    from operator import eq as equals

    def assert_list_equal(a, b):
        """
        Minimal 2.6-compatible stand-in for ``nose.tools.assert_list_equal``:
        both sequences must have the same length and compare equal pairwise.
        """
        assert len(a) == len(b)
        for pair_matches in starmap(equals, izip(a, b)):
            assert pair_matches
def test_subborn_dict_restricts_keys():
    d = utils.StubbornDict(('foo',), {'bar': 'baz'})
    d['foo'] = 'qux'  # writes to a stubborn key are silently dropped
    assert 'foo' not in d
def test_subborn_dict_allows_keys():
    d = utils.StubbornDict(('foo',), {'bar': 'baz'})
    d['qux'] = 'foo'  # non-stubborn keys behave like a normal dict
    assert 'qux' in d
def test_uniqify():
    original = [1, 1, 'foo', 2, 'foo', 'bar', 'baz']
    deduped = utils.uniqify(original)
    # Duplicates are removed while first-occurrence order is preserved.
    assert_list_equal([1, 'foo', 2, 'bar', 'baz'], deduped)
def test_all_your_base():
    # A small hierarchy (with one multiple-inheritance leaf) to walk.
    class A(object):
        pass

    class B(A):
        pass

    class C(B):
        pass

    class D(object):
        pass

    class E(C, D):
        pass

    # all_your_base yields the class itself, then ancestors up to the root.
    assert_list_equal(list(utils.all_your_base(C, A)), [C, B, A])
    assert_list_equal(list(utils.all_your_base(E, B)), [E, C, B])
def test_write_through():
    fake = Mock()
    fake._meta.write_through = 'foo'
    # The helper should read the flag straight off obj._meta.
    assert utils.write_through(fake) == 'foo'
def test_write_through_default():
    # Objects without a _meta.write_through fall back to False.
    assert utils.write_through(object()) is False
def setup_attrrouter():
    """Build a fresh AttrRouter and routed member before each test."""
    global router, member
    member = stub(foo='bar')
    router = utils.AttrRouter()
    router.member = member
@with_setup(setup_attrrouter, None)
def test_attrrouter_router_default():
    # A brand-new router starts with an empty routing table.
    fresh = utils.AttrRouter()
    assert fresh._router == {}
@with_setup(setup_attrrouter, None)
def test_attrrouter_with_routed_attrs():
    # _router reads whatever is stored under the router's private key.
    router.__dict__[router._key] = 'foo'
    assert router._router == 'foo'
@with_setup(setup_attrrouter, None)
def test_attrrouter_gets_obj_attr():
    # Unrouted attributes are served from the router itself.
    router.foo = 'bar'
    assert router.foo == 'bar'
@with_setup(setup_attrrouter, None)
def test_attrrouter_gets_routed():
    # Wire the route by hand so only __getattr__ dispatch is under test.
    router._router['get'] = {'foo': member}
    assert router.foo == 'bar'
@with_setup(setup_attrrouter, None)
def test_attrrouter_sets_obj_attr():
    # Unrouted writes land on the router itself.
    router.foo = 'bar'
    assert getattr(router, 'foo') == 'bar'
@with_setup(setup_attrrouter, None)
def test_attrrouter_sets_routed():
    # Wire both read and write routes by hand so only dispatch is tested.
    router._router['set'] = {'foo': member}
    router._router['get'] = {'foo': member}
    router.foo = 'baz'
    # The write must be visible through the router and on the member.
    assert member.foo == 'baz'
    assert router.foo == 'baz'
@with_setup(setup_attrrouter, None)
def test_attrrouter_dels_obj_attr():
    # Unrouted deletions remove the attribute from the router itself.
    router.foo = 'bar'
    del router.foo
    assert not hasattr(router, 'foo')
@with_setup(setup_attrrouter, None)
def test_attrrouter_dels_routed():
    # Wire the delete and read routes by hand so only dispatch is tested.
    router._router['del'] = {'foo': member}
    router._router['get'] = {'foo': member}
    del router.foo
    # The deletion must take effect on the member and through the router.
    assert not hasattr(member, 'foo')
    assert not hasattr(router, 'foo')
@with_setup(setup_attrrouter, None)
def test_attrrouter_route_get():
    # _route() with get=True wires up read access to the member.
    router._route(('foo',), member, get=True)
    assert router.foo == 'bar'
@with_setup(setup_attrrouter, None)
def test_attrrouter_route_set():
    # _route() with set=True forwards writes to the member as well.
    router._route(('foo',), member, set=True)
    router.foo = 'baz'
    assert member.foo == 'baz'
    assert router.foo == 'baz'
@with_setup(setup_attrrouter, None)
def test_attrrouter_route_delete():
    # _route() with delete=True forwards deletions to the member as well.
    router._route(('foo',), member, delete=True)
    del router.foo
    assert not hasattr(member, 'foo')
    assert not hasattr(router, 'foo')
@raises(AttributeError)
@with_setup(setup_attrrouter, None)
def test_attrrouter_unroute_get():
    router._route(('foo',), member, get=True)
    router._unroute(('foo',), get=True)
    # With the read route removed, the lookup must fail.
    router.foo
@with_setup(setup_attrrouter, None)
def test_attrrouter_unroute_set():
    router._route(('foo',), member, set=True)
    router._unroute(('foo',), set=True)
    router.foo = 'baz'
    # Only the router sees the write; the member keeps its old value.
    assert member.foo == 'bar'
    assert router.foo == 'baz'
@raises(AttributeError)
@with_setup(setup_attrrouter, None)
def test_attrrouter_unroute_delete():
    router._route(('foo',), member, delete=True)
    router._unroute(('foo',), delete=True)
    # With the delete route removed, the deletion must fail.
    del router.foo
class MyDjangoModel(DjangoModel):
    """Trivial concrete Django model used as a non-Neo4j fixture."""
def test_integration_router_is_node_model():
    integration = utils.Neo4djangoIntegrationRouter()
    # Both the class itself and an instance should be recognized.
    assert integration._is_node_model(NodeModel)
    assert integration._is_node_model(NodeModel())
def test_integration_router_allow_relation_mismatch():
    integration = utils.Neo4djangoIntegrationRouter()
    # A node model may not relate to a plain Django model.
    assert integration.allow_relation(NodeModel(), MyDjangoModel()) is False
def test_integration_router_allow_relation_between_node_models():
    integration = utils.Neo4djangoIntegrationRouter()
    # None means "no opinion" -- another router may still decide.
    assert integration.allow_relation(NodeModel(), NodeModel()) is None
def test_integration_router_allow_relation_between_django_models():
    integration = utils.Neo4djangoIntegrationRouter()
    # None means "no opinion" -- another router may still decide.
    assert integration.allow_relation(MyDjangoModel(), MyDjangoModel()) is None
@raises(ImproperlyConfigured)
@patch('neo4django.utils.import_module')
def test_load_client_fail_module_import(import_module):
    import_module.side_effect = ImportError
    try:
        utils.load_client('foo.bar.baz')
    except ImproperlyConfigured as e:
        # The wrapped error should explain the import failure.
        assert 'Could not import' in e.message
        raise
@raises(ImproperlyConfigured)
@patch('neo4django.utils.import_module')
def test_load_client_module_class_missing(import_module):
    # The imported module lacks the requested 'baz' attribute.
    import_module.return_value = stub(foo='bar')
    try:
        utils.load_client('foo.bar.baz')
    except ImproperlyConfigured as e:
        assert 'Neo4j client module' in e.message
        raise
@raises(ImproperlyConfigured)
@patch('neo4django.utils.import_module')
def test_load_client_not_correct_subclass(import_module):
    # A class that does not extend EnhancedGraphDatabase must be rejected.
    wrong_cls = type('MyClass', (object,), {})
    import_module.return_value = stub(baz=wrong_cls)
    try:
        utils.load_client('foo.bar.baz')
    except ImproperlyConfigured as e:
        assert 'is not a subclass of EnhancedGraphDatabase' in e.message
        raise
@patch('neo4django.utils.import_module')
def test_load_client(import_module):
    # A proper EnhancedGraphDatabase subclass is returned unchanged.
    client_cls = type('MyClass', (EnhancedGraphDatabase,), {})
    import_module.return_value = stub(baz=client_cls)
    assert utils.load_client('foo.bar.baz') is client_cls
def test_sliding_pair():
    pairs = list(utils.sliding_pair(('foo', 'bar', 'baz')))
    # The final element is paired with None.
    assert_list_equal(
        [('foo', 'bar'), ('bar', 'baz'), ('baz', None)], pairs)
def test_assignable_list():
    assignable = utils.AssignableList()
    assignable.foo = 'bar'
    # The attribute is readable and visible to hasattr().
    assert assignable.foo == 'bar'
    assert hasattr(assignable, 'foo')
    # Newly assigned attributes are tracked for later retrieval.
    assert 'foo' in assignable.get_new_attrs()
def test_enum_numerical_items():
    # Positional names are numbered from zero, in order.
    numbered = utils.Enum('foo', 'bar', 'baz')
    assert (numbered.FOO, numbered.BAR, numbered.BAZ) == (0, 1, 2)
def test_enum_explict_items():
    # Keyword names keep their explicitly supplied values.
    named = utils.Enum(foo='bar', baz='qux')
    assert (named.FOO, named.BAZ) == ('bar', 'qux')
def test_countdown():
    tick = utils.countdown(5)
    # Exactly five True results, then False from there on.
    assert all([tick() for _ in xrange(5)])
    assert not any([tick() for _ in xrange(100)])  # excessive, but proves it
def test_apply_to_buffer():
    fake = Mock(return_value='a')
    buffered = utils.apply_to_buffer(fake, xrange(100), size=5)
    # The callable is invoked once per buffered item.
    assert_list_equal(buffered, ['a'] * 5)
    assert fake.call_count == 5
@raises(StopIteration)
def test_apply_to_buffer_raises_stop_iteration():
    # A zero-sized buffer exhausts immediately.
    utils.apply_to_buffer(Mock(return_value='a'), xrange(100), size=0)
def test_buffer_iterator():
    squares = list(utils.buffer_iterator(lambda v: v ** 2, xrange(5), size=2))
    assert_list_equal(squares, [0, 1, 4, 9, 16])
| {
"content_hash": "d6a29b1ae5df222199b26b21aad4d5c5",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 79,
"avg_line_length": 25.62568306010929,
"alnum_prop": 0.6617976330099158,
"repo_name": "mpetyx/django-rdfGraph",
"id": "9f43f20db3dac5dea933594dbdd1fca664f7a672",
"size": "9379",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "graphBackend/graphBackend/api/neo4django/tests/test_utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "9849"
},
{
"name": "Python",
"bytes": "329628"
}
],
"symlink_target": ""
} |
"""
Sessions that persist in the database.
Every L{SESSION_CLEAN_FREQUENCY} seconds, a pass is made over all persistent
sessions, and those that are more than L{PERSISTENT_SESSION_LIFETIME} seconds
old are deleted. Transient sessions die after L{TRANSIENT_SESSION_LIFETIME}
seconds.
These three globals can be overridden by passing appropriate values to the
L{PersistentSessionWrapper} constructor: C{sessionCleanFrequency},
C{persistentSessionLifetime}, and C{transientSessionLifetime}.
"""
from datetime import timedelta
from twisted.cred import credentials
from twisted.internet import reactor
from epsilon import extime
from axiom import attributes, item, userbase
from nevow import guard
# How often expired persistent sessions are purged, in seconds.
SESSION_CLEAN_FREQUENCY = 60 * 60 * 25  # 25 hours -- just over a day
# How long a persistent ("remember me") session lives, in seconds.
PERSISTENT_SESSION_LIFETIME = 60 * 60 * 24 * 7 * 2  # 2 weeks
# How long a transient (in-memory guard) session lives, in seconds.
TRANSIENT_SESSION_LIFETIME = 60 * 12 + 32  # 12 minutes, 32 seconds.
def usernameFromRequest(request):
    """
    Extract a username of the form <user>@<domain> from an HTTP request.

    If the submitted username lacks a domain part, the request's Host
    header (with any port stripped) supplies it.

    @type request: L{inevow.IRequest}
    @param request: A HTTP request

    @return: A C{str}
    """
    username = request.args.get('username', [''])[0]
    if '@' in username:
        return username
    host = request.getHeader('host').split(':')[0]
    return '%s@%s' % (username, host)
class PersistentSession(item.Item):
    """
    A session that persists on the database.

    These sessions should not store any state, but are used only to determine
    that the user has previously authenticated and should be given a transient
    session (a regular guard session, not database persistent) without
    providing credentials again.
    """
    typeName = 'persistent_session'
    schemaVersion = 1

    # Opaque identifier shared with the client via the persistent cookie.
    sessionKey = attributes.bytes(allowNone=False, indexed=True)
    # Updated by renew(); stale sessions are purged based on this timestamp.
    lastUsed = attributes.timestamp(defaultFactory=extime.Time, indexed=True)
    authenticatedAs = attributes.bytes(allowNone=False, doc="""
    The username and domain that this session was authenticated as.
    """)

    def renew(self):
        """
        Renew the lifetime of this object.

        Call this when the user logs in so this session does not expire.
        """
        self.lastUsed = extime.Time()
class DBPassthrough(object):
    """
    A dictionaryish thing that manages sessions and interfaces with guard.

    This is set as the C{sessions} attribute on a L{nevow.guard.SessionWrapper}
    instance, or in this case, a subclass. Guard uses a vanilla dict by
    default; here we pretend to be a dict and introduce persistent-session
    behaviour.
    """
    def __init__(self, wrapper):
        self.wrapper = wrapper
        # Transient (in-memory) guard sessions, keyed by session uid.
        self._transientSessions = {}

    def __contains__(self, key):
        # We use __getitem__ here so that transient sessions are always
        # created. Otherwise, sometimes guard will call __contains__ and assume
        # the transient session is there, without creating it.
        try:
            self[key]
        except KeyError:
            return False
        return True
    has_key = __contains__

    def __getitem__(self, key):
        if key is None:
            raise KeyError("None is not a valid session key")
        try:
            return self._transientSessions[key]
        except KeyError:
            # No transient session exists; if a persistent session matches
            # this key, mint a fresh transient session on the fly.
            if self.wrapper.authenticatedUserForKey(key):
                session = self.wrapper.sessionFactory(self.wrapper, key)
                self._transientSessions[key] = session
                # Override guard's built-in default lifetime with the
                # wrapper's configured transient lifetime.
                session.setLifetime(self.wrapper.sessionLifetime)
                session.checkExpired()
                return session
            raise

    def __setitem__(self, key, value):
        self._transientSessions[key] = value

    def __delitem__(self, key):
        del self._transientSessions[key]

    def __repr__(self):
        return 'DBPassthrough at %i; %r' % (id(self), self._transientSessions)
class PersistentSessionWrapper(guard.SessionWrapper):
    """
    Extends L{nevow.guard.SessionWrapper} to reauthenticate previously
    authenticated users.

    There are 4 possible states:

        1. new user, no persistent session, no transient session
        2. anonymous user, no persistent session, transient session
        3. returning user, persistent session, no transient session
        4. active user, persistent session, transient session

    Guard will look in the sessions dict, and if it finds a key matching a
    cookie sent by the client, will return the value as the session. However,
    if a user has a persistent session cookie, but no transient session, one is
    created here.
    """
    def __init__(
            self,
            store,
            portal,
            transientSessionLifetime=TRANSIENT_SESSION_LIFETIME,
            persistentSessionLifetime=PERSISTENT_SESSION_LIFETIME,
            sessionCleanFrequency=SESSION_CLEAN_FREQUENCY,
            enableSubdomains=False,
            domains=(),
            clock=None,
            **kw):
        guard.SessionWrapper.__init__(self, portal, **kw)
        self.store = store
        # Replace guard's plain dict with the persistent-aware mapping.
        self.sessions = DBPassthrough(self)
        self.cookieKey = 'divmod-user-cookie'
        self.sessionLifetime = transientSessionLifetime
        # NOTE(review): persistentSessionLifetime is stored but the module
        # constant PERSISTENT_SESSION_LIFETIME is used directly in
        # _cleanSessions and savorSessionCookie below, so a custom value
        # passed here has no effect -- confirm whether that is intended.
        self.persistentSessionLifetime = persistentSessionLifetime
        self.sessionCleanFrequency = sessionCleanFrequency
        self._enableSubdomains = enableSubdomains
        self._domains = domains
        self._clock = reactor if clock is None else clock
        if self.store is not None:
            self._cleanSessions()

    def createSessionForKey(self, key, user):
        """
        Create a persistent session in the database.

        @type key: L{bytes}
        @param key: The persistent session identifier.

        @type user: L{bytes}
        @param user: The username the session will belong to.
        """
        PersistentSession(
            store=self.store,
            sessionKey=key,
            authenticatedAs=user)

    def authenticatedUserForKey(self, key):
        """
        Find a persistent session for a user.

        @type key: L{bytes}
        @param key: The persistent session identifier.

        @rtype: L{bytes} or C{None}
        @return: The avatar ID the session belongs to, or C{None} if no such
            session exists.
        """
        session = self.store.findFirst(
            PersistentSession, PersistentSession.sessionKey == key)
        if session is None:
            return None
        else:
            # A successful lookup refreshes the session's expiry clock.
            session.renew()
            return session.authenticatedAs

    def removeSessionWithKey(self, key):
        """
        Remove a persistent session, if it exists.

        @type key: L{bytes}
        @param key: The persistent session identifier.
        """
        self.store.query(
            PersistentSession,
            PersistentSession.sessionKey == key).deleteFromStore()

    def _cleanSessions(self):
        """
        Clean expired sessions.
        """
        tooOld = extime.Time() - timedelta(seconds=PERSISTENT_SESSION_LIFETIME)
        self.store.query(
            PersistentSession,
            PersistentSession.lastUsed < tooOld).deleteFromStore()
        self._lastClean = self._clock.seconds()

    def _maybeCleanSessions(self):
        """
        Clean expired sessions if it's been long enough since the last clean.
        """
        sinceLast = self._clock.seconds() - self._lastClean
        if sinceLast > self.sessionCleanFrequency:
            self._cleanSessions()

    def cookieDomainForRequest(self, request):
        """
        Pick a domain to use when setting cookies.

        @type request: L{nevow.inevow.IRequest}
        @param request: Request to determine cookie domain for

        @rtype: C{str} or C{None}
        @return: Domain name to use when setting cookies, or C{None} to
            indicate that only the domain in the request should be used
        """
        host = request.getHeader('host')
        if host is None:
            # This is a malformed request that we cannot possibly handle
            # safely, fall back to the default behaviour.
            return None

        host = host.split(':')[0]
        for domain in self._domains:
            suffix = "." + domain
            if host == domain:
                # The request is for a domain which is directly recognized.
                if self._enableSubdomains:
                    # Subdomains are enabled, so the suffix is returned to
                    # enable the cookie for this domain and all its subdomains.
                    return suffix
                # Subdomains are not enabled, so None is returned to allow the
                # default restriction, which will enable this cookie only for
                # the domain in the request, to apply.
                return None
            if self._enableSubdomains and host.endswith(suffix):
                # The request is for a subdomain of a directly recognized
                # domain and subdomains are enabled. Drop the unrecognized
                # subdomain portion and return the suffix to enable the cookie
                # for this domain and all its subdomains.
                return suffix

        if self._enableSubdomains:
            # No directly recognized domain matched the request. If subdomains
            # are enabled, prefix the request domain with "." to make the
            # cookie valid for that domain and all its subdomains. This
            # probably isn't extremely useful. Perhaps it shouldn't work this
            # way.
            return "." + host

        # Subdomains are disabled and the domain from the request was not
        # recognized. Return None to get the default behavior.
        return None

    def savorSessionCookie(self, request):
        """
        Make the session cookie last as long as the persistent session.

        @type request: L{nevow.inevow.IRequest}
        @param request: The HTTP request object for the guard login URL.
        """
        cookieValue = request.getSession().uid
        request.addCookie(
            self.cookieKey, cookieValue, path='/',
            max_age=PERSISTENT_SESSION_LIFETIME,
            domain=self.cookieDomainForRequest(request))

    def login(self, request, session, creds, segments):
        """
        Called to check the credentials of a user.

        Here we extend guard's implementation to preauthenticate users if they
        have a valid persistent session.

        @type request: L{nevow.inevow.IRequest}
        @param request: The HTTP request being handled.

        @type session: L{nevow.guard.GuardSession}
        @param session: The user's current session.

        @type creds: L{twisted.cred.credentials.ICredentials}
        @param creds: The credentials the user presented.

        @type segments: L{tuple}
        @param segments: The remaining segments of the URL.

        @return: A deferred firing with the user's avatar.
        """
        self._maybeCleanSessions()
        if isinstance(creds, credentials.Anonymous):
            # Anonymous request: if a persistent session matches the guard
            # session uid, upgrade to preauthenticated credentials.
            preauth = self.authenticatedUserForKey(session.uid)
            if preauth is not None:
                self.savorSessionCookie(request)
                creds = userbase.Preauthenticated(preauth)

        def cbLoginSuccess(input):
            """
            User authenticated successfully.

            Create the persistent session, and associate it with the
            username. (XXX it doesn't work like this now)
            """
            user = request.args.get('username')
            if user is not None:
                # create a database session and associate it with this user
                cookieValue = session.uid
                if request.args.get('rememberMe'):
                    self.createSessionForKey(cookieValue, creds.username)
                    self.savorSessionCookie(request)
            return input

        return (
            guard.SessionWrapper.login(
                self, request, session, creds, segments)
            .addCallback(cbLoginSuccess))

    def explicitLogout(self, session):
        """
        Handle a user-requested logout.

        Here we override guard's behaviour for the logout action to delete the
        persistent session. In this case the user has explicitly requested a
        logout, so the persistent session must be deleted to require the user
        to log in on the next request.

        @type session: L{nevow.guard.GuardSession}
        @param session: The session of the user logging out.
        """
        guard.SessionWrapper.explicitLogout(self, session)
        self.removeSessionWithKey(session.uid)

    def getCredentials(self, request):
        """
        Derive credentials from an HTTP request.

        Override SessionWrapper.getCredentials to add the Host: header to the
        credentials. This will make web-based virtual hosting work.

        @type request: L{nevow.inevow.IRequest}
        @param request: The request being handled.

        @rtype: L{twisted.cred.credentials.ICredentials}
        @return: Credentials derived from the HTTP request.
        """
        username = usernameFromRequest(request)
        password = request.args.get('password', [''])[0]
        return credentials.UsernamePassword(username, password)
| {
"content_hash": "e7a9271725cce723f2716b9c8afc6eac",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 84,
"avg_line_length": 33.93877551020408,
"alnum_prop": 0.6328171978352375,
"repo_name": "twisted/mantissa",
"id": "3b81a27c3b4345e5456a1d56a1960a7a83e42af8",
"size": "13423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmantissa/websession.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27264"
},
{
"name": "HTML",
"bytes": "57439"
},
{
"name": "JavaScript",
"bytes": "865621"
},
{
"name": "Python",
"bytes": "1631375"
}
],
"symlink_target": ""
} |
import calendar
import datetime
import json
import logging
import time
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import ndb
BUILTIN_TYPES = (int, long, float, bool, dict, basestring, list)
class SerializableMixin(object):
    """Mixin adding JSON-dict serialization to db/ndb model classes."""

    def to_json_dict(self, includes=None, excludes=None):
        """Convert an ndb or db entity to a JSON-serializable dict.

        Args:
            includes: optional iterable of extra attribute/method names to
                evaluate and add to the output.
            excludes: optional iterable of keys to drop from the output.

        Returns:
            A dict of JSON-serializable values.

        Raises:
            ValueError: if a property value has no known encoding.
        """
        output = {}
        if self.key:
            output['id'] = self.key.id()
            output['key'] = self.key.urlsafe()
        for key, prop in self._properties.iteritems():
            value = getattr(self, key)
            if value is None or isinstance(value, BUILTIN_TYPES):
                output[key] = value
            elif isinstance(value, datetime.date):
                # Convert date/datetime to a UTC unix timestamp. Previously
                # this used time.mktime(), which interprets the UTC struct
                # as *local* time; calendar.timegm() matches EntityEncoder
                # below and is correct for UTC tuples.
                # NOTE(review): plain dates (not datetimes) may lack
                # utctimetuple() -- confirm DateProperty values reach here.
                output[key] = calendar.timegm(value.utctimetuple())
            elif isinstance(value, (db.GeoPt, ndb.GeoPt)):
                output[key] = {'lat': value.lat, 'lon': value.lon}
            elif isinstance(value, blobstore.BlobKey):
                output[key] = str(value)
            elif isinstance(value, (db.Key, ndb.Key)):
                output[key] = value.id()
            elif isinstance(value, SerializableMixin):
                output[key] = value.to_json_dict()
            elif isinstance(value, (db.Model, ndb.Model)):
                output[key] = value.to_dict()
            else:
                raise ValueError('Cannot encode %s' % repr(prop))
        if includes:
            for inc in includes:
                attr = getattr(self, inc, None)
                if attr is None:
                    cls = self.__class__
                    logging.warn('Cannot encode %s' % cls)
                    continue
                # Include either the attribute value or a method's result.
                if callable(attr):
                    output[inc] = attr()
                else:
                    output[inc] = attr
        if excludes:
            [output.pop(exc) for exc in excludes if exc in output]
        return output
class EntityEncoder(json.JSONEncoder):
    """JSON encoder aware of dates, serializable mixins, and datastore models."""

    def default(self, obj):
        # Dates and datetimes become UTC unix timestamps.
        if isinstance(obj, datetime.date):
            return calendar.timegm(obj.utctimetuple())
        if isinstance(obj, SerializableMixin):
            return obj.to_json_dict()
        if isinstance(obj, (db.Model, ndb.Model)):
            return obj.to_dict()
        # Let the base class raise the standard TypeError.
        return json.JSONEncoder.default(self, obj)
| {
"content_hash": "0a321111a9f35aa6850f4b0604d7763d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 70,
"avg_line_length": 31.189873417721518,
"alnum_prop": 0.554788961038961,
"repo_name": "tylertreat/gaeutils",
"id": "7b04433d975b6ff89b5024e4a3822e864bc61aa2",
"size": "2464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gaeutils/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7424"
}
],
"symlink_target": ""
} |
import sqlite3
| {
"content_hash": "b70399fc848fb0795041e3582e12f589",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 14,
"avg_line_length": 15,
"alnum_prop": 0.8666666666666667,
"repo_name": "SocialNPHS/SocialNPHS",
"id": "615a6b855fbf3c60f4367e95c85df536719dea4b",
"size": "15",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SocialNPHS/data/tweets/database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28505"
}
],
"symlink_target": ""
} |
import fileinput
import re
# Process raw output from govsm.com, either House, Senate or Governors, or
# campaigns for any of the three. Note that for campaigns for Governors,
# you will need to insert a line
#
# Welcome to the Governors Campaigns
#
# above the section containing the campaigns for governor.
#
# Input is wiki-table-style text ('||'-separated fields); output is one
# formatted line per legislator/candidate, other lines passed through.

# Parser state: whether we are in a "Campaigns" section, plus the
# most-recently seen district/party (kept for the commented-out
# campaign-detection heuristic below).
campaigns = False
where = ""
party = ""
for line in fileinput.input():
    line = line.strip()
    # Section headers establish the office title for subsequent rows.
    # NOTE(review): 'title' is only bound once a "Welcome to the..." header
    # has been seen; a table row before any header would raise NameError.
    if re.match("^Welcome to the.*House", line):
        title = "Rep"
    elif re.match("^Welcome to the.*Senate", line):
        title = "Sen"
    elif re.match("^Welcome to the.*Governors", line):
        title = "Gov"
    if re.match("^Welcome to the.*Campaigns", line):
        print "set campaigns to True"
        campaigns = True
    fields = re.split("\|\|", line)
    # Rows with 10+ fields are data rows; everything else is passed through.
    if len(fields) >= 10:
        first = fields[1].strip()
        # Strip wiki markup and footnote spans from the last name.
        last = fields[2].strip()
        last = re.sub(r"<span.*?</span>", "", last)
        if "//" in last:
            last = re.sub(r"^[^ ]* ", "", last)
        last = re.sub(r"\].*", "", last)
        lastparty = party
        party = fields[3].strip()
        party = re.sub(".*\|", "", party).strip()
        lastwhere = where
        where = fields[4].strip()
        #print "lastwhere = '%s', where = '%s'" % (lastwhere, where)
        # NOTE(review): the conditional below is disabled, so every row is
        # emitted with the "Cand-" prefix regardless of section -- confirm
        # whether that is intended.
        #if campaigns and lastwhere == where and lastparty == party:
        fulltitle = "Cand-" + title
        #else:
        #    fulltitle = title
        # Normalize the Twitter account field to '@handle' or a placeholder.
        account = fields[6].strip()
        if account in ("No", "T"):
            account = "-----------"
        else:
            #print account
            account = re.sub(r"/?\]\].*", "", account)
            #print account
            account = re.sub(r"\|?<span.*", "", account)
            #print account
            account = re.sub(r".*twitter.com.*/", "", account)
            #print account
            account = re.sub(r".*=", "", account)
            #print account
            account = "@" + account
        # House districts like "TX03" split into state + district number.
        m = re.match("^([A-Z][A-Z])0*([0-9]+)?$", where)
        if title == "Rep" and m:
            state = m.group(1)
            district = m.group(2) or "At Large"
            office = "%s %s %s" % (title, state, district)
        else:
            office = title + " " + where
        print "%s. %s, %s %s %s (%s)" % (fulltitle, last, first, account, party, office)
    elif not line.startswith("|-"):
        print line
| {
"content_hash": "1214c3ae7a7a70831434c1ed9d1a1e0a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 84,
"avg_line_length": 29.88888888888889,
"alnum_prop": 0.5659851301115242,
"repo_name": "utcompling/textgrounder",
"id": "d06bea45b846683a3093a68112b5749a73e79554",
"size": "2171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/lists/python/proc-govsm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "23949"
},
{
"name": "Python",
"bytes": "515705"
},
{
"name": "Scala",
"bytes": "1736497"
},
{
"name": "Shell",
"bytes": "99947"
}
],
"symlink_target": ""
} |
from proboscis import test
from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
GROUP = "scenario.instance_create_group"
class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
    """Factory resolving the shared InstanceCreateRunner for these groups."""

    # Module and class names the RunnerFactory machinery loads at run time.
    _runner_ns = 'instance_create_runners'
    _runner_cls = 'InstanceCreateRunner'
@test(depends_on_groups=["services.initialize"],
      runs_after_groups=[PRE_INSTANCES],
      groups=[GROUP, groups.INST_CREATE])
class InstanceCreateGroup(TestGroup):
    """Test Instance Create functionality."""

    def __init__(self):
        # All groups in this module share one runner instance so state
        # (instance ids, etc.) carries across groups.
        super(InstanceCreateGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def create_empty_instance(self):
        """Create an empty instance."""
        self.test_runner.run_empty_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
      groups=[GROUP, groups.INST_INIT_CREATE])
class InstanceInitCreateGroup(TestGroup):
    """Test Instance Init Create functionality."""

    def __init__(self):
        super(InstanceInitCreateGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def create_initial_configuration(self):
        """Create a configuration group for a new initialized instance."""
        self.test_runner.run_initial_configuration_create()

    # runs_after orders this within the group: config must exist first.
    @test(runs_after=[create_initial_configuration])
    def create_initialized_instance(self):
        """Create an instance with initial properties."""
        self.test_runner.run_initialized_instance_create()
@test(depends_on_groups=[groups.INST_CREATE],
      groups=[GROUP, groups.INST_CREATE_WAIT],
      runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE,
                         groups.INST_ERROR_DELETE])
class InstanceCreateWaitGroup(TestGroup):
    """Test that Instance Create Completes."""

    def __init__(self):
        super(InstanceCreateWaitGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def wait_for_instance(self):
        """Waiting for main instance to become active."""
        self.test_runner.run_wait_for_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE],
      groups=[GROUP, groups.INST_INIT_CREATE_WAIT],
      runs_after_groups=[groups.INST_CREATE_WAIT])
class InstanceInitCreateWaitGroup(TestGroup):
    """Test that Instance Init Create Completes."""

    def __init__(self):
        super(InstanceInitCreateWaitGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def wait_for_init_instance(self):
        """Waiting for init instance to become active."""
        self.test_runner.run_wait_for_init_instance()

    @test(depends_on=[wait_for_init_instance])
    def add_initialized_instance_data(self):
        """Add data to the initialized instance."""
        self.test_runner.run_add_initialized_instance_data()

    @test(runs_after=[add_initialized_instance_data])
    def validate_initialized_instance(self):
        """Validate the initialized instance data and properties."""
        self.test_runner.run_validate_initialized_instance()
@test(depends_on_groups=[groups.INST_INIT_CREATE_WAIT],
      groups=[GROUP, groups.INST_INIT_DELETE])
class InstanceInitDeleteGroup(TestGroup):
    """Test Initialized Instance Delete functionality."""

    def __init__(self):
        super(InstanceInitDeleteGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def delete_initialized_instance(self):
        """Delete the initialized instance."""
        self.test_runner.run_initialized_instance_delete()
@test(depends_on_groups=[groups.INST_INIT_DELETE],
      runs_after_groups=[groups.INST_ERROR_DELETE],
      groups=[GROUP, groups.INST_INIT_DELETE_WAIT])
class InstanceInitDeleteWaitGroup(TestGroup):
    """Test that Initialized Instance Delete Completes."""

    def __init__(self):
        super(InstanceInitDeleteWaitGroup, self).__init__(
            InstanceCreateRunnerFactory.instance())

    @test
    def wait_for_init_delete(self):
        """Wait for the initialized instance to be gone."""
        self.test_runner.run_wait_for_init_delete()

    # The configuration can only be removed once no instance references it.
    @test(runs_after=[wait_for_init_delete])
    def delete_initial_configuration(self):
        """Delete the initial configuration group."""
        self.test_runner.run_initial_configuration_delete()
| {
"content_hash": "fd45cdf8c1797de837c0aedb58544619",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 74,
"avg_line_length": 34.292307692307695,
"alnum_prop": 0.6888739344997756,
"repo_name": "hplustree/trove",
"id": "83a1f16b819638b10f8073878aae0693547c3238",
"size": "5085",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "trove/tests/scenario/groups/instance_create_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4757844"
},
{
"name": "Shell",
"bytes": "191911"
}
],
"symlink_target": ""
} |
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_list_request(
    endpoint_name,  # type: str
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that lists jobs of a batch endpoint.

    Positional arguments fill the URL path; the optional ``deployment``
    and ``skiptoken`` keyword arguments become query parameters.  Any
    remaining kwargs are forwarded untouched to :class:`HttpRequest`.
    """
    filter_deployment = kwargs.pop('deployment', None)  # type: Optional[str]
    continuation = kwargs.pop('skiptoken', None)  # type: Optional[str]

    # Expand the URL template with the escaped/validated path pieces.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs')
    full_url = _format_url_section(
        template,
        endpointName=_SERIALIZER.url("endpoint_name", endpoint_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
    )

    # Query string: fixed api-version plus the two optional filters.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", "2020-09-01-dataplanepreview", 'str')
    if filter_deployment is not None:
        query['deployment'] = _SERIALIZER.query("deployment", filter_deployment, 'str')
    if continuation is not None:
        query['$skiptoken'] = _SERIALIZER.query("skiptoken", continuation, 'str')

    # Headers: JSON responses only.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=full_url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_create_request(
    endpoint_name,  # type: str
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP POST request that creates a batch endpoint job.

    The request body (``json=``/``content=``) travels through **kwargs
    straight into :class:`HttpRequest`; only the optional
    ``content_type`` kwarg is consumed here to set the header.
    """
    body_content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    # Expand the URL template; note the endpoint name is pattern-checked
    # on create (unlike list/get).
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs')
    full_url = _format_url_section(
        template,
        endpointName=_SERIALIZER.url("endpoint_name", endpoint_name, 'str', pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,254}$'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
    )

    # Query string: fixed api-version only.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", "2020-09-01-dataplanepreview", 'str')

    # Headers: Content-Type only when the caller supplied one.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if body_content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", body_content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="POST",
        url=full_url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_get_request(
    endpoint_name,  # type: str
    id,  # type: str
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP GET request that fetches a single batch job by id."""
    # Expand the URL template with the escaped/validated path pieces.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs/{id}')
    full_url = _format_url_section(
        template,
        endpointName=_SERIALIZER.url("endpoint_name", endpoint_name, 'str'),
        id=_SERIALIZER.url("id", id, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
    )

    # Query string: fixed api-version only.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", "2020-09-01-dataplanepreview", 'str')

    # Headers: JSON responses only.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", "application/json", 'str')

    return HttpRequest(
        method="GET",
        url=full_url,
        params=query,
        headers=headers,
        **kwargs
    )
# fmt: on
class BatchJobEndpointOperations(object):
    """BatchJobEndpointOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client and (de)serializers are injected by the generated
        # service client; this operation group only holds references.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        endpoint_name,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        deployment=None,  # type: Optional[str]
        skiptoken=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BatchJobResourceArmPaginatedResult"]
        """Lists batch inference endpoint jobs in this endpoint.

        Lists batch inference endpoint jobs in this endpoint.

        :param endpoint_name: Name of endpoint.
        :type endpoint_name: str
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param deployment: Optional filter for jobs related to a specific deployment in the endpoint.
        :type deployment: str
        :param skiptoken: Continuation token for pagination.
        :type skiptoken: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BatchJobResourceArmPaginatedResult or the result
         of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.machinelearningservices.models.BatchJobResourceArmPaginatedResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BatchJobResourceArmPaginatedResult"]
        # Map well-known status codes onto azure-core exception types;
        # callers may extend/override via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated metadata URL; subsequent pages
            # use the service-supplied next_link as the template and are
            # forced back to GET.
            if not next_link:
                request = build_list_request(
                    endpoint_name=endpoint_name,
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    deployment=deployment,
                    skiptoken=skiptoken,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    endpoint_name=endpoint_name,
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    deployment=deployment,
                    skiptoken=skiptoken,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) as
            # required by ItemPaged.
            deserialized = self._deserialize("BatchJobResourceArmPaginatedResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, raising on anything other than HTTP 200.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs'}  # type: ignore

    @distributed_trace
    def create(
        self,
        endpoint_name,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        body,  # type: "_models.BatchJobResource"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BatchJobResource"
        """Creates a batch inference endpoint job.

        Creates a batch inference endpoint job.

        :param endpoint_name: Name of endpoint.
        :type endpoint_name: str
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :param body: Batch inference endpoint Job definition object.
        :type body: ~azure.mgmt.machinelearningservices.models.BatchJobResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BatchJobResource, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.BatchJobResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BatchJobResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        # Serialize the model into the JSON request body.
        _json = self._serialize.body(body, 'BatchJobResource')

        request = build_create_request(
            endpoint_name=endpoint_name,
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            template_url=self.create.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # 200 (updated) and 201 (created) both carry the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('BatchJobResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('BatchJobResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs'}  # type: ignore

    @distributed_trace
    def get(
        self,
        endpoint_name,  # type: str
        id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BatchJobResource"
        """Gets a batch inference endpoint job by name.

        Gets a batch inference endpoint job by name.

        :param endpoint_name: Name of endpoint.
        :type endpoint_name: str
        :param id: Identifier for the batch endpoint job.
        :type id: str
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Name of Azure Machine Learning workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BatchJobResource, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.BatchJobResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BatchJobResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            endpoint_name=endpoint_name,
            id=id,
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BatchJobResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/batchEndpoints/{endpointName}/jobs/{id}'}  # type: ignore
| {
"content_hash": "966196cfa453e62d77fea51c3c0ba26a",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 223,
"avg_line_length": 43.11194029850746,
"alnum_prop": 0.6540303502394553,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b0fea33c0b20bf4146308a7c0df12def82cea402",
"size": "17798",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2020_09_01_dataplanepreview/operations/_batch_job_endpoint_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Unit testing for servmon.common.errorhandler.invalid_usage_handler"""
from servmon import app
from servmon.common import invalid_usage
from servmon.common.errorhandler import invalid_usage_handler
import json
import pytest
@pytest.fixture
def app_mock():
    """Return the Flask application configured for testing.

    TESTING mode changes Flask's error propagation so the test client
    below behaves predictably.
    """
    app.config['TESTING'] = True
    return app
@pytest.fixture
def invalid_usage_mock():
    """Return a well-formed InvalidUsage (message, 403 status, dict payload)."""
    # Register the function to be tested as the error handler, so raising
    # the exception inside a route invokes handle_invalid_usage.
    app.register_error_handler(invalid_usage.InvalidUsage, invalid_usage_handler.handle_invalid_usage)
    return invalid_usage.InvalidUsage('Test Message', 403, {'payload': 'test'})
@pytest.fixture
def invalid_usage_mock_estatus():
    """Return an InvalidUsage object mock with an invalid (non-int) status code."""
    # Register the function to be tested as the error handler
    app.register_error_handler(invalid_usage.InvalidUsage, invalid_usage_handler.handle_invalid_usage)
    return invalid_usage.InvalidUsage('Test Message', 'Wrong status code', {'payload': 'test'})
@pytest.fixture
def invalid_usage_mock_epayload():
    """Return an InvalidUsage object mock with an invalid (non-dict) payload."""
    # Register the function to be tested as the error handler
    app.register_error_handler(invalid_usage.InvalidUsage, invalid_usage_handler.handle_invalid_usage)
    return invalid_usage.InvalidUsage('Test Message', 400, 'Wrong payload')
class TestInvalidUsageHandler(object):
    """Unit tests for invalid_usage_handler.handle_invalid_usage."""

    def test_invalid_usage_mock_estatus(self, app_mock, invalid_usage_mock_estatus):
        """Test handle_invalid_usage when passed an object with a wrong status"""
        @app.route('/test_invalid_usage_mock_estatus')
        def test_route_estatus():
            """Create a mock route that raises the InvalidUsage Exception"""
            raise invalid_usage_mock_estatus

        # Send a request to the mock route, asserting that a TypeError will
        # be raised before the response gets sent (the non-integer status
        # code cannot be used to build a response).
        with app_mock.test_client() as client, pytest.raises(TypeError):
            client.get('/test_invalid_usage_mock_estatus')

    def test_invalid_usage_mock_epayload(self, invalid_usage_mock_epayload):
        """Test handle_invalid_usage when passed an object with a wrong payload"""
        # Assert that the function raises ValueError
        with pytest.raises(ValueError):
            invalid_usage_handler.handle_invalid_usage(invalid_usage_mock_epayload)

    def test_invalid_usage_mock(self, app_mock, invalid_usage_mock):
        """Test handle_invalid_usage when passed a working InvalidUsage"""
        @app.route('/test_invalid_usage_mock')
        def test_route():
            """Create a mock route that raises the InvalidUsage Exception"""
            raise invalid_usage_mock

        # Send a request to the mock route and check the JSON body produced
        # by the registered error handler.
        with app_mock.test_client() as client:
            res = client.get('/test_invalid_usage_mock')
            assert json.loads(res.data) == {'message': 'Test Message', 'payload': 'test'}
| {
"content_hash": "cfbe956e82bcd8913eaefe4cad52aaf2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 105,
"avg_line_length": 42.56944444444444,
"alnum_prop": 0.7017944535073409,
"repo_name": "hpsuenaa/servmon",
"id": "b74cc17f3cbc12e45e3ae8f0f6aabdbeb9721a8a",
"size": "3065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_common/test_errorhandler/test_invalid_usage_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31716"
}
],
"symlink_target": ""
} |
"""
HTMLParser-based link extractor
"""
from HTMLParser import HTMLParser
from six.moves.urllib.parse import urljoin
from w3lib.url import safe_url_string
from pyrake.link import Link
from pyrake.utils.python import unique as unique_list
class HtmlParserLinkExtractor(HTMLParser):
    """Extract Link objects from HTML using the stdlib HTMLParser.

    Python 2 only: relies on the ``HTMLParser`` module (file-level import)
    and the ``unicode`` builtin.  ``tag``, ``attr`` and ``process`` may each
    be a value to match/apply or a callable predicate/transform.
    """

    def __init__(self, tag="a", attr="href", process=None, unique=False):
        HTMLParser.__init__(self)
        # Normalize each option into a callable so the handlers below
        # never need to distinguish value vs. predicate.
        self.scan_tag = tag if callable(tag) else lambda t: t == tag
        self.scan_attr = attr if callable(attr) else lambda a: a == attr
        self.process_attr = process if callable(process) else lambda v: v
        self.unique = unique

    def _extract_links(self, response_text, response_url, response_encoding):
        """Parse response_text and return absolute, safely-encoded Links."""
        self.reset()
        self.feed(response_text)
        self.close()

        links = unique_list(self.links, key=lambda link: link.url) if self.unique else self.links

        ret = []
        # Honour a <base href=...> if one was seen during parsing.
        base_url = urljoin(response_url, self.base_url) if self.base_url else response_url
        for link in links:
            if isinstance(link.url, unicode):
                link.url = link.url.encode(response_encoding)
            link.url = urljoin(base_url, link.url)
            link.url = safe_url_string(link.url, response_encoding)
            link.text = link.text.decode(response_encoding)
            ret.append(link)
        return ret

    def extract_links(self, response):
        # wrapper needed to allow to work directly with text
        return self._extract_links(response.body, response.url, response.encoding)

    def reset(self):
        """Clear parser and extractor state (called before each feed)."""
        HTMLParser.reset(self)
        self.base_url = None
        self.current_link = None
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == 'base':
            self.base_url = dict(attrs).get('href')
        if self.scan_tag(tag):
            # One Link per matching attribute; current_link tracks the
            # last one so handle_data can accumulate its text.
            for attr, value in attrs:
                if self.scan_attr(attr):
                    url = self.process_attr(value)
                    link = Link(url=url)
                    self.links.append(link)
                    self.current_link = link

    def handle_endtag(self, tag):
        if self.scan_tag(tag):
            self.current_link = None

    def handle_data(self, data):
        # Accumulate text between the opening and closing tags of a link.
        if self.current_link:
            self.current_link.text = self.current_link.text + data

    def matches(self, url):
        """This extractor matches with any url, since
        it doesn't contain any patterns"""
        return True
| {
"content_hash": "57fb5bff358b53a4367f97890a83e954",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 97,
"avg_line_length": 33.093333333333334,
"alnum_prop": 0.6087832393231265,
"repo_name": "elkingtowa/pyrake",
"id": "9c3c252253c54f11b56ad1c40054b178914a33f5",
"size": "2482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/lib/pyrake/contrib/linkextractors/htmlparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9681"
},
{
"name": "Perl",
"bytes": "1311"
},
{
"name": "Python",
"bytes": "1950905"
},
{
"name": "Shell",
"bytes": "3209"
}
],
"symlink_target": ""
} |
from upload import * | {
"content_hash": "8c38c64d2ecb8ba39ed374f649cdfaed",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 20,
"alnum_prop": 0.8,
"repo_name": "scottkleinman/WE1S",
"id": "d16e60cff872a782b5a813a7dc1aa39c3a15421b",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "we1s-web/app/mod_upload/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2602"
},
{
"name": "HTML",
"bytes": "410877"
},
{
"name": "JavaScript",
"bytes": "15896"
},
{
"name": "Jupyter Notebook",
"bytes": "3649"
},
{
"name": "Python",
"bytes": "78423"
}
],
"symlink_target": ""
} |
"""
http.py : Trovebox HTTP Access
"""
from __future__ import unicode_literals
import sys
import requests
import requests_oauthlib
import logging
try:
from urllib.parse import urlparse, urlunparse # Python3
except ImportError:
from urlparse import urlparse, urlunparse # Python2
from trovebox.objects.trovebox_object import TroveboxObject
from .errors import TroveboxError, Trovebox404Error, TroveboxDuplicateError
from .auth import Auth
# Python 2/3 compatibility: the text type is ``unicode`` on Python 2 and
# ``str`` on Python 3.  Compare version_info tuples rather than the
# ``sys.version`` string -- lexicographic string comparison misorders
# versions (e.g. "10.0" < "3" is True).
if sys.version_info[0] < 3:
    TEXT_TYPE = unicode  # noqa: F821 -- only evaluated on Python 2
else:  # pragma: no cover
    TEXT_TYPE = str

# Trovebox API response body returned when an uploaded photo already
# exists; matched in Http._process_response to raise TroveboxDuplicateError.
DUPLICATE_RESPONSE = {"code": 409,
                      "message": "This photo already exists"}
class Http(object):
    """
    Base class to handle HTTP requests to a Trovebox server.

    If no parameters are specified, auth config is loaded from the
    default location (~/.config/trovebox/default).
    The config_file parameter is used to specify an alternate config file.
    If the host parameter is specified, no config file is loaded and
    OAuth tokens (consumer*, token*) can optionally be specified.
    """
    # Defaults are copied per-instance in __init__, so configure() on one
    # client never leaks into another.
    _CONFIG_DEFAULTS = {"api_version" : None,
                        "ssl_verify" : True,
                        }

    def __init__(self, config_file=None, host=None,
                 consumer_key='', consumer_secret='',
                 token='', token_secret='', api_version=None):
        self.config = dict(self._CONFIG_DEFAULTS)
        if api_version is not None: # pragma: no cover
            print("Deprecation Warning: api_version should be set by "
                  "calling the configure function")
            self.config["api_version"] = api_version

        self._logger = logging.getLogger("trovebox")

        self.auth = Auth(config_file, host,
                         consumer_key, consumer_secret,
                         token, token_secret)
        self.host = self.auth.host

        # Remember the most recent HTTP request and response
        self.last_url = None
        self.last_params = None
        self.last_response = None

    def configure(self, **kwds):
        """
        Update Trovebox HTTP client configuration.

        :param api_version: Include a Trovebox API version in all requests.
            This can be used to ensure that your application will continue
            to work even if the Trovebox API is updated to a new revision.
            [default: None]
        :param ssl_verify: If true, HTTPS SSL certificates will always be
            verified [default: True]
        """
        for item in kwds:
            self.config[item] = kwds[item]

    def get(self, endpoint, process_response=True, **params):
        """
        Performs an HTTP GET from the specified endpoint (API path),
        passing parameters if given.
        The api_version is prepended to the endpoint,
        if it was specified when the Trovebox object was created.

        Returns the decoded JSON dictionary, and raises exceptions if an
        error code is received.
        Returns the raw response if process_response=False
        """
        params = self._process_params(params)
        url = self._construct_url(endpoint)

        # GET requests are signed only when OAuth credentials are present;
        # unauthenticated GETs are allowed.
        if self.auth.consumer_key:
            auth = requests_oauthlib.OAuth1(self.auth.consumer_key,
                                            self.auth.consumer_secret,
                                            self.auth.token,
                                            self.auth.token_secret)
        else:
            auth = None

        with requests.Session() as session:
            session.verify = self.config["ssl_verify"]
            response = session.get(url, params=params, auth=auth)

        self._logger.info("============================")
        self._logger.info("GET %s" % url)
        self._logger.info("---")
        self._logger.info(response.text[:1000])
        if len(response.text) > 1000: # pragma: no cover
            self._logger.info("[Response truncated to 1000 characters]")

        # Keep the request/response for post-mortem debugging.
        self.last_url = url
        self.last_params = params
        self.last_response = response

        if process_response:
            return self._process_response(response)
        else:
            if 200 <= response.status_code < 300:
                return response.text
            else:
                raise TroveboxError("HTTP Error %d: %s" %
                                    (response.status_code, response.reason))

    def post(self, endpoint, process_response=True, files=None, **params):
        """
        Performs an HTTP POST to the specified endpoint (API path),
        passing parameters if given.
        The api_version is prepended to the endpoint,
        if it was specified when the Trovebox object was created.

        Returns the decoded JSON dictionary, and raises exceptions if an
        error code is received.
        Returns the raw response if process_response=False
        """
        params = self._process_params(params)
        url = self._construct_url(endpoint)

        # Unlike GET, POST always requires OAuth credentials.
        if not self.auth.consumer_key:
            raise TroveboxError("Cannot issue POST without OAuth tokens")

        auth = requests_oauthlib.OAuth1(self.auth.consumer_key,
                                        self.auth.consumer_secret,
                                        self.auth.token,
                                        self.auth.token_secret)
        with requests.Session() as session:
            session.verify = self.config["ssl_verify"]
            if files:
                # Need to pass parameters as URL query, so they get OAuth signed
                response = session.post(url, params=params,
                                        files=files, auth=auth)
            else:
                # Passing parameters as URL query doesn't work
                # if there are no files to send.
                # Send them as form data instead.
                response = session.post(url, data=params, auth=auth)

        self._logger.info("============================")
        self._logger.info("POST %s" % url)
        self._logger.info("params: %s" % repr(params))
        if files:
            self._logger.info("files: %s" % repr(files))
        self._logger.info("---")
        self._logger.info(response.text[:1000])
        if len(response.text) > 1000: # pragma: no cover
            self._logger.info("[Response truncated to 1000 characters]")

        # Keep the request/response for post-mortem debugging.
        self.last_url = url
        self.last_params = params
        self.last_response = response

        if process_response:
            return self._process_response(response)
        else:
            if 200 <= response.status_code < 300:
                return response.text
            else:
                raise TroveboxError("HTTP Error %d: %s" %
                                    (response.status_code, response.reason))

    def _construct_url(self, endpoint):
        """Return the full URL to the specified endpoint"""
        parsed_url = urlparse(self.host)
        scheme = parsed_url[0]
        host = parsed_url[1]
        # Handle host without a scheme specified (eg. www.example.com)
        if scheme == "":
            scheme = "http"
            host = self.host
        if not endpoint.startswith("/"):
            endpoint = "/" + endpoint
        # Optionally prefix the API version, e.g. /v2/photos/list.json
        if self.config["api_version"] is not None:
            endpoint = "/v%d%s" % (self.config["api_version"], endpoint)
        return urlunparse((scheme, host, endpoint, '', '', ''))

    def _process_params(self, params):
        """ Converts Unicode/lists/booleans inside HTTP parameters """
        processed_params = {}
        for key, value in params.items():
            processed_params[key] = self._process_param_value(value)
        return processed_params

    def _process_param_value(self, value):
        """
        Returns a UTF-8 string representation of the parameter value,
        recursing into lists.
        """
        # Extract IDs from objects
        if isinstance(value, TroveboxObject):
            return str(value.id).encode('utf-8')
        # Ensure strings are UTF-8 encoded
        elif isinstance(value, TEXT_TYPE):
            return value.encode("utf-8")
        # Handle lists
        elif isinstance(value, list):
            # Make a copy of the list, to avoid overwriting the original
            new_list = list(value)
            # Process each item in the list
            for i, item in enumerate(new_list):
                new_list[i] = self._process_param_value(item)
            # new_list elements are UTF-8 encoded strings - simply join up
            return b','.join(new_list)
        # Handle booleans
        elif isinstance(value, bool):
            return b"1" if value else b"0"
        # Unknown - just do our best
        else:
            return str(value).encode("utf-8")

    @staticmethod
    def _process_response(response):
        """
        Decodes the JSON response, returning a dict.
        Raises an exception if an invalid response code is received.
        """
        if response.status_code == 404:
            raise Trovebox404Error("HTTP Error %d: %s" %
                                   (response.status_code, response.reason))
        try:
            json_response = response.json()
            code = json_response["code"]
            message = json_response["message"]
        except (ValueError, KeyError):
            # Response wasn't Trovebox JSON - check the HTTP status code
            if 200 <= response.status_code < 300:
                # Status code was valid, so just reraise the exception
                raise
            else:
                raise TroveboxError("HTTP Error %d: %s" %
                                    (response.status_code, response.reason))

        if 200 <= code < 300:
            return json_response
        elif (code == DUPLICATE_RESPONSE["code"] and
              DUPLICATE_RESPONSE["message"] in message):
            raise TroveboxDuplicateError("Code %d: %s" % (code, message))
        else:
            raise TroveboxError("Code %d: %s" % (code, message))
| {
"content_hash": "07cd4e225d8f646836ccf79e79b5e5a3",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 80,
"avg_line_length": 38.27376425855513,
"alnum_prop": 0.5661633220743095,
"repo_name": "photo/openphoto-python",
"id": "6021dcca17a22206b252eafd88c14528e6c06dec",
"size": "10066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trovebox/http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "193614"
},
{
"name": "Shell",
"bytes": "1188"
}
],
"symlink_target": ""
} |
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import ode
import traceback
import sys
import warnings
import scenes
import games
import players
from graphics import render, init_graphics, lights, cameras, textures
from math_classes.vectors import Vector
from physics_engine import physics
from objects import shapes
from objects.text import TextBox
# TODO: Clean-up everywhere! More logical order of variable definitions,
# better readability and more detailed func-docs, among other things.
def game_loop(game):
    ''' Run one frame of active gameplay: input, physics, movement, render.

    Returns (run, toggle_pause) flags taken from the user input. '''
    # Gather this frame's user input.
    (run, direction, jump, toggle_pause,
     mouse_movement, scroll_direction, camera_mode) = game.take_input()

    avatar = game.get_player()
    cam = game.get_camera()
    frame_clock = game.get_clock()
    target_fps = game.get_fps()

    # Advance the physics simulation.
    physics.update_physics(game)

    # Update the camera, then move the player using the camera's orientation.
    forward_vector, up_vector = cam.update(avatar, mouse_movement,
                                           scroll_direction, camera_mode)
    avatar.move(direction, forward_vector, up_vector, jump)

    # Draw the scene and cap the frame rate.
    render.render_with_shadows(game)
    frame_clock.tick(target_fps)
    return run, toggle_pause
def pause_loop(game):
    ''' Run one frame while paused: still poll input and tick the clock.

    Returns (run, toggle_pause) flags taken from the user input. '''
    run, _, _, toggle_pause = game.take_input()[:4]
    clock = game.get_clock()
    clock.tick(game.get_fps())
    return run, toggle_pause
def main():
    ''' Main routine of the game: set up the window and scene, run the
    main loop (switching between game and pause loops), and fade out the
    background music on exit.'''
    view = init_graphics.init_window('Shadow test', HAVE_FULLSCREEN = True)
    game = scenes.init_scene_2(view)
    player = game.get_player()
    camera = game.get_camera()
    clock = game.get_clock()
    run = True
    pause = False
    toggle_pause = False
    # Background music
    # NOTE: Should this be in init_scene? If we want to have
    # different background music in different scenes?
    pygame.mixer.music.load('sound/sound_data/02. II. Molto vivace.ogg')
    pygame.mixer.music.set_volume(0.8)
    pygame.mixer.music.play(-1)
    # An attempt to suppress warnings from ODE saying that two geoms are intersecting
    # (it is a common error that we can simply ignore).
    # NOTE: Why doesn't it work? Is it not a warning? Not even
    # "warnings.simplefilter('ignore')", that should suppress all warnings, seems to work...
    warnings.filterwarnings('ignore', 'ODE Message 3: LCP internal error, s <= 0')
    while run:
        if toggle_pause:
            pause = not pause
            pygame.mouse.set_visible(int(pause))
            # Re-centre the mouse so the camera does not jump when toggling
            # back to unpaused.  BUGFIX: the original code referenced
            # undefined names ``width``/``height`` (a NameError on the first
            # pause toggle); query the actual window size from pygame instead.
            width, height = pygame.display.get_surface().get_size()
            pygame.mouse.set_pos(width / 2.0,
                                 height / 2.0)
            # Discard the relative motion produced by set_pos.
            x, y = pygame.mouse.get_rel()
        if not pause:
            run, toggle_pause = game_loop(game)
        else:
            run, toggle_pause = pause_loop(game)
    #Fade out the music after quitting the game
    pygame.mixer.music.fadeout(1000)
    pygame.time.wait(1000)
if __name__ == '__main__':
    try:
        main()
    except Exception:
        # Print the traceback to stdout so it is visible even if stderr
        # is redirected or swallowed.
        traceback.print_exc(file=sys.stdout)
    finally:
        # Always shut pygame down cleanly, even after a crash.
        pygame.quit()
| {
"content_hash": "f65824cf5a21a8d83bb227636ea6fbb0",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 100,
"avg_line_length": 28.309734513274336,
"alnum_prop": 0.6505157861831823,
"repo_name": "axelri/return-of-the-spheres",
"id": "abe1543ca891b7e3043ff6c9cec6b09988f70b69",
"size": "3199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rots/shadow_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171401"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DhcpRpcCallbackMixin(object):
    """A mix-in that enable DHCP agent support in plugin implementations.

    All public methods are RPC endpoints invoked by DHCP agents; kwargs
    typically carry 'host' (the agent's hostname) plus the ids of the
    resources being operated on.
    """
    def _get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active networks."""
        # 'host' is the hostname of the requesting DHCP agent.
        host = kwargs.get('host')
        plugin = manager.NeutronManager.get_plugin()
        if utils.is_extension_supported(
            plugin, constants.DHCP_AGENT_SCHEDULER_EXT_ALIAS):
            if cfg.CONF.network_auto_schedule:
                plugin.auto_schedule_networks(context, host)
            nets = plugin.list_active_networks_on_active_dhcp_agent(
                context, host)
        else:
            # No agent scheduler: every admin-up network counts as active.
            filters = dict(admin_state_up=[True])
            nets = plugin.get_networks(context, filters=filters)
        return nets
    def get_active_networks(self, context, **kwargs):
        """Retrieve and return a list of the active network ids."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks requested from %s'), host)
        nets = self._get_active_networks(context, **kwargs)
        return [net['id'] for net in nets]
    def get_active_networks_info(self, context, **kwargs):
        """Returns all the networks/subnets/ports in system."""
        host = kwargs.get('host')
        LOG.debug(_('get_active_networks_info from %s'), host)
        networks = self._get_active_networks(context, **kwargs)
        plugin = manager.NeutronManager.get_plugin()
        filters = {'network_id': [network['id'] for network in networks]}
        ports = plugin.get_ports(context, filters=filters)
        # Only DHCP-enabled subnets are relevant to the agent.
        filters['enable_dhcp'] = [True]
        subnets = plugin.get_subnets(context, filters=filters)
        # Group subnets and ports under their owning network, in-place.
        for network in networks:
            network['subnets'] = [subnet for subnet in subnets
                                  if subnet['network_id'] == network['id']]
            network['ports'] = [port for port in ports
                                if port['network_id'] == network['id']]
        return networks
    def get_network_info(self, context, **kwargs):
        """Retrieve and return a extended information about a network."""
        network_id = kwargs.get('network_id')
        host = kwargs.get('host')
        LOG.debug(_('Network %(network_id)s requested from '
                    '%(host)s'), {'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        network = plugin.get_network(context, network_id)
        # Augment the network dict with all of its subnets and ports.
        filters = dict(network_id=[network_id])
        network['subnets'] = plugin.get_subnets(context, filters=filters)
        network['ports'] = plugin.get_ports(context, filters=filters)
        return network
    def get_dhcp_port(self, context, **kwargs):
        """Allocate a DHCP port for the host and return port information.
        This method will re-use an existing port if one already exists. When a
        port is re-used, the fixed_ip allocation will be updated to the current
        network state.
        """
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        LOG.debug(_('Port %(device_id)s for %(network_id)s requested from '
                    '%(host)s'), {'device_id': device_id,
                                  'network_id': network_id,
                                  'host': host})
        plugin = manager.NeutronManager.get_plugin()
        retval = None
        filters = dict(network_id=[network_id])
        # Map subnet id -> subnet dict for the fixed_ip expansion below.
        subnets = dict([(s['id'], s) for s in
                        plugin.get_subnets(context, filters=filters)])
        dhcp_enabled_subnet_ids = [s['id'] for s in
                                   subnets.values() if s['enable_dhcp']]
        try:
            filters = dict(network_id=[network_id], device_id=[device_id])
            ports = plugin.get_ports(context, filters=filters)
            if ports:
                # Ensure that fixed_ips cover all dhcp_enabled subnets.
                port = ports[0]
                # Drop subnets the port already has an address on...
                for fixed_ip in port['fixed_ips']:
                    if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id'])
                # ...and request allocations on the remaining ones.
                port['fixed_ips'].extend(
                    [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                retval = plugin.update_port(context, port['id'],
                                            dict(port=port))
        except exc.NoResultFound:
            pass
        if retval is None:
            # No previous port exists, so create a new one.
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s '
                        'does not exist on %(host)s'),
                      {'device_id': device_id,
                       'network_id': network_id,
                       'host': host})
            network = plugin.get_network(context, network_id)
            port_dict = dict(
                admin_state_up=True,
                device_id=device_id,
                network_id=network_id,
                tenant_id=network['tenant_id'],
                mac_address=attributes.ATTR_NOT_SPECIFIED,
                name='',
                device_owner='network:dhcp',
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            retval = plugin.create_port(context, dict(port=port_dict))
        # Convert subnet_id to subnet dict
        for fixed_ip in retval['fixed_ips']:
            subnet_id = fixed_ip.pop('subnet_id')
            fixed_ip['subnet'] = subnets[subnet_id]
        return retval
    def release_dhcp_port(self, context, **kwargs):
        """Release the port currently being used by a DHCP agent."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        LOG.debug(_('DHCP port deletion for %(network_id)s request from '
                    '%(host)s'),
                  {'network_id': network_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        filters = dict(network_id=[network_id], device_id=[device_id])
        ports = plugin.get_ports(context, filters=filters)
        # Only the first matching port (if any) is deleted.
        if ports:
            plugin.delete_port(context, ports[0]['id'])
    def release_port_fixed_ip(self, context, **kwargs):
        """Release the fixed_ip associated the subnet on a port."""
        host = kwargs.get('host')
        network_id = kwargs.get('network_id')
        device_id = kwargs.get('device_id')
        subnet_id = kwargs.get('subnet_id')
        LOG.debug(_('DHCP port remove fixed_ip for %(subnet_id)s request '
                    'from %(host)s'),
                  {'subnet_id': subnet_id, 'host': host})
        plugin = manager.NeutronManager.get_plugin()
        filters = dict(network_id=[network_id], device_id=[device_id])
        ports = plugin.get_ports(context, filters=filters)
        if ports:
            port = ports[0]
            fixed_ips = port.get('fixed_ips', [])
            # Remove at most one fixed_ip: the first one on the given subnet.
            for i in range(len(fixed_ips)):
                if fixed_ips[i]['subnet_id'] == subnet_id:
                    del fixed_ips[i]
                    break
            plugin.update_port(context, port['id'], dict(port=port))
    def update_lease_expiration(self, context, **kwargs):
        """Deprecated no-op kept for RPC compatibility; only logs a warning."""
        # NOTE(arosen): This method is no longer used by the DHCP agent but is
        # left so that neutron-dhcp-agents will still continue to work if
        # neutron-server is upgraded and not the agent.
        host = kwargs.get('host')
        LOG.warning(_('Updating lease expiration is now deprecated. Issued '
                      'from host %s.'), host)
    def create_dhcp_port(self, context, **kwargs):
        """Create the dhcp port."""
        host = kwargs.get('host')
        port = kwargs.get('port')
        LOG.debug(_('Create dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})
        # Force DHCP ownership and bind the port to the requesting host.
        port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP
        port['port'][portbindings.HOST_ID] = host
        if 'mac_address' not in port['port']:
            port['port']['mac_address'] = attributes.ATTR_NOT_SPECIFIED
        plugin = manager.NeutronManager.get_plugin()
        return plugin.create_port(context, port)
    def update_dhcp_port(self, context, **kwargs):
        """Update the dhcp port."""
        host = kwargs.get('host')
        port_id = kwargs.get('port_id')
        port = kwargs.get('port')
        LOG.debug(_('Update dhcp port %(port)s '
                    'from %(host)s.'),
                  {'port': port,
                   'host': host})
        plugin = manager.NeutronManager.get_plugin()
        return plugin.update_port(context, port_id, port)
| {
"content_hash": "3af2c66d5ff0819e35bdde6452ae6668",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 79,
"avg_line_length": 42.17672413793103,
"alnum_prop": 0.5678078691875319,
"repo_name": "netscaler/neutron",
"id": "a8a2b4bf2ade48651785aa8efadad4a6fbcd1d70",
"size": "10376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/db/dhcp_rpc_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6924102"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
'''Process creation helper
This script can be spawned as an intermediate process when creating a new
process to be able to daemonize the result process, or setuid/setgid the result
process.
'''
import os
import os.path
import sys
import time
import itertools
def find_gids_for_uid(uid):
    '''Find all GIDs of groups the user with the given UID belongs to

    @param uid: User UID
    @type uid: number
    @returns: GIDs of all groups the user belongs to
    @rtype: generator<number>
    '''
    import grp
    import pwd
    # The primary group comes straight from the passwd entry.
    entry = pwd.getpwuid(uid)
    yield entry.pw_gid
    # Supplementary groups: every group whose member list names the user.
    name = entry.pw_name
    yield from (group.gr_gid for group in grp.getgrall()
                if name in group.gr_mem)
def change_uid_gid(uid, gid):
    '''Impersonate the given UID and/or GID
    Do note this will only work if the current process has the privileges to
    impersonate this user or group.
    Here's how things are processed. Let's assume we're currently running as
    root (uid=0, groups=root(0)), and there's one more user called 'test'
    (uid=1000, gid=1001, groups=test1(1001), test2(1002)).
    +=======================================================+
    | Arguments | Resulting process environment |
    | | |
    | UID | GID | UID | GID | Extra groups |
    +======+======+======+======+===========================+
    | None | None | 0 | 0 | root |
    +-------------+------+------+---------------------------+
    | 0 | None | 0 | 0 | root |
    +-------------+------+------+---------------------------+
    | None | 0 | 0 | 0 | root |
    +-------------+------+------+---------------------------+
    | 0 | 0 | 0 | 0 | root |
    +-------------+------+------+---------------------------+
    | 1000 | None | 1000 | 1001 | test1, test2 |
    +-------------+------+------+---------------------------+
    | None | 1001 | 0 | 1001 | root |
    +-------------+------+------+---------------------------+
    | 1000 | 1001 | 1000 | 1001 | test1, test2 |
    +-------------+------+------+---------------------------+
    | 1000 | 1002 | 1000 | 1002 | test1, test2 |
    +-------------+------+------+---------------------------+
    | 1000 | 0 | 1000 | 0 | test1, test2, root |
    +-------------+------+------+---------------------------+
    @param uid: UID of the user to impersonate
    @type uid: number
    @param gid: GID of the group to impersonate
    @type gid: number
    '''
    if uid is None and gid is None:
        # Nothing to do
        return
    if uid is not None:
        # Set up correct groups
        #
        # We need to make sure correct GIDs are set. Here's how it works:
        #
        # * If a GID is given, this one is added to the set of target GIDs
        # * The GIDs of all groups the user with target UID belongs to is added
        #   to the set of target GIDs
        #
        # Do note we're not using libc.initgroups() since this doesn't take
        # primary groups into account, and only works with /etc/group
        if gid is not None:
            standard_gids = (gid, )
        else:
            standard_gids = tuple()
        gids = itertools.chain(standard_gids, find_gids_for_uid(uid))
        # Calculate unique list of GIDs
        gids = set(gids)
        # NOTE(review): 'j' is never defined in this module; presumably it is
        # the Jumpscale-injected global - confirm it exists at runtime.
        if not hasattr(os, 'setgroups'):
            raise j.exceptions.RuntimeError('setgroups() not available on this platform')
        os.setgroups(tuple(gids))
    # Note: we need to call setgid() before calling setuid() because it might be
    # possible setgid() fails once we impersonated another user
    if gid is not None and not hasattr(os, 'setgid'):
        raise j.exceptions.RuntimeError('GID provided but setgid() not available on this '
                                        'platform')
    if gid is not None:
        os.setgid(gid)
    if uid is not None and gid is None:
        # Set primary group to GID of given UID
        if not hasattr(os, 'setgid'):
            raise j.exceptions.RuntimeError('setgid() not available on this platform')
        import pwd
        user = pwd.getpwuid(uid)
        os.setgid(user.pw_gid)
    if uid is not None and not hasattr(os, 'setuid'):
        raise j.exceptions.RuntimeError('UID provided by setuid() not available on this '
                                        'platform')
    # setuid() must come last: after it we may no longer have the privilege
    # to change groups or GID.
    if uid is not None:
        os.setuid(uid)
def daemonize(stdout, stderr, chdir='/', umask=0):
    '''Daemonize a process using a double fork
    This method will fork the current process to create a daemon process.
    It will perform a double fork(2), chdir(2) to the given folder (or not
    chdir at all if the C{chdir} argument is C{None}), and set the new
    process umask(2) to the value of the C{umask} argument, or not reset
    it if this argument is -1.
    The stdout and stderr arguments can be used to output the output to the
    corresponding streams of the daemon process to files at the provided
    location. Make sure the parent folder of these files already exists. When
    set to None, all output will vanish.
    While forking, a setsid(2) call will be done to become session leader
    and detach from the controlling TTY.
    In the child process, all existing file descriptors will be closed,
    including stdin, stdout and stderr, which will be re-opened to
    /dev/null, unless a corresponding parameter is provided as an argument to
    this function.
    The method returns a tuple<bool, number>. If the first item is True,
    the current process is the daemonized process. If it is False,
    the current process is the process which called the C{daemonize}
    method, which can most likely be closed now. The second item is the
    PID of the current process.
    @attention: Make sure you know really well what fork(2) does before using this method
    @param stdout: Path to file to dump stdout output of daemon process to
    @type stdout: string
    @param stderr: Path to file to dump stderr output of daemon process to
    @type stderr: string
    @param chdir: Path to chdir(2) to after forking. Set to None to disable chdir'ing
    @type chdir: string or None
    @param umask: Umask to set after forking. Set to -1 not to set umask
    @type umask: number
    @returns: Daemon status and PID
    @rtype: tuple<bool, number>
    @raise RuntimeError: System does not support fork(2)
    '''
    # pylint: disable-msg=R0912
    if not hasattr(os, 'fork'):
        raise j.exceptions.RuntimeError(
            'os.fork not found, daemon mode not supported on your system')
    def check_output_permissions(file_):
        '''
        Test whether the current user (which might no longer be the user
        running the parent process) is allowed to write to the requested
        output files, before performing a double fork.
        '''
        try:
            fd = os.open(file_, os.O_CREAT | os.O_WRONLY)
        except IOError as e:
            try:
                import errno
            except ImportError:
                # We can't provide a nicer error message, re-raise the original
                # exception
                # Note we can't use plain 'raise' here, since this would
                # re-raise the ImportError we're catching
                raise e
            if not hasattr(errno, 'EACCES'):
                # Same as above
                raise e
            if e.errno == errno.EACCES:
                try:
                    import pwd
                except ImportError:
                    # Same as above
                    raise e
                try:
                    user = pwd.getpwuid(os.getuid()).pw_name
                except (KeyError, AttributeError):
                    # Unknown user or no os.getuid() available or something
                    # alike
                    user = None
                if user:
                    raise j.exceptions.RuntimeError('User %s has no permissions to open '
                                                    'file \'%s\' for writing' %
                                                    (user, file_))
                else:
                    raise j.exceptions.RuntimeError('Current user has no permissions to '
                                                    'open file \'%s\' for writing' % file_)
            else:
                raise
        else:
            os.close(fd)
    if stdout:
        check_output_permissions(stdout)
    if stderr:
        check_output_permissions(stderr)
    # Output redirection should be safe once we're here
    # First fork: the parent returns to the caller as the non-daemon process.
    pid = os.fork()
    if pid == 0:
        # First child
        # Become session leader...
        os.setsid()
        # Double fork
        pid = os.fork()
        if pid == 0:
            # Second child: this is the daemon-to-be.
            if umask >= 0:
                os.umask(umask)
            if chdir:
                os.chdir(chdir)
        else:
            # First child is useless now
            # Report the daemon's PID/UID/GID on stdout for the caller to
            # parse, then exit.
            print(('CHILDPID=%d' % pid))
            if hasattr(os, 'getuid'):
                print(('UID=%d' % os.getuid()))
            if hasattr(os, 'getgid'):
                print(('GID=%d' % os.getgid()))
            sys.exit()
    else:
        return False, os.getpid()
    # Close all FDs
    import resource
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        # No hard limit reported; fall back to a reasonable upper bound.
        maxfd = 1024
    sys.stdin.close()
    if not stdout:
        sys.stdout.close()
    if not stderr:
        sys.stderr.close()
    def close_safe(fd):
        '''Close a file descriptor ignoring any exception it generates'''
        # pylint: disable-msg=W0704
        try:
            os.close(fd)
        except OSError:
            pass
    close_safe(0)
    if not stdout:
        close_safe(1)
    if not stderr:
        close_safe(2)
    for fd in range(3, maxfd):
        close_safe(fd)
    # Open fd0 to /dev/null
    # (the lowest free descriptor, so this open() lands on fd 0)
    redirect = getattr(os, 'devnull', '/dev/null')
    os.open(redirect, os.O_RDWR)
    # dup to stdout and stderr
    if not stdout:
        os.dup2(0, 1)
    else:
        fd = os.open(stdout, os.O_CREAT | os.O_WRONLY)
        os.dup2(fd, 1)
        close_safe(fd)
    if not stderr:
        os.dup2(0, 2)
    else:
        fd = os.open(stderr, os.O_CREAT | os.O_WRONLY)
        os.dup2(fd, 2)
        close_safe(fd)
    return True, os.getpid()
def main():
    '''Main entry point

    Parses options up to a mandatory '--' separator, optionally changes
    UID/GID and daemonizes, then exec(2)s the command that follows '--'.
    '''
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('-d', '--daemonize', dest='daemonize',
                      help='daemonize the child process', action='store_true')
    parser.add_option('-o', '--stdout', dest='stdout',
                      help='file to redirect stdout output', metavar='FILE')
    parser.add_option('-e', '--stderr', dest='stderr',
                      help='file to redirect stderr output', metavar='FILE')
    parser.add_option('-u', '--uid', dest='uid',
                      help='UID of user to setuid() to before running the daemon '
                      'process', metavar='UID', type='int')
    parser.add_option('-g', '--gid', dest='gid',
                      help='GID of group to setgid() to before running the daemon '
                      'process', metavar='GID', type='int')
    # Only parse until a '--' argument
    # NOTE(review): 'j' is never defined in this module; presumably the
    # Jumpscale-injected global - confirm it exists at runtime.
    if '--' not in sys.argv:
        raise j.exceptions.RuntimeError('No -- argument found')
    # Skip argv[0] when the script was started via 'python -c'.
    begin_idx = 0 if sys.argv[0] != '-c' else 1
    options, args = parser.parse_args(
        args=sys.argv[begin_idx:sys.argv.index('--')])
    if not options.daemonize and options.stdout:
        raise j.exceptions.RuntimeError('Stdout redirection is not available in '
                                        'foreground mode')
    if not options.daemonize and options.stderr:
        raise j.exceptions.RuntimeError('Stderr redirection is not available in '
                                        'foreground mode')
    if options.stdout and not os.path.isdir(os.path.dirname(options.stdout)):
        raise ValueError('Folder of stdout file does not exist')
    if options.stderr and not os.path.isdir(os.path.dirname(options.stderr)):
        raise ValueError('Folder of stderr file does not exist')
    change_uid_gid(options.uid, options.gid)
    if options.daemonize:
        daemon, _ = daemonize(options.stdout, options.stderr)
        if not daemon:
            # Give first fork time to print daemon info
            time.sleep(0.2)
            return
    # We're the daemon now, or no daemonization was requested.
    # execlp to replace ourself with the application our consumer actually
    # wants to run
    args = sys.argv[sys.argv.index('--') + 1:]
    # Reset all signal handlers
    # Check reset_signals in process.py for a more in-depth explanation
    import signal
    for i in range(1, signal.NSIG):
        if signal.getsignal(i) != signal.SIG_DFL:
            # pylint: disable-msg=W0704
            try:
                signal.signal(i, signal.SIG_DFL)
            except RuntimeError:
                pass
    os.execlp(args[0], *args)
# Script entry point: parse options and exec the target command.
if __name__ == '__main__':
    main()
| {
"content_hash": "d68916e74671c3294be2969cae812a71",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 91,
"avg_line_length": 35.41315789473684,
"alnum_prop": 0.5403879022070298,
"repo_name": "Jumpscale/core9",
"id": "aa132be628bc0432c57c65bd53d85f9f323006e9",
"size": "13459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JumpScale9/sal/process/processhelperOLD.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Cap'n Proto",
"bytes": "7695"
},
{
"name": "Lua",
"bytes": "31125"
},
{
"name": "Python",
"bytes": "1171144"
},
{
"name": "Shell",
"bytes": "42008"
}
],
"symlink_target": ""
} |
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib
import matplotlib.pyplot as pyplot
import random
import sys
import brfss
import myplot
class Respondents(brfss.Respondents):
    """Represents the respondent table."""

    def GetHeightWeight(self, jitter=0.0):
        """Get sequences of height and weight.

        Args:
            jitter: float magnitude of random noise added to heights

        Returns:
            tuple of sequences (heights, weights)
        """
        heights = []
        weights = []
        for rec in self.records:
            # Skip respondents missing either measurement.
            if rec.wtkg2 != 'NA' and rec.htm3 != 'NA':
                heights.append(rec.htm3 + random.uniform(-jitter, jitter))
                weights.append(rec.wtkg2)
        return heights, weights

    def ScatterPlot(self, root, heights, weights, alpha=1.0):
        """Make a scatter plot of weight versus height and save it."""
        pyplot.scatter(heights, weights, alpha=alpha, edgecolors='none')
        myplot.Save(root=root,
                    xlabel='Height (cm)',
                    ylabel='Weight (kg)',
                    axis=[140, 210, 20, 200],
                    legend=False)

    def HexBin(self, root, heights, weights, cmap=matplotlib.cm.Blues):
        """Make a hexbin plot of weight versus height and save it."""
        pyplot.hexbin(heights, weights, cmap=cmap)
        myplot.Save(root=root,
                    xlabel='Height (cm)',
                    ylabel='Weight (kg)',
                    axis=[140, 210, 20, 200],
                    legend=False)
def MakeFigures():
    """Generate the scatter and hexbin height/weight figures."""
    sample = Respondents()
    sample.ReadRecords(n=1000)

    # Raw heights first, then the jittered variants.
    hts, wts = sample.GetHeightWeight(jitter=0.0)
    pyplot.clf()
    sample.ScatterPlot('scatter1', hts, wts)

    hts, wts = sample.GetHeightWeight(jitter=1.3)
    pyplot.clf()
    sample.ScatterPlot('scatter2', hts, wts)

    # Same jittered data, but with transparency to show density.
    pyplot.clf()
    sample.ScatterPlot('scatter3', hts, wts, alpha=0.2)

    # read more respondents for the hexplot
    sample = Respondents()
    sample.ReadRecords(n=10000)
    hts, wts = sample.GetHeightWeight(jitter=1.3)
    pyplot.clf()
    sample.HexBin('scatter4', hts, wts)
def main(name):
    """Script entry point; *name* is the script name (unused)."""
    MakeFigures()
# Run the figure generation when invoked as a script.
if __name__ == '__main__':
    main(*sys.argv)
| {
"content_hash": "bced885ae2b55085816703ca5f8da552",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 72,
"avg_line_length": 26.707865168539325,
"alnum_prop": 0.5902397980647875,
"repo_name": "flychen50/thinkstat",
"id": "69a935028d4957b33ce1e55d3a5e772f5438b845",
"size": "2377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "brfss_scatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "225492"
}
],
"symlink_target": ""
} |
from devilry.apps.core import models
from devilry.simplified import FieldSpec, FilterSpec, FilterSpecs, ForeignFilterSpec
class SimplifiedSubjectMetaMixin(object):
    """ Defines the django model to be used, resultfields returned by
    search and which fields can be used to search for a Subject object
    using the Simplified API """
    # Django model wrapped by this simplified API.
    model = models.Subject
    # Fields included in search results.
    resultfields = FieldSpec('id',
                             'parentnode',
                             'short_name',
                             'long_name',
                             )
    # Fields matched by free-text search.
    searchfields = FieldSpec('short_name',
                             'long_name')
    # Exact-match filters, including filters on the parent Node's fields.
    filters = FilterSpecs(FilterSpec('parentnode'),
                          FilterSpec('short_name'),
                          FilterSpec('long_name'),
                          ForeignFilterSpec('parentnode',  # Node
                                            FilterSpec('parentnode'),
                                            FilterSpec('short_name'),
                                            FilterSpec('long_name')))
| {
"content_hash": "af91f90d6fb64a6691c4d56a667ec8cd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 44.916666666666664,
"alnum_prop": 0.49814471243042674,
"repo_name": "vegarang/devilry-django",
"id": "04df9393ceee3bc6a0f57cf5f24aa1f816830b38",
"size": "1078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/coreutils/simplified/metabases/subject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "697906"
},
{
"name": "Python",
"bytes": "931589"
}
],
"symlink_target": ""
} |
'''
A Simple API util which used to post test reports on Slack Channel.
Steps to Use:
1. Generate a Slack incoming webhook URL by referring to our blog: https://qxf2.com/blog/post-pytest-test-results-on-slack/ & add the URL in our code
2. Generate test report log file by adding ">log/pytest_report.log" command at end of py.test command for e.g. py.test -k example_form -I Y -r F -v > log/pytest_report.log
Note: Your terminal must be pointed to root address of our POM while generating test report file using above command
3. Check you are calling correct report log file or not
'''
import json,os,requests
def post_reports_to_slack(webhook_url=None, report_file=None):
    """Post the pytest report log as a Slack attachment.

    The attachment bar is red if the report contains 'FAILED', green
    otherwise.  Both arguments default to the historic hard-coded values,
    so existing callers are unaffected.

    :param webhook_url: Slack incoming webhook URL (see
        https://qxf2.com/blog/post-pytest-test-results-on-slack/).
    :param report_file: path of the pytest report log; defaults to
        log/pytest_report.log relative to the project root.
    """
    if webhook_url is None:
        #To generate incoming webhook url ref: https://qxf2.com/blog/post-pytest-test-results-on-slack/
        webhook_url = "incoming webhook url" #Add your Slack incoming webhook url here
    if report_file is None:
        #To generate pytest_report.log add ">log/pytest_report.log" at end of the py.test command
        report_file = os.path.abspath(os.path.join(os.path.dirname(__file__),'..','log','pytest_report.log'))
    with open(report_file, "r") as in_file:
        # Prefix every line with '\n' (preserves the original message layout).
        testdata = ''.join('\n' + line for line in in_file)

    # Set Slack Pass Fail bar indicator color according to test results
    bar_color = "#ff0000" if 'FAILED' in testdata else "#36a64f"
    data = {"attachments":[
                {"color": bar_color,
                "title": "Test Report",
                "text": testdata}
            ]}
    json_params_encoded = json.dumps(data)
    slack_response = requests.post(url=webhook_url,data=json_params_encoded,headers={"Content-type":"application/json"})
    if slack_response.text == 'ok':
        print('\n Successfully posted pytest report on Slack channel')
    else:
        print('\n Something went wrong. Unable to post pytest report on Slack channel. Slack Response:', slack_response)
#---USAGE EXAMPLES
# Run directly to post the default report to the configured webhook.
if __name__=='__main__':
    post_reports_to_slack()
| {
"content_hash": "5bb3f2e8db5288e14957c7c87b0c6865",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 171,
"avg_line_length": 48.28260869565217,
"alnum_prop": 0.6208914903196758,
"repo_name": "qxf2/qxf2-page-object-model",
"id": "94aea48676d6426b890361e68c29cc6c604882df",
"size": "2221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/post_test_reports_to_slack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4299"
},
{
"name": "Python",
"bytes": "331188"
}
],
"symlink_target": ""
} |
from .base import (
FileBase,
DirectoryBase,
)
__all__ = ['EmptyFile', 'EmptyDirectory',]
class EmptyFile(FileBase):
    """Rule model for a file with no content; all behavior comes from FileBase."""
    pass
class EmptyDirectory(DirectoryBase):
    """Rule model for a directory with no content; all behavior comes from DirectoryBase."""
    pass
| {
"content_hash": "95f97906406c14e2bc1d5ddab10c04a3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 42,
"avg_line_length": 13.214285714285714,
"alnum_prop": 0.6648648648648648,
"repo_name": "risuoku/wataru",
"id": "02402868eff04d65789541dea57b06f82f3f11d8",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wataru/rules/models/empty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58649"
},
{
"name": "Shell",
"bytes": "216"
},
{
"name": "Smarty",
"bytes": "1910"
}
],
"symlink_target": ""
} |
import utils
import torch
import random
from consts import DEVICE
from torch.optim import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from tqdm import tqdm
from model_att.model import AttmapModel
class AttmapTrainLoader:
    """Loads pickled (label, span_len, attention_map) instances and wraps
    them in shuffled/sequential DataLoaders for training/validation."""
    def __init__(self, random_seed=42, max_num_subwords=10):
        # random_seed: seeds the train/validation split.
        # max_num_subwords: instances with wider attention maps are dropped.
        self.random_seed = random_seed
        self.max_num_subwords = max_num_subwords
    def get_batch_size(self):
        """Fixed batch size used for both train and validation loaders."""
        return 2048
    def get_loader(self, instances, is_train=True):
        """Build a DataLoader of (attention-map features, labels).

        Instances whose attention map has more than ``max_num_subwords``
        subwords (last tensor dimension) are discarded.  Note that
        ``spanlens`` is computed but not included in the dataset.
        """
        batch_size = self.get_batch_size()
        print(f'original training instances: {len(instances)}')
        instances = [instance for instance in instances if instance[2].shape[-1] <= self.max_num_subwords]
        print(f'useful training instances: {len(instances)}')
        gtlabels = [instance[0] for instance in instances]
        spanlens = [instance[1] for instance in instances]
        attention_maps = [instance[2] for instance in instances]
        gtlabels = torch.tensor(gtlabels, dtype=torch.long)
        spanlens = torch.tensor(spanlens, dtype=torch.long)
        # Pad all maps to a common width before stacking into one tensor.
        attmap_features = AttmapModel.pad_attention_maps(attention_maps, max_num_subwords=self.max_num_subwords)
        dataset = TensorDataset(attmap_features, gtlabels)
        # Shuffle only during training; keep order for evaluation.
        sampler = RandomSampler(dataset) if is_train else SequentialSampler(dataset)
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
        return dataloader
    def load_train_data(self, filepath, sample_ratio=-1):
        """Load pickled instances, optionally subsample, and split 90/10
        into (train_loader, valid_loader)."""
        print('Loading training data...',)
        instances = utils.Pickle.load(filepath)
        print(f'OK! {len(instances)} training instances')
        if sample_ratio > 0.0:
            assert sample_ratio < 1.0
            num_instances = int(sample_ratio * len(instances))
            # NOTE(review): random.choices samples WITH replacement, so
            # duplicates are possible - confirm this is intended.
            instances = random.choices(instances, k=num_instances)
            print(f'[Trainer] Sampled {len(instances)} instances.')
        train_instances, valid_instances = train_test_split(instances, test_size=0.1, shuffle=True, random_state=self.random_seed)
        return self.get_loader(train_instances), self.get_loader(valid_instances)
class AttmapTrainer:
    """Trains an AttmapModel with Adam, checkpointing every improving epoch
    and stopping as soon as validation F1 drops."""
    def __init__(self, model: AttmapModel, sample_ratio=-1):
        # sample_ratio: fraction of training instances to use; <= 0 keeps all.
        self.sample_ratio = sample_ratio
        self.model = model.to(DEVICE)
        self.output_dir = model.model_dir
        self.train_loader = AttmapTrainLoader()
        self.optimizer = Adam(self.model.parameters(), lr=1e-3)
        # Persist the model configuration next to the checkpoints.
        model_config_path = self.output_dir / 'model_config.json'
        utils.Json.dump(self.model.config, model_config_path)
    def train(self, path_sampled_train_data, num_epochs=20):
        """Run feature extraction, then the train/validation loop.

        Returns the zero-based index of the best epoch (the one before
        validation F1 first dropped, or the last epoch otherwise).
        """
        # NOTE(review): dead assignment - overwritten two lines below.
        path_train_data = None
        utils.Log.info('Feature extraction...')
        path_train_data = self.model.feature_extractor.generate_train_instances(path_sampled_train_data)
        utils.Log.info(f'Start training: {path_train_data}')
        train_data, valid_data = self.train_loader.load_train_data(path_train_data, sample_ratio=self.sample_ratio)
        # len(DataLoader) is the number of batches; used for tqdm totals
        # and for averaging the epoch loss.
        num_train = len(train_data)
        num_valid = len(valid_data)
        best_epoch = -1
        best_valid_f1 = -1.0
        for epoch in range(1, num_epochs + 1):
            utils.Log.info(f'Epoch [{epoch} / {num_epochs}]')
            # Train
            self.model.train()
            epoch_loss = 0
            for attmap_features, gtlabels in tqdm(train_data, total=num_train, ncols=100):
                self.model.zero_grad()
                gtlabels = gtlabels.to(DEVICE)
                attmap_features = attmap_features.to(DEVICE)
                batch_loss = self.model.get_loss(gtlabels, attmap_features)
                epoch_loss += batch_loss.item()
                batch_loss.backward()
                # Clip gradients to keep updates bounded.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                self.optimizer.step()
            train_loss = epoch_loss / num_train
            utils.Log.info(f'Train loss: {train_loss}')
            # Valid
            self.model.eval()
            gold_labels = []
            pred_labels = []
            with torch.no_grad():
                for attmap_features, gtlabels in tqdm(valid_data, total=num_valid, ncols=100):
                    attmap_features = attmap_features.to(DEVICE)
                    pred_probs = self.model.get_probs(attmap_features).detach().cpu()
                    gold_labels.extend(gtlabels.numpy().tolist())
                    # Threshold probabilities at 0.5 for binary predictions.
                    pred_labels.extend([int(p > .5) for p in pred_probs.numpy().tolist()])
            valid_f1 = f1_score(gold_labels, pred_labels, average="micro")
            utils.Log.info(f'valid f1: {valid_f1}')
            # Early stopping: any drop in validation F1 ends training; the
            # checkpoint saved in the previous iteration is the best one.
            if valid_f1 < best_valid_f1:
                utils.Log.info(f'Stop training. Best epoch: {epoch - 1}')
                break
            best_epoch = epoch - 1
            best_valid_f1 = valid_f1
            ckpt_dict = {
                'epoch': epoch,
                'model': self.model,
                'valid_f1': valid_f1,
                'train_loss': train_loss,
            }
            ckpt_path = self.output_dir / f'epoch-{epoch}.ckpt'
            torch.save(ckpt_dict, ckpt_path)
        return best_epoch
| {
"content_hash": "5ccbb8ab2d345b861b3eeff58a1b84fe",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 130,
"avg_line_length": 42.78861788617886,
"alnum_prop": 0.6148584457533726,
"repo_name": "xgeric/UCPhrase-exp",
"id": "8786963d4b2df37c7ab7d5b23ce8e956c9e3bb89",
"size": "5263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/model_att/trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cython",
"bytes": "27305"
},
{
"name": "Python",
"bytes": "79610"
},
{
"name": "Shell",
"bytes": "115"
}
],
"symlink_target": ""
} |
import test_framework.loginit
#
# Test REST interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
    """Read a 256-bit unsigned integer from file-like *f*.

    The value is stored as eight consecutive little-endian 32-bit words,
    least-significant word first.
    """
    value = 0
    for word_index in range(8):
        word = unpack(b"<I", f.read(4))[0]
        value |= word << (32 * word_index)
    return value
def http_get_call(host, port, path, response_object = 0):
    """Issue a plain HTTP GET against host:port.

    Returns the decoded UTF-8 body, or the raw response object when
    ``response_object`` is truthy.
    """
    connection = http.client.HTTPConnection(host, port)
    connection.request('GET', path)
    reply = connection.getresponse()
    if response_object:
        return reply
    return reply.read().decode('utf-8')
def http_post_call(host, port, path, requestdata = '', response_object = 0):
    """Issue a plain HTTP POST with an optional request body.

    Returns the raw (undecoded) body bytes, or the response object itself
    when ``response_object`` is truthy.
    """
    connection = http.client.HTTPConnection(host, port)
    connection.request('POST', path, requestdata)
    reply = connection.getresponse()
    if response_object:
        return reply
    return reply.read()
class RESTTest (BitcoinTestFramework):
    """End-to-end test of the node's REST interface: /rest/tx, /rest/getutxos,
    /rest/block, /rest/headers, /rest/mempool and /rest/chaininfo."""

    # Separator between a REST path and its format suffix (json/bin/hex).
    FORMAT_SEPARATOR = "."

    def setup_chain(self):
        """Create a fresh 3-node chain directory."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self, split=False):
        """Start three fully-connected nodes and sync them."""
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_full(self.nodes)
        self.is_network_split=False
        self.sync_all()

    def run_test(self):
        url = urllib.parse.urlparse(self.nodes[0].url)
        print("Mining blocks...")
        # Give node 0 a spendable coinbase by maturing it with 100 blocks.
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[2].generate(100)
        self.sync_all()
        assert_equal(self.nodes[0].getbalance(), 50)
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        self.nodes[2].generate(1)
        self.sync_all()
        bb_hash = self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
        # load the latest 0.1 tx over the REST API
        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        ######################################
        # GETUTXOS: query a unspent outpoint #
        ######################################
        json_request = '/checkmempool/'+txid+'-'+str(n)
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is one utxo
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['utxos'][0]['value'], 0.1)

        ################################################
        # GETUTXOS: now query a already spent outpoint #
        ################################################
        json_request = '/checkmempool/'+vintx+'-0'
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)

        #check chainTip response
        assert_equal(json_obj['chaintipHash'], bb_hash)

        #make sure there is no utxo in the response because this outpoint has been spent
        assert_equal(len(json_obj['utxos']), 0)

        #check bitmap: "0" means the single queried outpoint is unavailable
        assert_equal(json_obj['bitmap'], "0")

        ##################################################
        # GETUTXOS: now check both with the same request #
        ##################################################
        json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        # bitmap "10": first outpoint unspent, second already spent
        assert_equal(len(json_obj['utxos']), 1)
        assert_equal(json_obj['bitmap'], "10")

        #test binary response
        bb_hash = self.nodes[0].getbestblockhash()

        # binary getutxos request: version byte, checkmempool flag, then
        # (txid, vout-index) pairs
        binaryRequest = b'\x01\x02'
        binaryRequest += hex_str_to_bytes(txid)
        binaryRequest += pack("i", n)
        binaryRequest += hex_str_to_bytes(vintx)
        binaryRequest += pack("i", 0)

        bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
        output = BytesIO()
        output.write(bin_response)
        output.seek(0)
        chainHeight = unpack("i", output.read(4))[0]
        hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)

        assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
        assert_equal(chainHeight, 102) #chain height must be 102

        ############################
        # GETUTXOS: mempool checks #
        ############################

        # do a tx and don't sync
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
        json_obj = json.loads(json_string)
        vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
        # get n of 0.1 outpoint
        n = 0
        for vout in json_obj['vout']:
            if vout['value'] == 0.1:
                n = vout['n']

        json_request = '/'+txid+'-'+str(n)
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        # without /checkmempool the unconfirmed outpoint must NOT be visible
        assert_equal(len(json_obj['utxos']), 0)

        json_request = '/checkmempool/'+txid+'-'+str(n)
        json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just been added to the mempool

        #do some invalid requests
        json_request = '{"checkmempool'
        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send a invalid json request

        json_request = '{"checkmempool'
        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
        assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request

        response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
        assert_equal(response.status, 500) #must be a 500 because we send a invalid bin request

        #test limits
        json_request = '/checkmempool/'
        for x in range(0, 20):
            json_request += txid+'-'+str(n)+'/'
        json_request = json_request.rstrip("/")
        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 500) #must be a 500 because we exceeding the limits

        json_request = '/checkmempool/'
        for x in range(0, 15):
            json_request += txid+'-'+str(n)+'/'
        json_request = json_request.rstrip("/")
        response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
        assert_equal(response.status, 200) #must be a 200 because 15 outpoints is within the limit

        self.nodes[0].generate(1) #generate block to not affect upcoming tests
        self.sync_all()

        ################
        # /rest/block/ #
        ################

        # check binary format
        response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
        assert_equal(response.status, 200)
        # a serialized block is at least the 80-byte header
        assert_greater_than(int(response.getheader('content-length')), 80)
        response_str = response.read()

        # compare with block header
        response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
        assert_equal(response_header.status, 200)
        assert_equal(int(response_header.getheader('content-length')), 80)
        response_header_str = response_header.read()
        assert_equal(response_str[0:80], response_header_str)

        # check block hex format
        response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
        assert_equal(response_hex.status, 200)
        assert_greater_than(int(response_hex.getheader('content-length')), 160)
        response_hex_str = response_hex.read()
        assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])

        # compare with hex block header
        response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
        assert_equal(response_header_hex.status, 200)
        assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
        response_header_hex_str = response_header_hex.read()
        assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
        assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])

        # check json format
        block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
        block_json_obj = json.loads(block_json_string)
        assert_equal(block_json_obj['hash'], bb_hash)

        # compare with json block header
        response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str, parse_float=Decimal)
        assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
        assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same

        #compare with normal RPC block response
        rpc_block_json = self.nodes[0].getblock(bb_hash)
        assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
        assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
        assert_equal(json_obj[0]['height'], rpc_block_json['height'])
        assert_equal(json_obj[0]['version'], rpc_block_json['version'])
        assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
        assert_equal(json_obj[0]['time'], rpc_block_json['time'])
        assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
        assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
        assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
        assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
        assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])

        #see if we can get 5 headers in one response
        self.nodes[1].generate(5)
        self.sync_all()
        response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
        assert_equal(response_header_json.status, 200)
        response_header_json_str = response_header_json.read().decode('utf-8')
        json_obj = json.loads(response_header_json_str)
        assert_equal(len(json_obj), 5) #now we should have 5 header objects

        # do tx test
        tx_hash = block_json_obj['tx'][0]['txid']
        json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
        json_obj = json.loads(json_string)
        assert_equal(json_obj['txid'], tx_hash)

        # check hex format response
        hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
        assert_equal(hex_string.status, 200)
        assert_greater_than(int(response.getheader('content-length')), 10)

        # check block tx details
        # let's make 3 tx and mine them on node 1
        txs = []
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
        txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
        self.sync_all()

        # check that there are exactly 3 transactions in the TX memory pool before generating the block
        json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['size'], 3)
        # the size of the memory pool should be greater than 3x ~100 bytes
        assert_greater_than(json_obj['bytes'], 300)

        # check that there are our submitted transactions in the TX memory pool
        json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        # the three sends chain off each other, so tx i is spent by tx i+1
        # and depends on tx i-1
        for i, tx in enumerate(txs):
            assert_equal(tx in json_obj, True)
            assert_equal(json_obj[tx]['spentby'], txs[i+1:i+2])
            assert_equal(json_obj[tx]['depends'], txs[i-1:i])

        # now mine the transactions
        newblockhash = self.nodes[1].generate(1)
        self.sync_all()

        #check if the 3 tx show up in the new block
        json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        for tx in json_obj['tx']:
            if not 'coinbase' in tx['vin'][0]: #exclude coinbase
                assert_equal(tx['txid'] in txs, True)

        #check the same but without tx details
        json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
        json_obj = json.loads(json_string)
        for tx in txs:
            assert_equal(tx in json_obj['tx'], True)

        #test rest bestblock
        bb_hash = self.nodes[0].getbestblockhash()

        json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
        json_obj = json.loads(json_string)
        assert_equal(json_obj['bestblockhash'], bb_hash)

if __name__ == '__main__':
    RESTTest ().main ()
| {
"content_hash": "003e926d20cfba95075c691cbf862dab",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 132,
"avg_line_length": 45.61515151515152,
"alnum_prop": 0.6134325383644457,
"repo_name": "BitcoinUnlimited/BitcoinUnlimited",
"id": "44309ac42022e093597bb464c0ce55782e01cca8",
"size": "15326",
"binary": false,
"copies": "2",
"ref": "refs/heads/release",
"path": "qa/rpc-tests/rest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "Batchfile",
"bytes": "30639"
},
{
"name": "C",
"bytes": "1026500"
},
{
"name": "C++",
"bytes": "7400417"
},
{
"name": "CMake",
"bytes": "4435"
},
{
"name": "Dockerfile",
"bytes": "2888"
},
{
"name": "GDB",
"bytes": "455"
},
{
"name": "HTML",
"bytes": "20970"
},
{
"name": "Java",
"bytes": "41235"
},
{
"name": "M4",
"bytes": "261908"
},
{
"name": "Makefile",
"bytes": "121033"
},
{
"name": "Objective-C++",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "1623121"
},
{
"name": "QMake",
"bytes": "2067"
},
{
"name": "Roff",
"bytes": "3821"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "86081"
}
],
"symlink_target": ""
} |
from py2neo.ext.spatial.exceptions import NodeNotFoundError
from py2neo.ext.spatial.util import parse_lat_long
from py2neo.ext.spatial.plugin import NAME_PROPERTY
class TestBase(object):
    """Shared helpers for the py2neo spatial test-suite: existence checks
    and lookups for layers, geometries and their backing nodes."""

    @staticmethod
    def _layer_exists(graph, layer_name):
        # A layer is a node linked from the 'spatial_root' bookkeeping node
        # via a LAYER relationship.
        results = graph.cypher.execute(
            "MATCH (r { name:'spatial_root' }), (r)-[:LAYER]->(n) RETURN n")
        for record in results:
            node = record[0]
            if node.properties['layer'] == layer_name:
                return True
        return False

    @staticmethod
    def _geometry_exists(graph, geometry_name, layer_name):
        # assert a geometry exists in the *application* graph
        resp = graph.find(
            label=layer_name, property_key=NAME_PROPERTY,
            property_value=geometry_name)
        results = [r for r in resp]
        return len(results) == 1

    @staticmethod
    def load(api, data, layer):
        """Create *layer* and insert one geometry per location in *data*.

        Each location is expected to expose ``coords`` (lat/long pair)
        and ``name`` attributes.
        """
        api.create_layer(layer)
        for location in data:
            shape = parse_lat_long(location.coords)
            api.create_geometry(location.name, shape.wkt, layer)

    @staticmethod
    def get_geometry_node(api, geometry_name):
        """Return the spatial-index geometry node that LOCATES the named
        application node."""
        query = """MATCH (application_node {_py2neo_geometry_name:\
{geometry_name}}),
(application_node)<-[:LOCATES]-(geometry_node)
RETURN geometry_node"""
        params = {
            'geometry_name': geometry_name,
        }
        result = api.graph.cypher.execute(query, params)
        record = result[0]
        geometry_node = record[0]
        return geometry_node

    @staticmethod
    def get_application_node(api, geometry_name):
        """Return the application node with the given geometry name.

        Raises NodeNotFoundError when the query returns no record.
        """
        query = """MATCH (application_node {_py2neo_geometry_name:\
{geometry_name}}) RETURN application_node"""
        params = {
            'geometry_name': geometry_name,
        }
        result = api.graph.cypher.execute(query, params)
        try:
            record = result[0]
        # NOTE(review): a missing record from plain list indexing would raise
        # IndexError; this assumes py2neo's record list raises KeyError —
        # confirm before changing.
        except KeyError as exc:
            raise NodeNotFoundError(exc)
        application_node = record[0]
        return application_node
| {
"content_hash": "fb85d71c620f7c2b2e930cbffefde526",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 31.446153846153845,
"alnum_prop": 0.6115459882583171,
"repo_name": "fpieper/py2neo",
"id": "93140d2682ae34eb4be468b6ee6a4ce291abce6c",
"size": "2044",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.0.8",
"path": "test/ext/spatial/basetest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3840"
},
{
"name": "Makefile",
"bytes": "6765"
},
{
"name": "Python",
"bytes": "874722"
},
{
"name": "Shell",
"bytes": "8124"
}
],
"symlink_target": ""
} |
"""
@author: Nicu Tofan <nicu.tofan@gmail.com>
"""
from types import BuiltinFunctionType, TypeType, IntType, LongType, DictType
from types import FloatType, BooleanType, StringType, UnicodeType
import numpy
from PyQt4 import QtGui, QtCore
from pylearn2.datasets.dataset import Dataset
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.train import Train
from pylearn2.models.model import Model
from theano.tensor import Tensor
from theano.gof import Variable
from theano.gof.fg import MissingInputError
from pyl2extra.gui.guihelpers import center
from learn_spot.gui.utils import get_icon
import logging
logger = logging.getLogger(__name__)
interx = 0
STRINGABLE_TYPES = (IntType, LongType, FloatType, TypeType,
BooleanType, StringType, UnicodeType)
PYL2_TYPED = (Model, Train, Dataset)
class ObjectTreeWindow(QtGui.QWidget):
    """
    Presents the content of a PyLearn2 object as an expandable tree.

    TODO: Actually only DenseDesignMatrix is supported right now.

    Parameters
    ----------
    mw : main window
        Used to open dataset/variable viewers on double-click.
    obj_tree : object
        The object to be presented.
    """
    def __init__(self, mw, obj_tree):
        """
        Constructor
        """
        super(ObjectTreeWindow, self).__init__()
        # Display filters, toggled from the bottom toolbar.
        self.yaml_build_ins = False
        self.yaml_methods = False
        self.yaml_filter_hidden = True
        self.yaml_max_depth = 10
        self.mw = mw
        self.obj_tree = obj_tree
        self.tree_widget = None
        # Objects already visited; breaks reference cycles while exploring.
        self.flat_object_list = []
        self.init_ui()
        self.refresh_object_tree()

    def init_ui(self):
        """
        Prepares the GUI.
        """
        self.resize(800, 600)
        center(self)
        self.grid = QtGui.QVBoxLayout()
        self.grid.setSpacing(10)
        caption = "Object Tree "
        if hasattr(self.obj_tree, 'YAML'):
            caption = caption + self.obj_tree.YAML
        self.setWindowTitle(caption)
        tree_widget = QtGui.QTreeWidget()
        header = QtGui.QTreeWidgetItem(["Name", "Type", "Value"])
        tree_widget.setHeaderItem(header)
        tree_widget.setAlternatingRowColors(True)
        # event handlers like custom menu, double click
        tree_widget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        tree_widget.customContextMenuRequested.connect(self.object_tree_contextual)
        tree_widget.itemDoubleClicked.connect(self.object_tree_double_click)

        btn_refresh = QtGui.QToolButton()
        btn_refresh.setIcon(get_icon('refresh_all.png'))
        btn_refresh.setToolTip('Reload the object')
        self.connect(btn_refresh, QtCore.SIGNAL('clicked()'),
                     self.refresh_object_tree)

        btn_methods = QtGui.QToolButton()
        #btn_methods.setIcon(get_icon('refresh_all.png'))
        btn_methods.setToolTip('Hide methods in tree')
        # BUGFIX: the second call used to be setCheckable(value) instead of
        # setChecked(value), so the button never reflected the filter state.
        btn_methods.setCheckable(True)
        btn_methods.setChecked(self.yaml_methods)
        self.connect(btn_methods, QtCore.SIGNAL('toggled(bool)'),
                     self.toggle_methods)

        btn_hidden = QtGui.QToolButton()
        #btn_hidden.setIcon(get_icon('refresh_all.png'))
        # BUGFIX: these two calls were applied to btn_methods (copy-paste),
        # leaving btn_hidden non-checkable.
        btn_hidden.setCheckable(True)
        btn_hidden.setChecked(self.yaml_filter_hidden)
        btn_hidden.setToolTip('Hide hidden components in tree')
        self.connect(btn_hidden, QtCore.SIGNAL('toggled(bool)'),
                     self.toggle_hidden)

        btn_bldins = QtGui.QToolButton()
        #btn_bldins.setIcon(get_icon('refresh_all.png'))
        # BUGFIX: these two calls were applied to btn_methods (copy-paste),
        # leaving btn_bldins non-checkable.
        btn_bldins.setCheckable(True)
        btn_bldins.setChecked(self.yaml_build_ins)
        btn_bldins.setToolTip('Hide build-in components in tree')
        self.connect(btn_bldins, QtCore.SIGNAL('toggled(bool)'),
                     self.toggle_buildins)

        lbl_max_d = QtGui.QLabel('Max depth')
        sp_port_rcv = QtGui.QSpinBox()
        sp_port_rcv.setMinimum(1)
        sp_port_rcv.setMaximum(100)
        sp_port_rcv.setValue(self.yaml_max_depth)
        sp_port_rcv.setToolTip('Port for command and control.')
        self.connect(sp_port_rcv, QtCore.SIGNAL('valueChanged(int)'),
                     self.change_max_depth)

        spacer1 = QtGui.QSpacerItem(20, 40,
                                    QtGui.QSizePolicy.Expanding,
                                    QtGui.QSizePolicy.Fixed)

        grid_btm = QtGui.QHBoxLayout()
        grid_btm.setSpacing(10)
        grid_btm.addSpacerItem(spacer1)
        grid_btm.addWidget(lbl_max_d)
        grid_btm.addWidget(sp_port_rcv)
        grid_btm.addWidget(btn_methods)
        grid_btm.addWidget(btn_hidden)
        grid_btm.addWidget(btn_bldins)
        grid_btm.addWidget(btn_refresh)

        self.grid.addWidget(tree_widget)
        self.grid.addLayout(grid_btm)
        self.setLayout(self.grid)
        self.tree_widget = tree_widget

    def change_max_depth(self, value):
        """
        Slot to change the maximum depth of the tree
        """
        self.yaml_max_depth = value
        self.refresh_object_tree()

    def toggle_buildins(self, value):
        """
        Slot to change buildins visibility
        """
        self.yaml_build_ins = value
        self.refresh_object_tree()

    def toggle_hidden(self, value):
        """
        Slot to change hidden symbols visibility
        """
        self.yaml_filter_hidden = value
        self.refresh_object_tree()

    def toggle_methods(self, value):
        """
        Slot to change methods visibility
        """
        self.yaml_methods = value
        self.refresh_object_tree()

    def object_tree_contextual(self, position):
        """
        Slot for contextual menu in the YAML tree view.
        """
        tree_widget = self.sender()
        indexes = tree_widget.selectedIndexes()
        if len(indexes) != 1:
            return
        menu = QtGui.QMenu(tree_widget)
        act_methods = menu.addAction('Methods')
        act_methods.setCheckable(True)
        act_methods.setChecked(self.yaml_methods)
        sel_act = menu.exec_(tree_widget.mapToGlobal(position))
        if sel_act is None:
            return
        elif sel_act is act_methods:
            self.yaml_methods = act_methods.isChecked()
            tree_widget.clear()
        menu.close()
        menu.deleteLater()
        self.refresh_object_tree()

    def object_tree_double_click(self):
        """
        Slot for double-click in the YAML/PKL tree view.

        Opens the appropriate viewer for datasets and variables.
        """
        tree_widget = self.sender()
        item = tree_widget.currentItem()
        if not hasattr(item, 'tag'):
            return
        tag = item.tag
        if isinstance(tag, DenseDesignMatrix):
            self.mw.show_dataset(tag)
        elif isinstance(tag, Variable) or isinstance(tag, numpy.ndarray):
            self.mw.show_variable(tag)

    def refresh_object_tree(self):
        """
        Reloads the YAML tree.
        """
        def divein_object(parent, oitr, depth):
            """
            Explore an object, dispatching on its type.
            """
            # NOTE: a leftover debug counter ('interx') that printed at a
            # hard-coded iteration was removed here.
            try:
                # only expand the objects first time
                if isinstance(oitr, object):
                    try:
                        if oitr in self.flat_object_list:
                            return
                        self.flat_object_list.append(oitr)
                    except (TypeError, ValueError, AttributeError):
                        pass
                if isinstance(oitr, Tensor):
                    recurse_object(parent, dir(oitr),
                                   self.yaml_max_depth-1, False, oitr)
                elif isinstance(oitr, Variable):
                    recurse_object(parent, dir(oitr),
                                   self.yaml_max_depth-1, False, oitr)
                elif isinstance(oitr, PYL2_TYPED):
                    recurse_object(parent, dir(oitr), depth+1, True, oitr)
                elif isinstance(oitr, DictType):
                    recurse_object(parent, oitr, depth+1, False, oitr)
                elif hasattr(oitr, '__iter__'):
                    recurse_object(parent, oitr, depth+1, False, oitr)
                else:
                    # NOTE: a dedicated isinstance(oitr, Model) branch was
                    # removed; it was unreachable (Model is in PYL2_TYPED)
                    # and identical to this default branch anyway.
                    recurse_object(parent, dir(oitr), depth+1, True, oitr)
            except Exception:
                logger.debug('Failed to dive in object', exc_info=True)

        def recurse_object(parent, obj, depth, force_dict=False, original_obj=None):
            """
            Explore the components of the object.
            """
            if depth >= self.yaml_max_depth:
                return
            try:
                index = -1
                for i in obj:
                    index = index + 1
                    b_recurse = True
                    tag = None
                    # Work out the label and child for dicts, iterables
                    # and plain attribute lists (from dir()).
                    if isinstance(obj, DictType):
                        label = i
                        oitr = obj[i]
                    elif hasattr(obj, '__iter__') and not force_dict:
                        label = str(index)
                        oitr = i
                    else:
                        label = i
                        oitr = getattr(original_obj, i)
                    kind = type(oitr).__name__
                    value = ''
                    # Apply the display filters.
                    if label.startswith('__') and label.endswith('__'):
                        if self.yaml_filter_hidden:
                            continue
                    if isinstance(oitr, BuiltinFunctionType):
                        if not self.yaml_build_ins:
                            continue
                        b_recurse = False
                    if kind == 'method-wrapper' or kind == 'instancemethod':
                        if not self.yaml_methods:
                            continue
                        b_recurse = False
                        label = label + '()'
                    elif kind == 'ndarray':
                        b_recurse = False
                        value = str(oitr.shape)
                        tag = oitr
                    if isinstance(oitr, STRINGABLE_TYPES):
                        value = str(oitr)
                        b_recurse = False
                    elif isinstance(oitr, PYL2_TYPED):
                        value = str(oitr)
                        b_recurse = True
                        tag = oitr
                    elif kind == 'tuple':
                        value = str(oitr)
                        b_recurse = False
                    elif isinstance(oitr, Variable):
                        label = label + ' <' + str(oitr.name) + '>'
                        kind = kind + ' <' + str(oitr.type) + '>'
                        value = str(oitr.eval())
                        if len(value) > 20:
                            value = value[:20] + '...'
                        b_recurse = False
                        tag = oitr
                    if label == 'yaml_src':
                        if len(value) > 20:
                            value = value[:20] + '...'
                    tree_it = QtGui.QTreeWidgetItem(parent,
                                                    [label, kind, value])
                    tree_it.tag = tag
                    if b_recurse:
                        divein_object(tree_it, oitr, depth)
            except (MissingInputError, RuntimeError,
                    AttributeError, TypeError):
                pass

        self.flat_object_list = []
        self.tree_widget.clear()
        root = QtGui.QTreeWidgetItem(self.tree_widget,
                                     ['root', type(self.obj_tree).__name__])
        divein_object(root, self.obj_tree, 0)
        # expand first layer
        root.setExpanded(True)
        child_count = root.childCount()
        for i in range(child_count):
            item = root.child(i)
            item.setExpanded(True)
        self.tree_widget.resizeColumnToContents(0)
        self.tree_widget.resizeColumnToContents(1)
| {
"content_hash": "52b8de3b41388477f817b20fd9aaeff9",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 84,
"avg_line_length": 35.25142857142857,
"alnum_prop": 0.5175068892851353,
"repo_name": "TNick/pyl2extra",
"id": "007b6a6b1838ec4af03fad7ec34d4a7f7b06de56",
"size": "12384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyl2extra/gui/debugger/object_tree_window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "11767"
},
{
"name": "Python",
"bytes": "831896"
},
{
"name": "Shell",
"bytes": "4624"
}
],
"symlink_target": ""
} |
"""
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from copy import copy
import logging
import six
from bitfield.types import BitHandler
from django.db import models
from django.db.models import signals
from django.db.models.query_utils import DeferredAttribute
from django.utils import timezone
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')
UNSAVED = object()
DEFERRED = object()
def sane_repr(*attrs):
    """Build a ``__repr__`` implementation that renders the given attribute
    names (always including ``id`` unless ``id``/``pk`` was requested)."""
    if 'id' not in attrs and 'pk' not in attrs:
        attrs = ('id', ) + attrs

    def _repr(self):
        class_name = type(self).__name__
        rendered = ', '.join(
            '%s=%s' % (name, repr(getattr(self, name, None)))
            for name in attrs
        )
        return u'<%s at 0x%x: %s>' % (class_name, id(self), rendered)

    return _repr
class BaseModel(models.Model):
    """Abstract model base that snapshots field values at load time so
    callers can query ``has_changed``/``old_value`` before saving."""

    class Meta:
        abstract = True

    objects = BaseManager()

    update = update

    def __init__(self, *args, **kwargs):
        super(BaseModel, self).__init__(*args, **kwargs)
        self._update_tracked_data()

    def __getstate__(self):
        d = self.__dict__.copy()
        # Drop the tracked-data snapshot; it may hold unpicklable values and
        # is rebuilt in __setstate__.
        # BUGFIX: the key used to be '_Model__data', but the private
        # attribute is defined on BaseModel, so name mangling stores it as
        # '_BaseModel__data' and the old pop was a no-op.
        d.pop('_BaseModel__data', None)
        return d

    def __hash__(self):
        # Django decided that it shouldnt let us hash objects even though they have
        # memory addresses. We need that behavior, so let's revert.
        if self.pk:
            return models.Model.__hash__(self)
        return id(self)

    def __reduce__(self):
        (model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
        return (model_unpickle, stuff, self.__getstate__())

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._update_tracked_data()

    def __get_field_value(self, field):
        """Return the current raw value of *field*, or DEFERRED when the
        field has not been loaded from the database."""
        if isinstance(type(field).__dict__.get(field.attname), DeferredAttribute):
            return DEFERRED
        if isinstance(field, models.ForeignKey):
            # Track the raw id column rather than triggering a DB fetch.
            return getattr(self, field.column, None)
        return getattr(self, field.attname, None)

    def _update_tracked_data(self):
        "Updates a local copy of attributes values"
        if self.id:
            data = {}
            for f in self._meta.fields:
                # XXX(dcramer): this is how Django determines this (copypasta from Model)
                if isinstance(type(f).__dict__.get(f.attname),
                              DeferredAttribute) or f.column is None:
                    continue
                try:
                    v = self.__get_field_value(f)
                except AttributeError as e:
                    # this case can come up from pickling
                    logging.exception(six.text_type(e))
                else:
                    if isinstance(v, BitHandler):
                        # BitHandler is mutable; snapshot a copy so later
                        # in-place changes are detectable.
                        v = copy(v)
                    data[f.column] = v
            self.__data = data
        else:
            # Unsaved instances have no baseline to compare against.
            self.__data = UNSAVED

    def _update_timestamps(self):
        if hasattr(self, 'date_updated'):
            self.date_updated = timezone.now()

    def has_changed(self, field_name):
        "Returns ``True`` if ``field`` has changed since initialization."
        if self.__data is UNSAVED:
            return False
        field = self._meta.get_field(field_name)
        value = self.__get_field_value(field)
        if value is DEFERRED:
            return False
        # BUGFIX: the snapshot is keyed by column name (see
        # _update_tracked_data), so look up field.column rather than the
        # public field name — they differ for ForeignKeys.
        return self.__data.get(field.column) != value

    def old_value(self, field_name):
        "Returns the previous value of ``field``"
        if self.__data is UNSAVED:
            return None
        # BUGFIX: resolve the field to its column name before looking up the
        # snapshot (it is keyed by column). Deferred fields are never stored,
        # so .get simply yields None for them.
        field = self._meta.get_field(field_name)
        return self.__data.get(field.column)
class Model(BaseModel):
    """Concrete base class adding a bounded 64-bit auto-increment primary key."""

    id = BoundedBigAutoField(primary_key=True)

    class Meta:
        abstract = True

    __repr__ = sane_repr('id')
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
def __model_pre_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_timestamps()
def __model_class_prepared(sender, **kwargs):
    """class_prepared handler: require every BaseModel subclass to declare __core__."""
    if not issubclass(sender, BaseModel):
        return
    if hasattr(sender, '__core__'):
        return
    raise ValueError('{!r} model has not defined __core__'.format(sender))
# Wire the handlers above into Django's model signals. These fire for every
# model class; each handler filters down to BaseModel subclasses itself.
signals.pre_save.connect(__model_pre_save)
signals.post_save.connect(__model_post_save)
signals.class_prepared.connect(__model_class_prepared)
| {
"content_hash": "6193f6a1d6e44b79ace62335cf364b26",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 89,
"avg_line_length": 28.74846625766871,
"alnum_prop": 0.58898847631242,
"repo_name": "looker/sentry",
"id": "b5ca511f817f560b174f705d57acec0331154c2f",
"size": "4686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/db/models/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from scapy.all import *


def _summarize(packet):
    """Return the one-line summary that sniff() prints for each packet."""
    return packet.summary()


# Capture packets indefinitely, printing a summary line per packet.
sniff(prn=_summarize)
| {
"content_hash": "5fb8a094a2ffd0bb38c81f34f33b6fac",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.7068965517241379,
"repo_name": "krkini16/Floascope",
"id": "6a422947a6cc2b2791b062c4fc8e0865ecdc54fe",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7172"
},
{
"name": "JavaScript",
"bytes": "6540"
},
{
"name": "Python",
"bytes": "3393"
}
],
"symlink_target": ""
} |
'''
Regression tests for issue #69
'''
import parsl
import pytest
from parsl.app.app import App
from parsl.tests.configs.local_threads import config
# Reset any previously loaded DataFlowKernel, then load the local-threads
# config so the apps below run against a fresh executor.
parsl.clear()
parsl.load(config)
@App('bash')
def echo_slow_message(msg, sleep=0, fu=None, outputs=[], stderr='std.err', stdout='std.out'):
    """Bash app: sleep, then echo *msg* into the first output file.

    The returned template is filled in by parsl with this function's
    arguments ({0} -> msg; {sleep}/{outputs} -> keyword arguments).
    """
    return 'sleep {sleep}; echo {0} > {outputs[0]}'
@App('python')
def sleep(sleep_dur=0.1):
    """Python app that blocks for *sleep_dur* seconds and returns True."""
    import time as _time
    _time.sleep(sleep_dur)
    return True
@pytest.mark.skip('fails in pytest')
def test_immediate_datafuture():
    """Test DataFuture string representation, for AppFutures launched with parent
    """
    import time

    app_future = echo_slow_message("Hello world", sleep=1, outputs=["hello.1.txt"])
    data_future = app_future.outputs[0]

    time.sleep(0.1)
    state_2 = str(data_future)
    print("State_2 : ", state_2, "Fu:", app_future.parent)
    assert "running" in state_2, "DataFuture should now be running"

    data_future.result()
    state_3 = str(data_future)
    print("State_3 : ", state_3, "Fu:", app_future.parent)
    assert "finished" in state_3, "DataFuture should now be finished"
@pytest.mark.skip('fails in pytest')
def test_delayed_datafuture():
    """Test DataFuture string representation, for AppFutures with delayed parent
    """
    import time

    parent_future = sleep()
    app_future = echo_slow_message("Hello world", sleep=1, fu=parent_future,
                                   outputs=["hello.1.txt"])
    data_future = app_future.outputs[0]

    state_1 = str(data_future)
    print("State_1 : ", state_1, "Fu:", app_future.parent)
    assert "pending" in state_1, "DataFuture should now be pending"

    time.sleep(0.2)
    state_2 = str(data_future)
    print("State_2 : ", state_2, "Fu:", app_future.parent)
    assert "running" in state_2, "DataFuture should now be running"

    data_future.result()
    state_3 = str(data_future)
    print("State_3 : ", state_3, "Fu:", app_future.parent)
    assert "finished" in state_3, "DataFuture should now be finished"
# Allow running this regression test directly, outside of pytest (the
# pytest.mark.skip markers do not apply to direct calls).
if __name__ == "__main__":
    test_immediate_datafuture()
    test_delayed_datafuture()
| {
"content_hash": "7658d568122885e0516504e9925ef4b8",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 93,
"avg_line_length": 26.07792207792208,
"alnum_prop": 0.6254980079681275,
"repo_name": "swift-lang/swift-e-lab",
"id": "32b65f56143f2ce125fc19c27432423b9e120ad8",
"size": "2008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/tests/test_regression/test_69a.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59197"
},
{
"name": "Python",
"bytes": "104539"
},
{
"name": "Shell",
"bytes": "1283"
}
],
"symlink_target": ""
} |
import pytest
from libqtile import layout
import libqtile.manager
import libqtile.config
from ..conftest import no_xinerama
from .layout_utils import assertFocused, assertFocusPath
class BspConfig(object):
    # Minimal qtile configuration for the parametrized tests below: four
    # groups, a single Bsp layout, and no keys/mouse/screens.
    auto_fullscreen = True
    main = None
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        layout.Bsp(),
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = []
    mouse = []
    screens = []
    # Keep focus deterministic during tests.
    follow_mouse_focus = False
def bsp_config(x):
    """Decorator: parametrize a test with BspConfig and disable Xinerama.

    Replaces the original ``bsp_config = lambda x: ...`` -- assigning a
    lambda to a name hides the function's identity in tracebacks (PEP 8
    E731); a ``def`` with the same name is a drop-in replacement.
    """
    return no_xinerama(
        pytest.mark.parametrize("qtile", [BspConfig], indirect=True)(x))
# This currently only tests the window focus cycle
@bsp_config
def test_bsp_window_focus_cycle(qtile):
    """Check that focus cycles through tiled then floating Bsp clients."""
    # setup 3 tiled and two floating clients
    qtile.testWindow("one")
    qtile.testWindow("two")
    for floater in ("float1", "float2"):
        qtile.testWindow(floater)
        qtile.c.window.toggle_floating()
    qtile.testWindow("three")

    # test preconditions, columns adds clients at pos of current, in two stacks
    assert qtile.c.layout.info()['clients'] == ['one', 'three', 'two']
    # last added window has focus
    assertFocused(qtile, "three")

    # assert window focus cycle, according to order in layout
    assertFocusPath(qtile, 'two', 'float1', 'float2', 'one', 'three')
| {
"content_hash": "3213af2ca18507dc9e82284e7cac52fa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 28.54,
"alnum_prop": 0.6692361597757533,
"repo_name": "kynikos/qtile",
"id": "85fe5dab6ae689e1e5b309b7f52c82a4413730b2",
"size": "2485",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/layouts/test_bsp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1135"
},
{
"name": "Python",
"bytes": "1141487"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5603"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 4c32b038f2b
Revises: None
Create Date: 2016-01-10 18:17:13.253747
"""
# revision identifiers, used by Alembic.
revision = '4c32b038f2b'
down_revision = None  # None: this is the first migration in the chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this migration; it is empty and performs no schema changes."""
    pass
def downgrade():
    """Revert this migration; it is empty and performs no schema changes."""
    pass
| {
"content_hash": "494a3005889f89c867544705aaaa6aca",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 18.692307692307693,
"alnum_prop": 0.6790123456790124,
"repo_name": "LIHTU/pythonCoderRepo",
"id": "d43e67d1b6892d96b22d832e2cc05d8efee3751f",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4c32b038f2b_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5159"
},
{
"name": "HTML",
"bytes": "38443"
},
{
"name": "JavaScript",
"bytes": "7229"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "33914"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
'''
Copyright (c) 2014, Helen Ramsden
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import os
import matplotlib.pyplot as plt
import numpy as np
from time import localtime, strftime
import __main__ as main
import h5py
import matplotlib.cm as cm
# Global matplotlib styling for every figure produced by this module:
# thicker axes/lines, large tick and label fonts, Helvetica, and TeX text.
plt.rc('axes', linewidth=1.5)
plt.rc('lines', linewidth=2)
plt.rc('lines', markersize=10)
plt.rc('ytick', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.rc('axes', labelsize=20)
plt.rc(('xtick.major','xtick.minor','ytick.major','ytick.minor'), pad=5,size=5)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('text', usetex=True)
params = {'legend.fontsize':15,'legend.linewidth':0}
plt.rcParams.update(params)
def getcolours(colmap, start, stop, interval):
    """Return ``interval`` evenly spaced colours of ``colmap`` as a list.

    ``colmap`` must be a colormap callable (e.g. ``cm.jet``); colours are
    sampled between ``start`` and ``stop``. Handy for building new colormaps
    or reusing colours from an existing one.
    """
    sample_points = np.linspace(start, stop, interval)
    return list(colmap(sample_points))
def makefilelog():
    # Copy the currently running script into a timestamped log file so the
    # exact code used for a run is preserved alongside its outputs.
    # NOTE(review): the destination directory is a hard-coded user path.
    datestr = strftime("%y%m%d_%H.%M",localtime())
    logfile = '/Users/helenlramsden/MyDocuments/PhDOther/ABAProject/RegistrationProject/PythonLogs/' + os.path.basename(main.__file__) + datestr + '.py'
    os.system("cp %s %s" % (os.path.basename(main.__file__),logfile))
def fixticks(ax, width):
    """Thicken tick marks: y ticks get edge width 2, x ticks get ``width``."""
    for tick in ax.yaxis.get_ticklines():
        tick.set_markeredgewidth(2)
    for tick in ax.xaxis.get_ticklines():
        tick.set_markeredgewidth(width)
def checkOSpath(macfilename, officefilename):
    # Return whichever of the two paths exists on this machine: the Mac path
    # when present, otherwise fall back to the office path. (Python 2 prints.)
    if os.path.exists(macfilename) == True:
        print 'Mac'
        desiredfolder = macfilename
    else:
        print 'Office', macfilename
        desiredfolder = officefilename
    return desiredfolder
def adjust_spines(ax,spines):
    # Show only the spines named in ``spines`` (e.g. ['left','bottom']) and
    # hide the rest; ticks are removed from axes whose spine is hidden.
    for loc, spine in ax.spines.iteritems():
        if loc in spines:
            spine.set_position(('outward',0)) # outward offset 0 (flush)
            #spine.set_smart_bounds(True)
        else:
            spine.set_color('none') # don't draw spine
    # turn off ticks where there is no spine
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        # no yaxis ticks
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        # no xaxis ticks
        ax.xaxis.set_ticks([])
def maketicksthicker(ax):
    """Set every x- and y-tick line on ``ax`` to edge width 3."""
    all_ticks = list(ax.xaxis.get_ticklines()) + list(ax.yaxis.get_ticklines())
    for tick in all_ticks:
        tick.set_markeredgewidth(3)
def makefig(ymax, xlimmax,xrang,yrang):
    # Create a standard 8x10" figure with one axes limited to
    # (0..xlimmax, 0..ymax). ``xrang``/``yrang`` give the number of tick
    # labels to draw; pass the string 'None' to keep the default ticks.
    # Returns (axes, figure, default colour cycle).
    fig = plt.figure(figsize = (8,10)) # 4,4, for small ones
    fig.subplots_adjust(bottom=0.2)
    fig.subplots_adjust(left=0.2)
    ax = fig.add_subplot(1,1,1)
    adjust_spines(ax, ['left','bottom'])
    cols = ['r','b','k','g','m','y']
    ax.set_ylim(0,ymax)
    ax.set_xlim(0,xlimmax)
    if xrang != 'None':
        # ticks stop slightly short of the axis end so the last label fits
        ax.set_xticks(np.linspace(0, xlimmax-xlimmax/20, xrang))
        ax.set_xticklabels([int(x) for x in np.linspace(0, xlimmax, xrang)])
    if yrang != 'None':
        ax.set_yticks(np.linspace(0, ymax-ymax/50, yrang))
        ax.set_yticklabels([int(x) for x in np.linspace(0, ymax, yrang)])
    maketicksthicker(ax)
    return ax,fig,cols
def make_patch_spines_invisible(ax):
    # Keep the axes frame active but hide its background patch and all
    # spines -- useful when stacking twinned axes.
    ax.set_frame_on(True)
    ax.patch.set_visible(False)
    for sp in ax.spines.itervalues():
        sp.set_visible(False)
def formatgraph(ax,roinames, rois,icount, ylabs, ylaboffs):
    # Apply the standard styling to a per-ROI plot: bold TeX x labels from
    # ``roinames``, recomputed y ticks, and the y label for panel ``icount``.
    # NOTE(review): ``rois`` is unused in this body -- confirm it can go.
    [xmin, xmax, ymin, ymax] = ax.axis()
    ax.set_xticklabels([r"$\mathbf{%s}$" % r for r in roinames])
    ax.set_xlim(0., xmax) # -
    #ytlabels = ax.get_yticklabels()
    #ax.set_yticklabels(ytlabels)
    ymin = max(ymin,0)
    ax.set_yticks(getyticks(ymin,ymax))
    ax.set_yticklabels([r"$\mathbf{%s}$" % x for x in getyticks(ymin,ymax)])
    newymin = ymin-(ymax-ymin)/50
    ax.set_ylim(newymin, ymax) # so that you can see zero
    #ax.yaxis.set_major_locator(MaxNLocator(5))
    ax.set_ylabel(ylabs[icount])
    ax.yaxis.set_label_coords(ylaboffs, 0.5) # depends on size of plot
    maketicksthicker(ax)
def getyticks(ymin, ymax):
    """Return tick positions from ymin to ymax with a 'nice' interval.

    The raw quarter-range step is rounded to one significant figure, then
    snapped up to 0.1, 1 or 10 when it falls just below those values.
    """
    raw_step = (ymax - ymin) / 4
    step = float('%.1g' % raw_step)
    if 0.05 < step < 0.1:
        step = 0.1
    if 0.5 < step < 1:
        step = 1
    if 5 < step < 10:
        step = 10
    return np.arange(ymin, ymax + step, step)
def makelegend(ax, fig):
    # Build a figure-level legend from the handles already plotted on
    # ``ax``, with thicker legend lines and a faint borderless frame.
    print 'Making legend'
    handles, labels = ax.get_legend_handles_labels()
    leg = fig.legend(handles,labels, loc="upper right")
    for l in leg.get_lines():l.set_linewidth(2)
    frame = leg.get_frame()
    frame.set_edgecolor('w')
    frame.set_alpha(0.2)
def st(x):
    """Strip surrounding whitespace from ``x`` and split it on tab characters."""
    return x.strip().split('\t')
def figsfolderalias(genename, iseries, oldfolder, newfolder):
    # Symlink all figures matching '<iseries>_<genename>*' from ``oldfolder``
    # into ``newfolder``, creating the destination folder first if needed.
    #print 'Figsfolder'
    newfile = iseries + '_' + genename + '*'
    #newfile = iseries + '_' + genename[0] + '_' + genename[1] + '*' # genename contains name and section number
    if os.path.exists(newfolder):
        os.system("ln -s %s %s" % (oldfolder + newfile, newfolder))
        #os.system("echo %s %s" % (oldfolder + newfile, newfolder))
    else:
        os.system("mkdir %s" % newfolder)
        os.system("ln -s %s %s" % (oldfolder + newfile, newfolder))
def figsfoldercopy(genename, iseries, oldfolder, newfolder):
    # Copy all files matching '<iseries>_*' from ``oldfolder`` into
    # ``newfolder``, creating the folder first if it does not exist.
    # NOTE(review): ``genename`` is unused (see commented-out pattern).
    newfile = iseries + '_*'# + genename + '*'
    # print newfile
    if os.path.exists(newfolder) == False:
        os.system("mkdir %s" % newfolder)
    os.system("cp %s %s" % (oldfolder + newfile, newfolder))
    # #if os.path.exists(oldfolder + newfile):
    # os.system("cp -n %s %s" % (oldfolder + newfile, newfolder))
    # #else: writemissing(newfolder,newfile)
def getallfiles(filefolder,filetype):
    # Function finds all the files within a folder and returns a dictionary of their image series (val) and full filename (key) - can use ABA files or immuno files
    # Writes an 'ls | grep' listing to filelist.txt in the folder, then builds:
    #   allfilesdict: {full filename: image-series id (text before first '_')}
    #   iseriesdict:  {filename minus '.jpg': tab-split fields of the line}
    #print 'Getting filenames'
    filelist =filefolder + 'filelist.txt'
    os.system("ls %s | grep %s > %s" % (filefolder, filetype,filelist))
    allfilesdict = dict((line.strip(),line.strip().split('_')[0]) for line in open(filelist, 'r')) # key = whole filename, val = iseries
    iseriesdict = dict((line.strip().split('\t')[0].split('.jpg')[0], line.strip().split('\t')) for line in open(filelist,'r')) # key = filename without jpg, filename (replace tif with jpg)
    return allfilesdict, iseriesdict
def rebin(a, shape):
    """Downsample 2-D array ``a`` to ``shape`` by averaging equal blocks.

    Both target dimensions must divide the corresponding dimensions of ``a``.
    Based on:
    http://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array
    """
    rows, cols = shape
    blocked = a.reshape(rows, a.shape[0] // rows, cols, a.shape[1] // cols)
    return blocked.mean(-1).mean(1)
def writehdffile(narray, outpath,groupname,dataname):
    # Write ``narray`` (1- or 2-D) to <outpath>.hdf5 under
    # <groupname>/<dataname>, appending if the file already exists
    # ('r+') and refusing to clobber otherwise ('w-').
    if os.path.exists(outpath+ '.hdf5'): rw = 'r+'
    else: rw = 'w-'
    f = h5py.File(outpath + '.hdf5', rw)
    dataset = f.create_dataset(groupname + '/' +dataname, narray.shape, dtype=narray.dtype)
    if len(narray.shape) ==1:dataset[:] = narray
    elif len(narray.shape) ==2:dataset[:,:] = narray
    f.close()
def readhdfdata(filename,group,dataset):
    # Read and return the 2-D array stored at <group><dataset> in ``filename``.
    # groups needs to include subgroups (it is concatenated directly with
    # ``dataset``, so include any trailing slash).
    fopen = h5py.File(filename, 'r+')
    datasetopen = fopen[group + dataset]
    openarray = datasetopen[:,:]
    fopen.close()
    return openarray
def makecbar(fig, im, ax):
    # Attach a shrunken colorbar for image ``im`` and restyle its tick labels.
    # NOTE(review): ``ax`` is unused in this body -- confirm it can go.
    cbar =fig.colorbar(im, shrink=0.4)#,ticks = [np.log(x) for x in [1,10,100,500]])
    ticko = plt.getp(cbar.ax.axes,'yticklabels')
    plt.setp(ticko, color = 'k', fontsize=20)
| {
"content_hash": "03c3d1213c811dde4e9b1976790e4f39",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 186,
"avg_line_length": 37.954337899543376,
"alnum_prop": 0.7121029836381135,
"repo_name": "MattNolanLab/Ramsden_MEC",
"id": "ca69477c798dc29f82bb9baaa3c63573e118e042",
"size": "8312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ABAFunctions/GenericFunctions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "966534"
},
{
"name": "C++",
"bytes": "63886"
},
{
"name": "CSS",
"bytes": "13177"
},
{
"name": "Clean",
"bytes": "7372"
},
{
"name": "M",
"bytes": "23120"
},
{
"name": "Matlab",
"bytes": "334715"
},
{
"name": "Objective-C",
"bytes": "24890"
},
{
"name": "Perl",
"bytes": "24532"
},
{
"name": "Python",
"bytes": "125809"
},
{
"name": "Shell",
"bytes": "13636"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models

from entropy import base
from polymorphic import PolymorphicModel
# Lowercase model names that EpitomeCategory.content_type is allowed to
# reference through its generic foreign key.
CONTENT_MODELS = ['skill', 'interest',]

# Create your models here.
class Agent(PolymorphicModel):
    """Polymorphic base for parties (people and organisations)."""
    name = models.CharField(max_length=100)
class Organisation(Agent):
    """An organisation that a Person can belong to."""
    address = models.CharField(max_length=100)
    number = models.CharField(max_length=10)
    # Whether the organisation has expressed interest in training courses.
    interest_in_courses = models.BooleanField(default=False)

    def __unicode__(self):
        return self.name
class Person(Agent):
    """An individual, optionally linked to a Django auth User."""
    email = models.EmailField()
    position = models.CharField(max_length=50)
    # NOTE(review): blank=True with null=False means forms may omit the
    # organisation but the database still requires one -- confirm whether
    # null=True was intended.
    organisation = models.ForeignKey('Organisation', blank=True, null=False)
    user = models.ForeignKey(User, blank=True, null=True)

    def __unicode__(self):
        return self.name
class Epitome(PolymorphicModel, base.NameMixin, base.SlugMixin, base.TextMixin):
    """Polymorphic base for things a Person can hold (skills, interests)."""
    pass
class EpitomeCategory(base.NameMixin, base.SlugMixin, base.TextMixin):
    """A named category attached (via generic FK) to a Skill or Interest."""
    # Restrict the generic relation to the models listed in CONTENT_MODELS.
    content_type = models.ForeignKey(ContentType, limit_choices_to={'model__in': CONTENT_MODELS },)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
class EpitomeInstance(models.Model):
    """Joins a Person to an Epitome they hold (a skill or an interest)."""
    # NOTE(review): related_name='people' means person.people yields
    # EpitomeInstances, not Person objects -- confirm naming intent.
    person = models.ForeignKey('Person', related_name='people')
    epitome = models.ForeignKey('Epitome')
class Skill(Epitome):
    """A skill a person holds; behaviour comes entirely from Epitome."""
    pass


class Interest(Epitome):
    """An interest a person holds; behaviour comes entirely from Epitome."""
    pass
class ProgramingLanguage(models.Model):
    """A programming language.

    NOTE(review): the class name misspells 'Programming'; renaming would
    require a schema migration, so it is only flagged here.
    """
    language = models.CharField(max_length=100)
class Rating(PolymorphicModel):
    """Polymorphic base for rating schemes applied to EpitomeInstances.

    Subclasses override ``validate`` to constrain the raw text value
    stored on RatingInstance.
    """
    name = models.CharField(max_length=100)

    def validate(self, value):
        """Return True when ``value`` is acceptable for this scheme.

        The base scheme accepts anything that parses as an integer.
        """
        # Catch only conversion failures; the original bare ``except:`` also
        # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
        try:
            int(value)
            return True
        except (TypeError, ValueError):
            return False

    def __unicode__(self):
        return self.name
class OutofTen(Rating):
    """Rating scheme accepting integer values from 1 to 10 inclusive."""

    def validate(self, value):
        """Return True when ``value`` parses as an integer in 1..10."""
        try:
            number = int(value)
        except (TypeError, ValueError):
            return False
        # Compare the *converted* number: the original compared the raw
        # value, so numeric strings (e.g. '5') converted fine but then
        # failed the str-vs-int comparison and were always rejected.
        return 0 < number <= 10

    def printable(self, value):
        """Human-readable form of a (truthy/falsy) value.

        NOTE(review): 'THUMPS' looks like a typo for 'THUMBS', and a
        thumbs up/down rendering is odd for an out-of-ten scale --
        confirm before changing user-visible output.
        """
        if value:
            return 'THUMPS UP!'
        else:
            return 'THUMBS DOWN!'
class RatingInstance(base.CreatedMixin, base.TextMixin):
    """A single rating ``value`` applied to an EpitomeInstance.

    The raw value is stored as text and validated by the associated
    Rating subclass in ``clean``.
    """
    rating = models.ForeignKey('Rating')
    epitome_instance = models.ForeignKey('EpitomeInstance')
    value = models.TextField()
    # text / created_at are provided by TextMixin / CreatedMixin.

    def clean(self):
        # ValidationError is now imported at module level; previously it
        # was referenced without any import, so a failed validation raised
        # NameError instead of the intended ValidationError.
        if not self.rating.validate(self.value):
            raise ValidationError('%r is not a valid value for this rating' % self.value)
| {
"content_hash": "fc388779a9a579fc24a0a3b54023508e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 99,
"avg_line_length": 26.9468085106383,
"alnum_prop": 0.6829846032372681,
"repo_name": "DarrenFrenkel/django-rea-people",
"id": "604bae4aa7da34339816f9f76c747199040afcfc",
"size": "2533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rea_people/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8836"
}
],
"symlink_target": ""
} |
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/requests/requests/issues/3578.
import encodings.idna
from pip._vendor.urllib3.fields import RequestField
from pip._vendor.urllib3.filepost import encode_multipart_formdata
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,               # 301
    codes.found,               # 302
    codes.other,               # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

#: Maximum number of redirects followed before giving up.
DEFAULT_REDIRECT_LIMIT = 30
#: Default chunk sizes (bytes) used when iterating response content.
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
    """Mixin providing URL and body encoding helpers for request objects."""

    @property
    def path_url(self):
        """Build the path URL to use."""
        url = []

        p = urlsplit(self.url)

        # Default to the root path when the URL has none.
        path = p.path
        if not path:
            path = '/'

        url.append(path)

        # Preserve the query string, if any.
        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """
        # Strings/bytes and file-like objects pass through untouched.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is treated as a one-element list.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        # Encode text to UTF-8; leave bytes as-is.
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).
        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields are appended first, before the file parts.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            ft = None  # explicit content type (3/4-tuple form)
            fh = None  # custom part headers (4-tuple form)
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v

            # Accept raw str/bytes content as well as file-like objects.
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            else:
                fdata = fp.read()

            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin managing per-event callback hooks on request objects."""

    def register_hook(self, event, hook):
        """Properly register a hook.

        ``hook`` may be a single callable or an iterable of callables;
        non-callables inside an iterable are silently skipped.

        :raises ValueError: if ``event`` is not a recognised hook name.
        """
        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        # Use callable() instead of isinstance(hook, collections.Callable):
        # the ABC moved to collections.abc in Python 3.3 and the old
        # collections alias was removed entirely in Python 3.10.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """
        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
            method=None, url=None, headers=None, files=None, data=None,
            params=None, auth=None, cookies=None, hooks=None, json=None):

        # Substitute empty containers for omitted collection arguments.
        self.data = data if data is not None else []
        self.files = files if files is not None else []
        self.headers = headers if headers is not None else {}
        self.params = params if params is not None else {}

        # Start from the default hook table, then fold in caller-supplied hooks.
        self.hooks = default_hooks()
        caller_hooks = hooks if hooks is not None else {}
        for event, hook in list(caller_hooks.items()):
            self.register_hook(event=event, hook=hook)

        self.method = method
        self.url = url
        self.json = json
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self,
method=None, url=None, headers=None, files=None, data=None,
params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
import idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/requests/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, collections.Mapping))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
    """Prepare Content-Length header based on request method and body"""
    if body is not None:
        measured = super_len(body)
        if measured:
            # A measurable, non-empty body gets an explicit length;
            # otherwise the caller falls back to chunked transfer.
            self.headers['Content-Length'] = builtin_str(measured)
        return

    # No body: methods that may carry one advertise a zero length,
    # unless a Content-Length header was already set.
    if self.method in ('GET', 'HEAD'):
        return
    if self.headers.get('Content-Length') is None:
        self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
    """Prepares the given HTTP auth data."""
    if auth is None:
        # Fall back to credentials embedded in the URL, if present.
        embedded = get_auth_from_url(self.url)
        auth = embedded if any(embedded) else None

    if not auth:
        return

    if isinstance(auth, tuple) and len(auth) == 2:
        # special-case basic HTTP auth
        auth = HTTPBasicAuth(*auth)

    # Allow auth to make its changes, then mirror them onto self.
    mutated = auth(self)
    self.__dict__.update(mutated.__dict__)

    # Recompute Content-Length
    self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
    """Prepares the given HTTP cookie data.

    Produces a ``Cookie`` header from the given cookies via cookielib.
    Because cookielib will not regenerate an existing header, this is
    effectively one-shot for the life of the
    :class:`PreparedRequest <PreparedRequest>`; later calls do nothing
    unless the "Cookie" header has been removed first.
    """
    if isinstance(cookies, cookielib.CookieJar):
        jar = cookies
    else:
        jar = cookiejar_from_dict(cookies)
    self._cookies = jar

    header_value = get_cookie_header(self._cookies, self)
    if header_value is not None:
        self.headers['Cookie'] = header_value
def prepare_hooks(self, hooks):
    """Prepares the given hooks."""
    # ``hooks`` may legitimately be None; iterate over nothing then,
    # instead of blowing up on a None iteration.
    for event in (hooks or []):
        self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """

    # Attribute names captured by __getstate__ when pickling; ``raw`` is
    # deliberately excluded (the underlying connection is not picklable).
    __attrs__ = [
        '_content', 'status_code', 'headers', 'url', 'history',
        'encoding', 'reason', 'cookies', 'elapsed', 'request'
    ]

    def __init__(self):
        # False means "body not read yet"; becomes bytes/None once consumed.
        self._content = False
        self._content_consumed = False
        self._next = None

        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []

        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})

        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)

        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None

    def __enter__(self):
        # Context-manager support so the connection is released on exit.
        return self

    def __exit__(self, *args):
        self.close()

    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content

        return dict(
            (attr, getattr(self, attr, None))
            for attr in self.__attrs__
        )

    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)

        # pickled objects do not have .raw
        setattr(self, '_content_consumed', True)
        setattr(self, 'raw', None)

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def __bool__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok

    def __nonzero__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        # Python 2 spelling of __bool__; both delegate to ``ok``.
        return self.ok

    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)

    @property
    def ok(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True

    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return ('location' in self.headers and self.status_code in REDIRECT_STATI)

    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect."""
        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))

    @property
    def next(self):
        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
        return self._next

    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library."""
        return chardet.detect(self.content)['encoding']

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.
        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.
        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            # Special case for urllib3.
            if hasattr(self.raw, 'stream'):
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            self._content_consumed = True

        # Consumed with no cached content (``_content`` still the initial
        # bool sentinel) means the stream is gone: nothing left to iterate.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)

        stream_chunks = generate()

        chunks = reused_chunks if self._content_consumed else stream_chunks

        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)

        return chunks

    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        .. note:: This method is not reentrant safe.
        """
        pending = None

        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
            if pending is not None:
                chunk = pending + chunk

            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()

            # A chunk ending mid-line leaves a partial last element; carry
            # it over so it is completed by the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            if self._content_consumed:
                raise RuntimeError(
                    'The content for this response was already consumed')

            if self.status_code == 0 or self.raw is None:
                self._content = None
            else:
                self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()

        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.
        If Response.encoding is None, encoding will be guessed using
        ``chardet``.
        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return str('')

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')

        return content

    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :raises ValueError: If the response body does not contain valid json.
        """
        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(
                        self.content.decode(encoding), **kwargs
                    )
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return complexjson.loads(self.text, **kwargs)

    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""

        header = self.headers.get('link')

        # l = MultiDict()
        l = {}

        if header:
            links = parse_header_links(header)

            # Keyed by the link's "rel" when present, else by its URL.
            for link in links:
                key = link.get('rel') or link.get('url')
                l[key] = link

        return l

    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""

        http_error_msg = ''
        if isinstance(self.reason, bytes):
            # We attempt to decode utf-8 first because some servers
            # choose to localize their reason strings. If the string
            # isn't utf-8, we fall back to iso-8859-1 for all other
            # encodings. (See PR #3538)
            try:
                reason = self.reason.decode('utf-8')
            except UnicodeDecodeError:
                reason = self.reason.decode('iso-8859-1')
        else:
            reason = self.reason

        if 400 <= self.status_code < 500:
            http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)

        elif 500 <= self.status_code < 600:
            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)

        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)

    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.
        *Note: Should not normally need to be called explicitly.*
        """
        if not self._content_consumed:
            self.raw.close()

        release_conn = getattr(self.raw, 'release_conn', None)
        if release_conn is not None:
            release_conn()
| {
"content_hash": "43727a90b3f647f3af7f24c4dcea6b2a",
"timestamp": "",
"source": "github",
"line_count": 946,
"max_line_length": 119,
"avg_line_length": 35.96828752642706,
"alnum_prop": 0.583142302944807,
"repo_name": "Code-In-Action/python-in-action",
"id": "4254fbd20cff7316da191e54fe2e7b1559deafee",
"size": "34051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/todo-api/flask/lib/python3.6/site-packages/pip/_vendor/requests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Python",
"bytes": "2982843"
},
{
"name": "Shell",
"bytes": "3292"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from . import views
urlpatterns = [
    # Authentication endpoints.
    url(r'^login/$', views.login, name='login'),
    url(r'^signup/$', views.signup, name='signup'),
    url(r'^verify/$', views.verify, name='verify'),
    # Sends a message to the site admin.
    url(r'^message/$', views.send_message_admin, name='message'),
    # Detail view keyed by a numeric user id (positional capture group).
    url(r'^(\d+)/$', views.user_info, name='userinfo'),
    url(r'^all/$', views.getall, name='alluser'),
] | {
"content_hash": "e8922759d2b88508e1323c5e70136a0a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.6171284634760705,
"repo_name": "pkumercury/pkucourier",
"id": "3c6d814d50bdf0926bf2c8e3605a00d3e315ff4f",
"size": "397",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "user/urls.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38899"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.plugins import bindings
class OrganizationConfigRepositoriesEndpoint(OrganizationEndpoint):
    def get(self, request, organization):
        """Return the repository providers available for configuration."""
        registry = bindings.get('repository.provider')

        available = []
        for key in registry:
            # Instantiate first, mirroring the registry's factory protocol.
            instance = registry.get(key)(id=key)
            # TODO(jess): figure out better way to exclude this
            if key == 'github_apps':
                continue
            entry = {
                'id': key,
                'name': instance.name,
                'config': instance.get_config(),
            }
            available.append(entry)

        return Response({
            'providers': available,
        })
| {
"content_hash": "d57e7919b743b557a9b23592eece8627",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 32,
"alnum_prop": 0.5915948275862069,
"repo_name": "ifduyue/sentry",
"id": "6ad7946051aeb7e2f81a0b8b870e261164d03311",
"size": "928",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_config_repositories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
from rdr_service.model.code import CodeType
from tests.helpers.unittest_base import BaseTestCase
class CheckPpiDataApiTest(BaseTestCase):
    """API tests for the CheckPpiData endpoint's answer-code matching."""

    def setUp(self):
        super(CheckPpiDataApiTest, self).setUp(with_consent_codes=True)

        # Participant whose stored questionnaire answers the API will check.
        self.participant_summary = self.data_generator.create_database_participant_summary(email='test@example.com')

        # Question/answer code pairs with deliberately mixed case; a '|'
        # joins multiple answer codes for one question.
        questions_and_answers = [
            ('first_question_code', 'first_answer_code'),
            ('Second_CODE', 'ANOTHER_ANSWER'),
            ('LAST_CODE', 'Final_Answer|with_additional_option')
        ]

        questionnaire = self.data_generator.create_database_questionnaire_history()
        for question_code_value, _ in questions_and_answers:
            question_code = self.data_generator.create_database_code(
                value=question_code_value,
                codeType=CodeType.QUESTION
            )
            self.data_generator.create_database_questionnaire_question(
                questionnaireId=questionnaire.questionnaireId,
                questionnaireVersion=questionnaire.version,
                codeId=question_code.codeId
            )

        questionnaire_response = self.data_generator.create_database_questionnaire_response(
            participantId=self.participant_summary.participantId,
            questionnaireId=questionnaire.questionnaireId,
            questionnaireVersion=questionnaire.version
        )
        # Record one answer row per answer code, splitting multi-answer
        # values on '|'.
        for question_index, (_, answer_code_values) in enumerate(questions_and_answers):
            question = questionnaire.questions[question_index]
            for answer_value in answer_code_values.split('|'):
                answer_code = self.data_generator.create_database_code(value=answer_value)
                self.data_generator.create_database_questionnaire_response_answer(
                    questionnaireResponseId=questionnaire_response.questionnaireResponseId,
                    questionId=question.questionnaireQuestionId,
                    valueCodeId=answer_code.codeId
                )

    def test_case_insensitive_answer_code_matching(self):
        """Make sure case doesn't matter when matching answer codes against what the server has"""
        # Same codes as setUp, but with letter case scrambled throughout.
        ppi_check_payload = {
            'ppi_data': {
                self.participant_summary.email: {
                    'fIrSt_QuEsTiOn_CoDe': 'First_Answer_Code',
                    'SECOND_CODE': 'another_answer',
                    'last_code': 'Final_ANSWER|WITH_ADDITIONAL_OPTION'
                }
            }
        }
        response = self.send_post('CheckPpiData', ppi_check_payload)

        response_error_count = response['ppi_results']['test@example.com']['errors_count']
        self.assertEqual(0, response_error_count, 'Differences in case should not cause errors')
| {
"content_hash": "caf3b9bc46bbc3a819a5edc6b50e43ba",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 116,
"avg_line_length": 46.46666666666667,
"alnum_prop": 0.6384505021520803,
"repo_name": "all-of-us/raw-data-repository",
"id": "50d98e987e224274b1078667760562e8b2be65ca",
"size": "2788",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/api_tests/test_ppi_data_check_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1866"
},
{
"name": "Mako",
"bytes": "1715"
},
{
"name": "Python",
"bytes": "17040924"
},
{
"name": "R",
"bytes": "2212"
},
{
"name": "Shell",
"bytes": "92213"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds a headline field to ImageSlide and relaxes caption/link fields."""

    dependencies = [
        ('image_collection', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='imageslide',
            name='caption_headline',
            # NOTE(review): verbose_name is 'caption' here as well as on the
            # ``caption`` field below — looks like a copy-paste; confirm intent.
            field=models.CharField(help_text='This text is displayed as title of the image.', max_length=256, verbose_name='caption', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='imageslide',
            name='caption',
            field=models.CharField(help_text='This text is displayed as description of the image.', max_length=512, verbose_name='caption', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='imageslide',
            name='link',
            field=models.URLField(help_text='Enter URL, that the image should link to.', verbose_name='link'),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "51e2b4281eef5f5224c53337c8d7f6e7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 152,
"avg_line_length": 34.32258064516129,
"alnum_prop": 0.6033834586466166,
"repo_name": "bitmazk/django-image-collection",
"id": "1efe232a2cfdb9992d69494df656776670e75925",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_collection/migrations/0002_auto_20160113_0239.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2188"
},
{
"name": "Makefile",
"bytes": "325"
},
{
"name": "Python",
"bytes": "26740"
}
],
"symlink_target": ""
} |
from kokemomo.plugins.engine.model.km_user_table import KMUser
from kokemomo.plugins.engine.model.km_group_table import KMGroup
from kokemomo.plugins.engine.model.km_role_table import KMRole
class KMUserAdmin:
    """Persists or deletes user, group and role records from request data.

    The three public entry points shared an identical flow; it now lives in
    a single private helper parameterized by the model class.
    """

    @classmethod
    def _save_or_delete(cls, model, data):
        """Update ``model``'s record from ``data``, or delete it.

        If the request carries a "delete" parameter the record identified by
        "id" is removed; otherwise it is loaded, updated and saved.
        """
        id = data.get_request_parameter("id")
        delete = data.get_request_parameter("delete", default=None)
        if delete is None:
            record = model.get(id)
            record.set_data(data)
            record.save()
        else:
            model.delete_by_id(id)

    @classmethod
    def save_user(cls, data):
        """Save or delete a KMUser based on the request parameters."""
        cls._save_or_delete(KMUser, data)

    @classmethod
    def save_group(cls, data):
        """Save or delete a KMGroup based on the request parameters."""
        cls._save_or_delete(KMGroup, data)

    @classmethod
    def save_role(cls, data):
        """Save or delete a KMRole based on the request parameters."""
        cls._save_or_delete(KMRole, data)
| {
"content_hash": "b90fbcafadb5f60ec28d0add1dafaf20",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 30.897435897435898,
"alnum_prop": 0.5900414937759336,
"repo_name": "Kokemomo/Kokemomo",
"id": "09edb3e54b74b932b00f8213c535ca9733de67d9",
"size": "1250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kokemomo/plugins/admin/model/km_user_admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8296"
},
{
"name": "HTML",
"bytes": "213"
},
{
"name": "JavaScript",
"bytes": "25668"
},
{
"name": "Python",
"bytes": "125814"
},
{
"name": "Smarty",
"bytes": "75782"
}
],
"symlink_target": ""
} |
"""
WSGI config for hangout_project project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "hangout_project.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hangout_project.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable, referenced as "hangout_project.wsgi:application".
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "d8bc81abd6fbd7893f4d12817b4c7d43",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 79,
"avg_line_length": 45.1875,
"alnum_prop": 0.7946058091286307,
"repo_name": "desecho/hangout-discontinued",
"id": "fa1d7083c0919c961e1bdf27f0c39dfed766ad38",
"size": "1446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hangout_project/hangout_project/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "370"
},
{
"name": "HTML",
"bytes": "15325"
},
{
"name": "JavaScript",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "30156"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
import hashlib
import random
import email
import email.message
import email.encoders
import StringIO
import sys
""""""""""""""""""""
""""""""""""""""""""
class NullDevice:
    """File-like sink that silently discards everything written to it."""
    def write(self, s):
        # Intentionally a no-op; used to suppress an output stream.
        pass
def submit():
    """Drive one submission: prompt for credentials, fetch a challenge
    from Coursera, then upload the solution for the single configured part.

    Python 2 script: uses print statements and the module-level partIds.
    """
    print '==\n== [sandbox] Submitting Solutions \n=='
    (login, password) = loginPrompt()
    if not login:
        print '!! Submission Cancelled'
        return
    print '\n== Connecting to Coursera ... '
    # Part Identifier
    # (partIdx, sid) = partPrompt()
    # Only one part exists, so the interactive prompt is bypassed.
    partIdx = 0
    sid = partIds[partIdx]
    # Get Challenge
    (login, ch, state, ch_aux) = getChallenge(login, sid) #sid is the "part identifier"
    if((not login) or (not ch) or (not state)):
        # Some error occured, error string in first return element.
        print '\n!! Error: %s\n' % login
        return
    # Attempt Submission with Challenge
    ch_resp = challengeResponse(login, password, ch)
    (result, string) = submitSolution(login, ch_resp, sid, output(partIdx), \
                                      source(partIdx), state, ch_aux)
    print '== %s' % string.strip()
# =========================== LOGIN HELPERS - NO NEED TO CONFIGURE THIS =======================================
def loginPrompt():
    """Prompt the user for login credentials. Returns a tuple (login, password)."""
    # Thin wrapper kept for interface stability; basicPrompt already
    # returns the (login, password) tuple.
    return basicPrompt()
def basicPrompt():
    """Prompt the user for login credentials. Returns a tuple (login, password).

    Note: the "password" is the per-assignment one-time token, not the
    account password (Python 2: raw_input).
    """
    login = raw_input('Login (Email address): ')
    password = raw_input('One-time Password (from the assignment page. This is NOT your own account\'s password): ')
    return login, password
def partPrompt():
    """Interactively pick an assignment part; returns (index, part id)."""
    print 'Hello! These are the assignment parts that you can submit:'
    counter = 0
    for part in partFriendlyNames:
        counter += 1
        print str(counter) + ') ' + partFriendlyNames[counter - 1]
    # Convert the 1-based menu choice back to a 0-based index.
    partIdx = int(raw_input('Please enter which part you want to submit (1-' + str(counter) + '): ')) - 1
    return (partIdx, partIds[partIdx])
def getChallenge(email, sid):
    """Gets the challenge salt from the server. Returns (email,ch,state,ch_aux).

    On a malformed server reply the error message is returned in the first
    element with None for the rest, which is the shape submit() expects.
    """
    url = challenge_url()
    values = {'email_address': email, 'assignment_part_sid': sid, 'response_encoding': 'delim'}
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    text = response.read().strip()
    # text is of the form email|ch|signature
    splits = text.split('|')
    if len(splits) != 9:
        # Bug fix: this used to ``return None``, which crashed the caller
        # while unpacking four values. Surface the error instead.
        message = 'Badly formatted challenge response: %s' % text
        print(message)
        return (message, None, None, None)
    return (splits[2], splits[4], splits[6], splits[8])
def challengeResponse(email, passwd, challenge):
    """Return the SHA1 hex digest of challenge+password.

    The ``email`` argument is unused but kept for interface compatibility.
    """
    sha1 = hashlib.sha1()
    payload = "".join([challenge, passwd])
    if not isinstance(payload, bytes):
        # hashlib requires bytes; Python 2 str already is, text is not.
        payload = payload.encode('utf-8')
    sha1.update(payload)
    # Bug fix: the digest was copied character-by-character into a new
    # string, a pure no-op; return the hex digest directly.
    return sha1.hexdigest()
def challenge_url():
    """Returns the challenge url."""
    # Built from the module-level course slug ``URL``.
    return "https://class.coursera.org/%s/assignment/challenge" % URL
def submit_url():
    """Returns the submission url."""
    # Built from the module-level course slug ``URL``.
    return "https://class.coursera.org/%s/assignment/submit" % URL
def submitSolution(email_address, ch_resp, sid, output, source, state, ch_aux):
    """Submits a solution to the server. Returns (result, string)."""
    # Base64-encode both the program output and the source code so they
    # survive transport as ordinary form fields.
    source_64_msg = email.message.Message()
    source_64_msg.set_payload(source)
    email.encoders.encode_base64(source_64_msg)
    output_64_msg = email.message.Message()
    output_64_msg.set_payload(output)
    email.encoders.encode_base64(output_64_msg)
    values = { 'assignment_part_sid' : sid, \
               'email_address' : email_address, \
               'submission' : output_64_msg.get_payload(), \
               'submission_aux' : source_64_msg.get_payload(), \
               'challenge_response' : ch_resp, \
               'state' : state \
             }
    url = submit_url()
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    string = response.read().strip()
    # ``result`` is not derived from the reply; kept for interface stability.
    result = 0
    return result, string
## This collects the source code (just for logging purposes)
def source(partIdx):
    """Return the raw text of the part's source file (for logging only)."""
    # Bug fix: the file handle was opened without a guarantee of closure;
    # the context manager closes it even if read() raises.
    with open(sourceFiles[partIdx]) as f:
        return f.read()
###### BEGIN ASSIGNMENT SPECIFIC CODE - YOU'LL HAVE TO EDIT THIS ###########
from problem_set0 import hello_world
# Coursera course slug used to build the challenge/submit URLs above.
URL = 'neuraldata-001'
# Parallel lists: server-side part ids, human-readable names, and the
# source file logged with each part's submission.
partIds = ['hello-world']
partFriendlyNames = ['submit your first assignment']
sourceFiles = ['problem_set0.py']
def output(partIdx):
    """Build the submission payload string for the given part index."""
    if partIdx != 0:
        # Unknown part: submit an empty payload, as before.
        return ''
    # Part 0 is the hello_world() string generator.
    result = hello_world()
    if result is None:
        result = 'None'
    return (result + '\n').strip()
# Kick off the submission as soon as the module is executed.
submit()
| {
"content_hash": "626d9b3bae0a429f1144a17975a61711",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 114,
"avg_line_length": 29.808641975308642,
"alnum_prop": 0.6407123628080348,
"repo_name": "ngr/sandbox",
"id": "2c4a9a8e74bf7a7da8b02120ca9dff952f500882",
"size": "4972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuraldata/problem_set0/problem_set0_submit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "64641"
},
{
"name": "Python",
"bytes": "76690"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import common.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Adds an optional, unique phone number to the custom email user."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        # null=True keeps existing rows valid; unique=True enforces one
        # account per phone number with a friendly error message.
        migrations.AddField(
            model_name='emailuser',
            name='phone',
            field=common.fields.PhoneField(error_messages={'unique': 'That phone number is already taken.'}, max_length=15, null=True, unique=True),
        ),
    ]
| {
"content_hash": "aa4409bd4c87c80ee33865ec99202006",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 148,
"avg_line_length": 25.05263157894737,
"alnum_prop": 0.6239495798319328,
"repo_name": "pannkotsky/groupmate",
"id": "a79af35790732a909bd12f4a2840c465bca2ade3",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/apps/users/migrations/0002_emailuser_phone.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22993"
},
{
"name": "HTML",
"bytes": "9200"
},
{
"name": "JavaScript",
"bytes": "56351"
},
{
"name": "Python",
"bytes": "26949"
},
{
"name": "Shell",
"bytes": "56"
},
{
"name": "Vue",
"bytes": "6735"
}
],
"symlink_target": ""
} |
import click
from .BioCompass import download_hits
from .BioCompass import download_mibig
# Root command group; subcommands register themselves via @main.command below.
@click.group()
def main():
    pass
@main.command(name="download-hits")
@click.option('--outputdir', default='./', type=click.Path(exists=True),
              help="Path to save the NCBI clusters.")
@click.argument('mgbfile', type=click.Path(exists=True))
#help="Multigeneblast file containing NCBI references to be downloaded."
def downloadHits(mgbfile, outputdir):
    """Download NCBI clusters listed in the multigeneblast file."""
    download_hits(mgbfile, outputdir)
@main.command(name="download-MIBiG")
@click.option('--outputdir', default='./', type=click.Path(exists=True),
              help="Path to save the MIBig genbank files.")
# NOTE(review): ``unicode`` exists only on Python 2 — this option would raise
# NameError on Python 3; confirm the supported interpreter before porting.
@click.option('--version', type=unicode, default='1.3',
              help="Version of MIBiG to download.")
def downloadMIBiG(outputdir, version):
    """Download MIBiG gbk database."""
    download_mibig(outputdir, version=version)
# Allow running the CLI directly with ``python cli.py``.
if __name__ == "__main__":
    main()
| {
"content_hash": "1eb3dd4482586bf75d90c9f0f7807fcb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 81,
"avg_line_length": 34.41379310344828,
"alnum_prop": 0.6973947895791583,
"repo_name": "tiagolbiotech/BioCompass",
"id": "df4db027f38fb1050acbdee15dd67e7590a56c77",
"size": "1023",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "BioCompass/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5231"
},
{
"name": "Python",
"bytes": "36877"
},
{
"name": "Shell",
"bytes": "977"
}
],
"symlink_target": ""
} |
from Child import Child
from Node import Node # noqa: I201
# Syntax-node definitions for Swift generics, consumed by the gyb syntax
# generator. Each Node describes a grammar production; each Child a slot in it.
GENERIC_NODES = [
    # generic-where-clause -> 'where' requirement-list
    Node('GenericWhereClause', kind='Syntax',
         children=[
             Child('WhereKeyword', kind='WhereToken'),
             Child('RequirementList', kind='GenericRequirementList',
                   collection_element_name='Requirement'),
         ]),

    # Collection of requirements appearing after 'where'.
    Node('GenericRequirementList', kind='SyntaxCollection',
         element='Syntax',
         element_name='GenericRequirement'),

    # same-type-requirement -> type-identifier == type
    Node('SameTypeRequirement', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('LeftTypeIdentifier', kind='Type'),
             # '==' may be lexed spaced or unspaced, so both token kinds match.
             Child('EqualityToken', kind='Token',
                   token_choices=[
                       'SpacedBinaryOperatorToken',
                       'UnspacedBinaryOperatorToken',
                   ]),
             Child('RightTypeIdentifier', kind='Type'),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    Node('GenericParameterList', kind='SyntaxCollection',
         element='GenericParameter'),

    # generic-parameter -> type-name
    #                    | type-name : type-identifier
    #                    | type-name : protocol-composition-type
    Node('GenericParameter', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('Attributes', kind='AttributeList',
                   collection_element_name='Attribute', is_optional=True),
             Child('Name', kind='IdentifierToken'),
             # Colon and inherited type are only present for constrained
             # parameters (second and third productions above).
             Child('Colon', kind='ColonToken',
                   is_optional=True),
             Child('InheritedType', kind='Type',
                   is_optional=True),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),

    # generic-parameter-clause -> '<' generic-parameter-list '>'
    Node('GenericParameterClause', kind='Syntax',
         children=[
             Child('LeftAngleBracket', kind='LeftAngleToken'),
             Child('GenericParameterList', kind='GenericParameterList',
                   collection_element_name='GenericParameter'),
             Child('RightAngleBracket', kind='RightAngleToken'),
         ]),

    # conformance-requirement -> type-identifier : type-identifier
    Node('ConformanceRequirement', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('LeftTypeIdentifier', kind='Type'),
             Child('Colon', kind='ColonToken'),
             Child('RightTypeIdentifier', kind='Type'),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),
]
| {
"content_hash": "1b75115e60a1b4a3ff61cd31ae5d6b8d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 39.394366197183096,
"alnum_prop": 0.5609581694672864,
"repo_name": "xedin/swift",
"id": "e93a80b3bfe059374dd3a1e69d9020ff415e3436",
"size": "2797",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "utils/gyb_syntax_support/GenericNodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12337"
},
{
"name": "C",
"bytes": "228137"
},
{
"name": "C++",
"bytes": "32912364"
},
{
"name": "CMake",
"bytes": "517803"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2438"
},
{
"name": "Emacs Lisp",
"bytes": "57265"
},
{
"name": "LLVM",
"bytes": "70793"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "414130"
},
{
"name": "Objective-C++",
"bytes": "252650"
},
{
"name": "Perl",
"bytes": "2211"
},
{
"name": "Python",
"bytes": "1546631"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2091"
},
{
"name": "Shell",
"bytes": "226197"
},
{
"name": "Swift",
"bytes": "28759466"
},
{
"name": "Vim Script",
"bytes": "16761"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import tensorflow as tf
import datetime
#Start computational time counter
startSimulationTime = datetime.datetime.now()

#Conditions
# 0 do with the full stack
# 1 do the loop in function of the number of images
# 2 do the loop in function of the threshold
condition = 1

#Load data
#Use with the numpy array
# Default stimulus set names; may be overridden by the first two CLI args.
train_cond = "mnist_double_train_3bar_train_3bar"
test_cond = "mnist_double_test_4bar_test_4bar"
if len(sys.argv) > 2:
    train_cond = sys.argv[1]
    test_cond = sys.argv[2]
mnist = np.load("Stimuli/"+train_cond+".npy", encoding="latin1")
mnist_test = np.load("Stimuli/"+test_cond+".npy", encoding="latin1")
# Flatten each image to a 1568-long vector (reshaped to 56x28x1 in the model).
mnist = mnist.reshape((np.shape(mnist)[0],1568))
mnist_test = mnist_test.reshape((np.shape(mnist_test)[0],1568))
# One-hot labels for train/test sets.
idx = np.load('MNIST/mnist_train_label.npy', encoding="latin1")
idx_test = np.load('MNIST/mnist_test_label.npy', encoding="latin1")
# Pre-computed index batches; each entry selects training examples by index.
all_batch = np.load('Stimuli/batch.npy', encoding="latin1")
# all_batch = np.load('../../Stimuli/batch_CNN2.npy', encoding="latin1")
print('Data loaded Train Image: ', np.shape(mnist), ' idx: ',np.shape(idx))
print('Data loaded Test Image: ', np.shape(mnist_test), ' idx: ',np.shape(idx_test))
print('Data loaded batch ', np.shape(all_batch))

#Use with tensorflow tutorial
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#Functions to initialize ReLU neurons
def weight_variable(shape,n):
    """Create a weight Variable named *n*, truncated-normal init (stddev 0.1)."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=n)
def bias_variable(shape):
    """Create a bias Variable filled with 0.1 (keeps ReLUs initially active)."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
#Conv and pooling definitions
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME padding (output size == input)."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max-pooling with stride 2 (halves both spatial dimensions)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
def CNN(batch, mnist, idx, mnist_test, idx_test, best):
    """Train a 2-conv-layer CNN on the examples selected by `batch`
    and return the mean test-set accuracy.

    batch       -- index array selecting rows of `mnist`/`idx` for training
    mnist, idx  -- training images (N x 1568, flattened 56x28) and one-hot labels
    mnist_test, idx_test -- test images and one-hot labels
    best        -- best accuracy seen so far; only referenced by the
                   commented-out model-saving code below
    """
    # Start tensorflow session
    with tf.Graph().as_default(), tf.Session() as sess:
        #Define placeholder, input and output of the program
        x = tf.placeholder(tf.float32, shape=[None, 1568])
        y_ = tf.placeholder(tf.float32, shape=[None, 10])
        # 1568 = 56*28: stacked double-MNIST images, single channel.
        x_image = tf.reshape(x, [-1,56,28,1])

        #-------- Model Definition --------#
        #### First convolutional Layer ####
        W_conv1 = weight_variable([5, 5, 1, 32],"w1")
        b_conv1 = bias_variable([32])
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
        h_pool1 = max_pool_2x2(h_conv1)  # -> 28x14x32

        #### Second convolutional Layer ####
        W_conv2 = weight_variable([5, 5, 32, 64],"w2")
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
        h_pool2 = max_pool_2x2(h_conv2)  # -> 14x7x64

        #### Fully connected layer ####
        W_fc1 = weight_variable([14 * 7 * 64, 1024],"w_fc1")
        b_fc1 = bias_variable([1024])
        h_pool2_flat = tf.reshape(h_pool2, [-1, 14*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        #### Dropout ####
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

        #### Readout layer ####
        W_fc2 = weight_variable([1024, 10],"w_fc2")
        b_fc2 = bias_variable([10])
        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        #-------- Train and Evaluate --------#
        cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        sess.run(tf.global_variables_initializer()) #need to update tensorflow ?!?!?2)
        # sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver()
        size_batch = 50
        #Used with numpy
        # Train in mini-batches of 50 drawn from the `batch` index array;
        # dropout keep_prob=0.5 while training, 1.0 while evaluating.
        for i in range(int(np.shape(batch)[0]/size_batch)):
            start = i*size_batch
            end = start + size_batch
            if i%100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x:mnist[batch[start:end]], y_: idx[batch[start:end]], keep_prob: 1.0})
                print("step %d, training accuracy %g"%(i, train_accuracy))
            train_step.run(feed_dict={x: mnist[batch[start:end]], y_: idx[batch[start:end]], keep_prob: 0.5})
        # Evaluate on the test set in mini-batches and average the accuracies.
        final_accuracy = []
        for i in range(int(np.shape(mnist_test)[0]/size_batch)):
            start = i*size_batch
            end = start + size_batch
            final_accuracy.append(accuracy.eval(feed_dict={x: mnist_test[start:end], y_: idx_test[start:end], keep_prob: 1.0}))
        final_accuracy = np.mean(final_accuracy)
        print("test accuracy %g"%final_accuracy)
        # if final_accuracy > best:
        #     saver.save(sess, '../../ModelWeights/CNN_fast_lamik')
        #     print("model saved")
        sess.close()
        del sess
    return final_accuracy
# Run the experiment selected by `condition` and collect test accuracies.
total_accuracy = []
if condition == 0:
    # Train/evaluate once on the full (last) batch.
    # BUG FIX: CNN() takes a mandatory sixth argument `best`; the original
    # call omitted it and raised TypeError whenever condition == 0. Pass 0 so
    # any accuracy would count as an improvement for the (commented) saver.
    accuracy = CNN(all_batch[-1], mnist, idx, mnist_test, idx_test, 0)
    total_accuracy.append(accuracy)
elif condition == 1:
    # For each batch, retrain up to 15 times and keep the best accuracy seen.
    for batch in all_batch:
        maxi = 0
        first = True
        for j in range(15):
            accuracy = CNN(batch, mnist, idx, mnist_test, idx_test, maxi)
            print("accuracy :", accuracy)
            if accuracy > maxi:
                if first:
                    # First improvement for this batch: record a new entry.
                    total_accuracy.append(accuracy)
                    maxi = accuracy
                    first = False
                else:
                    # Later improvement: overwrite this batch's entry.
                    total_accuracy[-1] = accuracy
                    maxi = accuracy
# NOTE: condition == 2 (threshold sweep) belonged to an old paradigm and has
# been removed; see repository history for the previous implementation.
else:
    print("Please enter a valid condition")
print(total_accuracy)
np.save("results/accuracy_"+train_cond+"_"+test_cond, total_accuracy)

#End computational time counter
endSimulationTime = datetime.datetime.now()
print("Simulation time: " + str(endSimulationTime - startSimulationTime))
| {
"content_hash": "e9f4a918a5bbe22746b8436d3f38d6f8",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 127,
"avg_line_length": 34.6551724137931,
"alnum_prop": 0.5876332622601279,
"repo_name": "michaelStettler/HISI",
"id": "f888dee5bc1d72d5e506976169d96761e68f7780",
"size": "7035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HISI/CNN_double.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "245430"
},
{
"name": "Shell",
"bytes": "6949"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import urllib2
from pprint import pprint
from colorama import init
from colorama import Fore, Back, Style
init()  # colorama: enable ANSI color handling (needed on Windows consoles)

# Vocabulary file, opened append+read so existing entries can be scanned
# and new ones appended.
filepath = "./word.md"
f = open(filepath,"a+")
# Shanbay dictionary endpoint; the looked-up word is appended as query value.
baseurl = 'https://api.shanbay.com/bdc/search/?word='
class JSONObject:
    """Attribute-style wrapper around a dict (intended as a json object_hook)."""
    def __init__(self,d):
        # Adopt the mapping as the instance __dict__ so keys read as attributes.
        self.__dict__ = d
def CheckCommandLine():
    """Exit with usage help when no word argument was given on the CLI."""
    if len(sys.argv) == 1:
        ShowHelp()
        sys.exit(0)
def ShowHelp():
    """Print colored usage text for this script."""
    print(Fore.RED + "Usage: python %s word" %(sys.argv[0]))
    # Reset terminal colors so subsequent output is unaffected.
    print(Style.RESET_ALL)
def GetDefinitonFromShanBay(word):
    """Query the Shanbay API for *word* and return its definition string.

    Exits the process with a colored message when the API has no entry.
    """
    url = baseurl+word
    u = urllib2.urlopen(url)
    #resp = json.loads(u.read().decode('utf-8'),object_hook=JSONObject)
    resp = json.loads(u.read().decode('utf-8'))
    if resp.get('data',0) == {}: # resp is empty
        print
        print(Fore.RED + "Cannot get the definition\nPlease check out your word:"+Fore.GREEN+"%s") %(word)
        print(Style.RESET_ALL)
        sys.exit(0)
    # BUG FIX: `resp` is a plain dict (the JSONObject object_hook above is
    # commented out), so the old attribute access `resp.data.definition`
    # raised AttributeError; index the parsed dict instead.
    s = resp['data']['definition'].encode('utf-8').strip('\n')
    # Collapse internal whitespace runs to single spaces.
    definition = " ".join(s.split())
    return definition
def WriteWordIntoFile(word):
    """Look up *word* and append a padded 'word<TAB>definition' line to the file."""
    definition = GetDefinitonFromShanBay(word)
    # Same output as the old `print >> f` statement: formatted text + newline.
    f.write("%-20s\t%s\n" % (word, definition))
def CheckOutWord(word):
    """Scan the open vocabulary file for *word*; print and return 1 if found.

    Returns 1 when an existing line contains the word, 0 otherwise.
    Relies on the module-level file handle `f` (position advances as a side
    effect of iteration).
    """
    for line in f:
        # Substring match: "cat" also matches inside "catalog" — presumably
        # acceptable for this tool; confirm if exact matching is wanted.
        if line.find(word) != -1:
            print(Style.BRIGHT)
            # Python 2 print statement: the % formatting applies to the
            # parenthesised string before printing.
            print(Fore.YELLOW+"The word you have inserted before, see:\n"+Fore.GREEN+" %s") %(line.strip())
            print(Style.RESET_ALL)
            return 1 #means the word exsit
    return 0 #means the word not exsit
def CheckFileIsEmpty():
    """Return 1 when the vocabulary file has zero bytes, 0 otherwise."""
    return 1 if os.stat(filepath).st_size == 0 else 0
def InsertWord():
    """Top-level flow: validate CLI args, dedupe, then append the new word."""
    CheckCommandLine()
    # get word
    word = sys.argv[1].encode('utf-8').strip()
    # check file
    ret = CheckFileIsEmpty()
    #if the file is not empty, so we need to
    # check whether the word exsited.if exsited
    # just print the word and the definition
    if ret == 0:
        IsExsit = CheckOutWord(word)
        if IsExsit == 1:
            sys.exit(0)
    WriteWordIntoFile(word)

# Runs at import time: this module is meant to be executed as a script.
InsertWord()
| {
"content_hash": "44c8015addb5e6c7fb1cf010ea461c76",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 100,
"avg_line_length": 24.17948717948718,
"alnum_prop": 0.6765641569459173,
"repo_name": "JesseEisen/WordList",
"id": "3b167639a185ea8673392b21c3d593be70b9d65d",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InsertWord.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6662"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
} |
import math
import urllib
import base64
import logging
import datetime
import tweepy
from google.appengine.api import xmpp
from google.appengine.api import urlfetch
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core import serializers
from django.utils import simplejson
from django.shortcuts import render_to_response
from django.utils.translation import ugettext as _
from ask_undrgz.utils import oauth
from ask_undrgz.question.models import Question
from ask_undrgz.question.forms import QuestionForm
def _recent_stupid_questions():
    '''The latest stupid questions.

    Returns up to 30 Question entities ordered newest-answered first.
    Outside DEBUG mode, unanswered questions are filtered out first
    (GAE Query.filter mutates the query in place).
    '''
    logging.debug('In question.views::_recent_stupid_questions()')
    questions = Question.all()
    if not settings.DEBUG:
        # NOTE(review): this log text looks inverted (we are in the
        # non-debug branch) — confirm intent before relying on it.
        logging.debug('Debug mode active')
        questions.filter('answered != ', None)
    questions = questions.order('-answered').fetch(30)
    return questions
def _send_invite_xmpp(user_address):
    '''Sends an invitation to address gtalk.
    '''
    logging.debug('In question.views::_send_invite_xmpp()')
    # Thin wrapper over the App Engine XMPP invite API; no return value.
    xmpp.send_invite(user_address)
def _send_message_xmpp(user_address, message):
    '''Sends a message to the address gtalk.

    Returns True only when the recipient is present and the XMPP send
    reported success for every recipient.
    '''
    logging.debug('In question.views::_send_message_xmpp()')
    chat_message_sent = False
    # Sends an invitation to address gtalk
    #_send_invite_xmpp(user_address)
    # Checks whether the user is online and talk on the list of appengine
    if xmpp.get_presence(user_address):
        logging.info('Presence OK: %s' % (user_address,))
        users_address = [user_address,] #'nycholas@gmail.com']
        status_code = xmpp.send_message(users_address, message)
        # BUG FIX: xmpp.NO_ERROR indicates success, so the original test
        # (status_code != xmpp.NO_ERROR) reported the opposite of the truth.
        # send_message returns a list of statuses when given a list of JIDs,
        # so normalise before comparing.
        if not isinstance(status_code, (list, tuple)):
            status_code = [status_code]
        chat_message_sent = all(s == xmpp.NO_ERROR for s in status_code)
    return chat_message_sent
def index(request):
    '''Main page of the application.

    GET renders the question form plus recent questions. POST validates a
    new question; duplicates are re-fetched and their `asked` timestamp is
    refreshed, then the owner is notified over XMPP and the client is
    redirected to the question's page.
    '''
    logging.debug('In question.views::index()')
    if request.method == 'POST':
        question_form = QuestionForm(request.POST)
        if question_form.is_valid():
            new_question = question_form.save(commit=False)
            if new_question.is_exists():
                # Duplicate ask: reuse the already-stored entity.
                new_question = Question.all() \
                    .filter('ask =', new_question.ask).get()
            new_question.asked = datetime.datetime.now()
            new_question.save()
            # Notify the answerer (id + text) via XMPP.
            _send_message_xmpp('underguiz@ilostmyself.org',
                               '%s: %s' % (new_question.key().id(),
                                           new_question.ask))
            return HttpResponseRedirect(new_question.get_absolute_url())
    else:
        question_form = QuestionForm()
    return render_to_response('index.html', {
        'question_form': question_form,
        'recent_stupid_questions': _recent_stupid_questions(),
    })
def new_ask(request, ask):
    '''Writes a new question and redirects to the response page.

    Creates the Question when it does not exist yet, stamps its `asked`
    time, notifies the answerer via XMPP, then redirects.
    '''
    logging.debug('In question.views::new_answer()')
    # NOTE(review): .get() may return None for an unseen ask, in which case
    # is_exists() would fail — behavior unchanged from the original; verify
    # against the Question model.
    new_question = Question.all().filter('ask = ', ask).get()
    if not new_question.is_exists():
        new_question = Question(ask=ask)
    # BUG FIX: the original rebound `new_question` to datetime.now() here,
    # clobbering the Question entity and crashing on the .save() below.
    # Mirror index(): stamp the `asked` field instead.
    new_question.asked = datetime.datetime.now()
    new_question.save()
    _send_message_xmpp('underguiz@ilostmyself.org',
                       '%s: %s' % (new_question.key().id(),
                                   new_question.ask))
    return HttpResponseRedirect(new_question.get_absolute_url())
def answer(request, ask_slug):
    '''Response page.

    Looks up the question by slug (falling back to a direct key fetch),
    re-pings the answerer over XMPP while unanswered, and renders either
    JSON (AJAX) or the HTML page.
    '''
    logging.debug('In question.views::answer()')
    question = Question.all().filter('ask_slug = ', ask_slug).get()
    if question is None:
        # Fallback: treat the slug as a datastore key.
        question = Question.get(ask_slug)
    if not question.answer:
        d1 = datetime.datetime.now()
        d2 = question.asked
        # Throttle re-notification: only on an exact 5-minute boundary.
        if (abs(d1.minute-d2.minute) % 5) == 0 and d1.second == 0:
            question.asked = d1
            question.save()
            _send_message_xmpp('underguiz@ilostmyself.org',
                               '%s: %s' % (question.key().id(), question.ask))
    if request.is_ajax():
        return HttpResponse(simplejson.dumps(question.to_dict()),
                            mimetype='application/json')
    # Pre-fill the form with the current question for the HTML view.
    initial = {}
    initial['ask'] = question.ask
    initial['ask_slug'] = question.ask_slug
    question_form = QuestionForm(initial=initial)
    return render_to_response('index.html', {
        'question_form': question_form,
        'recent_stupid_questions': _recent_stupid_questions(),
        'ask_slug': question.slugify(),
        'answer': question.answer,
    })
def recent_stupid_questions(request):
    '''Latest stupid questions.

    AJAX-only endpoint: returns the recent questions serialised as JSON;
    non-AJAX requests get an empty response.
    '''
    logging.debug('In question.views::recent_stupid_questions()')
    question_top10 = _recent_stupid_questions()
    if request.is_ajax():
        return HttpResponse(
            simplejson.dumps([q.to_dict() for q in question_top10]),
            mimetype='application/json')
    return HttpResponse('')
def is_online(request):
    '''Checks if the underguiz (clients) is online.

    Expects a 'from' parameter with the XMPP address to probe. Responds
    with JSON for AJAX requests, plain text otherwise.
    '''
    logging.debug('In question.views::is_online()')
    user_address = request.REQUEST.get('from')
    # Brazil - America/Sao_Paulo, ;-)
    dt = datetime.datetime.utcnow()
    # BUG FIX: the original expression
    # (dt.hour - 3 if dt.hour - 3 > 0 else (dt.hour - 3) + 24)
    # mapped 03:00 UTC to hour 24 because the `> 0` guard excluded zero;
    # modulo arithmetic wraps correctly for every hour.
    dt_hour = (dt.hour - 3) % 24
    # Easter Egg! HA!
    easter_egg = False
    if dt_hour == 6 and dt.minute == 6 and dt.second >= 6:
        easter_egg = True
    # Is online?
    chat_message_sent = False
    # If the parameter 'from' (e-mail) GET is empty, then returns user offline
    if not user_address:
        if request.is_ajax():
            return HttpResponse(simplejson.dumps({
                'is_online': chat_message_sent,
                'easter_egg': easter_egg,
            }), mimetype='application/json')
        return HttpResponse('from is required', status=405)
    chat_message_sent = xmpp.get_presence(user_address)
    # If debug mode, then always online
    if settings.DEBUG:
        logging.debug('Debug mode active')
        chat_message_sent = True
    if request.is_ajax():
        return HttpResponse(simplejson.dumps({
            'is_online': chat_message_sent,
            'easter_egg': easter_egg,
        }), mimetype='application/json')
    return HttpResponse(chat_message_sent)
def send_message(request):
    '''Sends message to underguiz (clients).

    Requires 'from' (sender address) and 'body' (text) parameters;
    responds with the boolean send result, JSON-encoded for AJAX.
    '''
    logging.debug('In question.views::send_message()')
    user_address = request.REQUEST.get('from')
    message = request.REQUEST.get('body')
    chat_message_sent = False
    if not user_address or not message:
        # Missing parameters: report failure (405 for non-AJAX clients).
        if request.is_ajax():
            return HttpResponse(simplejson.dumps(chat_message_sent),
                                mimetype='application/json')
        return HttpResponse(_('From and message is required'), status=405)
    chat_message_sent = _send_message_xmpp(user_address, message)
    if request.is_ajax():
        return HttpResponse(simplejson.dumps(chat_message_sent),
                            mimetype='application/json')
    return HttpResponse(chat_message_sent)
def incoming_chat(request):
    '''Mounts a chat with the underguiz (clients).

    XMPP webhook: parses an incoming "ID:answer" chat message, stores the
    answer on the matching Question, echoes it back over XMPP, and then
    posts the question + answer to Twitter. Always returns the literal
    False in the HTTP body.
    '''
    logging.debug('In question.views::incoming_chat()')
    if request.method != 'POST':
        return HttpResponse(_('XMPP requires POST'), status=405)
    st = False
    sender = request.POST.get('from')
    toaddr = request.POST.get('to')
    message = request.POST.get('body')
    if not sender:
        logging.warn('Incoming chat without \'from\' key ignored')
        return HttpResponse(st)
    elif not message:
        logging.warning('Incoming chat without \'body\' key ignored')
        return HttpResponse(st)
    try:
        # Expected message format: "<question id>:<answer text>".
        body = message.split(':')
        if len(body) <= 1 and not body[0].isdigit():
            logging.warn('Message not format ID:MESSAGE: %s' % (body,))
            return HttpResponse(st)
        id_question = int(body[0]) if body[0].isdigit() else 0
        # Re-join in case the answer itself contained ':' characters.
        answer = ''.join(body[1:]).strip()
        question = Question.get_by_id(id_question)
        # If the answer already exists, then concatenates the responses
        # with HTML formatting
        if question.answer:
            # NOTE(review): the '<br />' value is immediately overwritten by
            # '' below (dead assignment kept as-is) — answers are joined with
            # '; ' only.
            space = '<br />' + ' '*16
            space = ''
            question.answer = '%s; %s%s' % (question.answer, space, answer)
        else:
            question.answer = answer
        question.answered = datetime.datetime.now()
        question.save()
        # Send XMPP message
        toaddrs = [toaddr,] #'nycholas@gmail.com']
        sts = xmpp.send_message(toaddrs, answer)
        logging.debug('XMPP status %s', (str(sts),))
    except Exception, e:
        logging.error('Error in send for xmpp: %s' % (str(e),))
        return HttpResponse(st)
    # Send twitter message
    if answer:
        username = settings.TWITTER_USERNAME
        token_key = settings.TWITTER_CONSUMER_KEY
        token_secret = settings.TWITTER_CONSUMER_SECRET
        oauth_token = settings.TWITTER_OAUTH_TOKEN
        oauth_token_secret = settings.TWITTER_OAUTH_TOKEN_SECRET
        token_callback = settings.TWITTER_CALLBACK
        try:
            logging.info('Creating an OAuthHandler instance')
            auth = tweepy.OAuthHandler(token_key, token_secret,
                                       token_callback)
        except Exception, e:
            logging.error('Error: %s' % (str(e),))
            return HttpResponse(st)
        try:
            auth.set_access_token(oauth_token, oauth_token_secret)
            api = tweepy.API(auth)
        except Exception, e:
            logging.error('Error: %s' % (str(e),))
            return HttpResponse(st)
        try:
            s = '%s %s' % (question.ask, answer)
            logging.debug('Send twitter: %s' % (s,))
            # Split over the 140-character tweet limit when needed.
            if int(math.ceil(len(s)/140.0)) > 1:
                s1 = '%s ' % (question.ask,)
                if int(math.ceil(len(s1)/140.0)) > 1:
                    # Even the bare question exceeds one tweet: post a stub.
                    api.update_status(_('+1 stupid question!'))
                else:
                    # NOTE(review): the range is computed from len(s1), so
                    # only the first 140-char slice of the answer is tweeted
                    # — presumably unintended; confirm before changing.
                    for i in range(int(math.ceil(len(s1)/140.0))):
                        api.update_status(
                            s1 + answer[140*i:(140*i)+140])
            else:
                api.update_status(s)
        except Exception, e:
            logging.error('Error in send for twitter: %s' % (str(e),))
            return HttpResponse(st)
    return HttpResponse(st)
def oauth_twitter(request):
    """Start the Twitter OAuth dance: redirect the user to the auth URL."""
    logging.debug('In question.views::oauth_twitter()')
    token_key = settings.TWITTER_CONSUMER_KEY
    token_secret = settings.TWITTER_CONSUMER_SECRET
    token_callback = settings.TWITTER_CALLBACK
    auth = tweepy.OAuthHandler(token_key, token_secret, token_callback)
    #auth = oauth.TwitterClient(token_key, token_secret, token_callback)
    try:
        logging.info(
            'Build a new oauth handler and display authorization url to user')
        auth_url = auth.get_authorization_url()
        logging.debug('auth_url: %s' % (str(auth_url),))
    except Exception, e:
        logging.error('Failed to get a request token: %s' % (str(e),))
        return HttpResponse(_('Failed to get a request token: %(error)s') % \
                            {'error': str(e)})
    logging.info(
        'We must store the request token for later use in the callback page')
    return HttpResponseRedirect(auth_url)
def oauth_twitter_callback(request):
    """Finish the Twitter OAuth dance: exchange the verifier for a token."""
    logging.debug('In question.views::oauth_twitter_callback()')
    oauth_token = request.GET.get('oauth_token', None)
    oauth_verifier = request.GET.get('oauth_verifier', None)
    if not oauth_token or not oauth_verifier:
        logging.warning('Invalid request!')
        return HttpResponse(_('Missing required parameters!'))
    logging.info('Lookup the request token')
    token_key = settings.TWITTER_CONSUMER_KEY
    token_secret = settings.TWITTER_CONSUMER_SECRET
    token_callback = settings.TWITTER_CALLBACK
    logging.info('Rebuild the auth handler')
    auth = tweepy.OAuthHandler(token_key, token_secret, token_callback)
    #auth = oauth.TwitterClient(token_key, token_secret, token_callback)
    logging.info('Fetch the access token')
    try:
        auth.get_access_token(oauth_verifier)
        #auth.set_access_token(auth.access_token.key, auth.access_token.secret)
    except Exception, e:
        logging.error('Failed to get access token: %s', (str(e),))
        return HttpResponse(_('Failed to get access token: %(error)s') % \
                            {'error': str(e)})
    #api = tweepy.API(auth)
    #api.update_status('test from hell!')
    return HttpResponse(True)
def show_me_underguiz(request):
    '''Shows a underguiz (Easter Egg).

    Renders the easter-egg template with the same context as the index page.
    '''
    logging.debug('In question.views::show_me_underguiz()')
    question_form = QuestionForm()
    return render_to_response('easter_egg.html', {
        'question_form': question_form,
        'recent_stupid_questions': _recent_stupid_questions(),
    })
"content_hash": "3e133f0b07bc6aa26689e528d3d7d286",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 79,
"avg_line_length": 37.982608695652175,
"alnum_prop": 0.603556166056166,
"repo_name": "nycholas/ask-undrgz",
"id": "de0198a816c37d151575870c79723273cc6088b8",
"size": "14786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ask-undrgz/ask_undrgz/question/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Module houses class that implements ``BaseIO`` using Dask as an execution engine."""
from modin.core.io import BaseIO
from modin.core.storage_formats.pandas.query_compiler import PandasQueryCompiler
from modin.core.execution.dask.implementations.pandas_on_dask.dataframe.dataframe import (
PandasOnDaskDataframe,
)
from modin.core.execution.dask.implementations.pandas_on_dask.partitioning.partition import (
PandasOnDaskDataframePartition,
)
from modin.core.io import (
CSVDispatcher,
JSONDispatcher,
ParquetDispatcher,
FeatherDispatcher,
SQLDispatcher,
ExcelDispatcher,
)
from modin.core.storage_formats.pandas.parsers import (
PandasCSVParser,
PandasJSONParser,
PandasParquetParser,
PandasFeatherParser,
PandasSQLParser,
PandasExcelParser,
)
from modin.core.execution.dask.common import DaskWrapper
class PandasOnDaskIO(BaseIO):
    """The class implements interface in ``BaseIO`` using Dask as an execution engine."""

    frame_cls = PandasOnDaskDataframe
    query_compiler_cls = PandasQueryCompiler
    # Class dict shared by every dynamically assembled reader class below.
    build_args = dict(
        frame_cls=PandasOnDaskDataframe,
        frame_partition_cls=PandasOnDaskDataframePartition,
        query_compiler_cls=PandasQueryCompiler,
    )

    # Each reader is built with type(): an anonymous class mixing the Dask
    # engine wrapper, the pandas parser, and the format dispatcher, carrying
    # ``build_args`` as class attributes; only its ``read`` method is bound.
    read_csv = type("", (DaskWrapper, PandasCSVParser, CSVDispatcher), build_args).read
    read_json = type(
        "", (DaskWrapper, PandasJSONParser, JSONDispatcher), build_args
    ).read
    read_parquet = type(
        "", (DaskWrapper, PandasParquetParser, ParquetDispatcher), build_args
    ).read
    # Blocked on pandas-dev/pandas#12236. It is faster to default to pandas.
    # read_hdf = type("", (DaskWrapper, PandasHDFParser, HDFReader), build_args).read
    read_feather = type(
        "", (DaskWrapper, PandasFeatherParser, FeatherDispatcher), build_args
    ).read
    read_sql = type("", (DaskWrapper, PandasSQLParser, SQLDispatcher), build_args).read
    read_excel = type(
        "", (DaskWrapper, PandasExcelParser, ExcelDispatcher), build_args
    ).read
| {
"content_hash": "3a77db52c3e6f8d2a9933bb79f095d6f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 93,
"avg_line_length": 36.19642857142857,
"alnum_prop": 0.7296497286630489,
"repo_name": "modin-project/modin",
"id": "4130aa02e13d39735410086f000aae2051048ca9",
"size": "2810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modin/core/execution/dask/implementations/pandas_on_dask/io/io.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2330"
},
{
"name": "Python",
"bytes": "3914783"
},
{
"name": "Shell",
"bytes": "2377"
}
],
"symlink_target": ""
} |
import sys
from optparse import OptionParser
def main(args=None):
    """The main routine.

    :param args: argument vector to parse; defaults to ``sys.argv[1:]``.
    """
    if args is None:
        args = sys.argv[1:]

    parser = OptionParser(usage="vagrant-playbook [OPTIONS]",
                          description="Parser for declarative cluster definition for vagrant, aka vagrant playbooks, and generates a yaml to be used with vagrant-compose plugin.")
    parser.add_option("-f", "--file", dest="file",
                      help="File name containing the vagrant playbook", metavar="PLAYBOOK FILE")
    parser.add_option("-p", "--playbook", dest="playbook",
                      help="String containing the vagrant playbook", metavar="PLAYBOOK STRING")
    # BUG FIX: the `args` vector was computed but never handed to optparse,
    # which silently fell back to sys.argv; pass it explicitly so callers
    # can supply their own argument list.
    (options, args) = parser.parse_args(args)

    if not options.file and not options.playbook:
        parser.error('Playbook not provided. Execute vagrant-playbook -h for available options.')

    # Imported lazily so `--help` works without the package's dependencies.
    from vagrantplaybook.playbook.executor import Executor
    yaml = Executor().execute(yamlfile=options.file, yamlplaybook=options.playbook)
    print (yaml)

if __name__ == "__main__":
    main()
| {
"content_hash": "d70224ee89800b7d284ac7bcf8ed7d6c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 157,
"avg_line_length": 36.689655172413794,
"alnum_prop": 0.6738721804511278,
"repo_name": "fabriziopandini/vagrant-playbook",
"id": "e3312d9f6b974c8b8c24c8a8ee752575d23a9fd4",
"size": "1064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vagrantplaybook/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67737"
}
],
"symlink_target": ""
} |
r"""\
A :class:`CII` object represents a CII message sent from server to client via the CSS-CII protocol.
A :class:`TimelineOption` object describes a timeline selector
and the tick rate of the timeline if that selector is used to request a
timeline from the CSS-TS server. It is carried in a list in the
:data:`~CII.timelines` property of a :class:`CII` message.
Examples
--------
:class:`CII` messages:
.. code-block:: python
>>> from dvbcss.protocol.cii import CII
>>> from dvbcss.protocol import OMIT
>>> jsonCiiMessage = \"""\
... { "protocolVersion":"1.1",
... "contentId":"dvb://1234.5678.01ab",
... "contentIdStatus":"partial"
... }
... \"""
>>> cii = CII.unpack(jsonCiiMessage)
>>> cii.contentId
'dvb://1234.5678.01ab'
>>> print cii.mrsUrl
OMIT
>>> cii.protocolVersion = OMIT
>>> cii.pack()
    '{"contentId":"dvb://1234.5678.01ab","contentIdStatus":"partial"}'
:class:`TimelineOption` within CII messages:
.. code-block:: python
>>> from dvbcss.protocol.cii import CII, TimelineOption
>>> t1 = TimelineOption(timelineSelector="urn:dvb:css:timeline:pts", unitsPerTick=1, unitsPerSecond=90000)
>>> t2 = TimelineOption(timelineSelector="urn:dvb:css:timeline:temi:1:1", unitsPerTick=1, unitsPerSecond=1000)
>>> print t1.timelineSelector, t1.unitsPerTick, t1.unitsPerSecond, t1.accuracy
urn:dvb:css:timeline:pts 1 90000 OMIT
.. code-block:: python
>>> cii = CII(presentationStatus="final", timelines=[t1, t2])
>>> cii.pack()
'{ "presentationStatus": "final",
"timelines": [ { "timelineProperties": {"unitsPerSecond": 90000, "unitsPerTick": 1},
"timelineSelector": "urn:dvb:css:timeline:pts"
},
{ "timelineProperties": {"unitsPerSecond": 1000, "unitsPerTick": 1},
"timelineSelector": "urn:dvb:css:timeline:temi:1:1"
}
]
}'
"""
import json
import re
import copy
import logging
from dvbcss.protocol.transformers import encodeOneOf, decodeOneOf
from dvbcss.protocol.transformers import Transformer
from dvbcss.protocol import OMIT
class CIITransformer(object):
    # Namespace grouping value transformers used when (de)serialising
    # CII message properties.

    class presentationStatus(object):
        """\
        Transformer object for CII message presentationStatus with methods:

        encode(value) : [ "primaryAspect", "secondary1", "secondary2" ... ] -> "primaryAspect secondary1 secondary2 ..."

        decode(value) : "primaryAspect secondary1 secondary2 ..." -> "primaryAspect secondary1 secondary2"

        Raises ValueError if input value is not expected form.
        """
        @classmethod
        def decode(cls,value):
            # NOTE(review): the `[^ ]+` alternative subsumes the named states,
            # so any non-empty space-separated token list matches — presumably
            # intentional to allow extension statuses; confirm against spec.
            if re.match("^(okay|transitioning|fault|[^ ]+)( [^ ]+)*$", value):
                return list(value.split(" "))
            else:
                raise ValueError("Format of presentationStatus not recognised: "+str(value))

        @classmethod
        def encode(cls,value):
            # Reject a bare string early: " ".join would scatter its characters.
            if isinstance(value,str):
                raise ValueError("presentationStatus should have been a list of strings, not a single string.")
            try:
                return " ".join(value)
            except:
                raise ValueError("presentationStatus should have been a list of strings")

    # Allowed literal values for the contentIdStatus property.
    contentIdStatus = Transformer.matchOneOf("partial","final")
class TimelineOption(object):
    def __init__(self, timelineSelector, unitsPerTick, unitsPerSecond, accuracy=OMIT, private=OMIT):
        """\
        Object representing a CSS-CII Timeline Option used in the "timelines" property of a CII message.

        **Initialisation takes the following parameters:**

        :param str timelineSelector: The timeline selector
        :param int unitsPerTick: Denominator of tick rate (in ticks per second) for the corresponding timeline
        :param int unitsPerSecond: Numerator of tick rate (in ticks per second) for the corresponding timeline
        :param accuracy: Optional indication of timeline accuracy
        :type accuracy: :obj:`~dvbcss.protocol.OMIT` or :class:`float`
        :param private: Optional private data.
        :type private: :obj:`~dvbcss.protocol.OMIT` or :ref:`private-data`

        It represents a timeline selector and the tick rate of the timeline
        if that selector is used to request a timeline from the CSS-TS server.
        It is carried in a :class:`list` in the :data:`~CII.timelines` property of a :class:`CII` message.

        The tick rate of the timeline is expressed by the `unitsPerTick` and `unitsPerSecond` values.
        The tick rate in ticks per second is equal to `unitsPerTick` / `unitsPerSecond`.

        Accuracy and private data are optional, but the other fields are mandatory.

        The attributes of the object have the same name as the relevant CII message properties:

        * :data:`timelineSelector`
        * :data:`unitsPerTick`
        * :data:`unitsPerSecond`
        * :data:`accuracy`
        * :data:`private`

        Converting to and from JSON representation is performed using the :func:`pack` method and :func:`unpack` class method.
        Properties set to equal :data:`~dvbcss.protocol.OMIT` will be omitted when the message
        is packed to a JSON representation.
        """
        super(TimelineOption,self).__init__()
        self.timelineSelector = timelineSelector #: (:class:`str`) The timeline selector
        self.unitsPerTick = unitsPerTick #: (:class:`int`) The units per tick of the timeline
        self.unitsPerSecond = unitsPerSecond #: (:class:`int`) The units per second of the timeline
        self.accuracy = accuracy #: (:data:`~dvbcss.protocol.OMIT` or :class:`float`) The accuracy of the timeline with respect to the content in seconds.
        self.private = private #: (read/write :obj:`~dvbcss.protocol.OMIT` or :ref:`private-data`) Optional private data.
        """\
        (:data:`~dvbcss.protocol.OMIT` or :class:`list` of :class:`dict` )

        Private data as a :class:`list` of :class:`dict` objects that can be converted to JSON by :func:`json.dumps`.
        Each dict must contain at least a key called "type" with a URI string as its value.
        """
        # Per-instance logger for this protocol object.
        self.log = logging.getLogger("dvbcss.protocol.cii.TimelineOption")
    @classmethod
    def encode(cls,item):
        """Internal class method used by a :class:`CII` message object when packing to JSON format.

        :param item: (:class:`TimelineOption`) instance to encode.
        :returns: :class:`dict` ready for :func:`json.dumps`.
        """
        # Classmethod wrapper so Transformer.listOf(TimelineOption) can call
        # encoding uniformly; delegates to the instance-level encoder.
        return item._encode()
def _encode(self):
"""Internal method used by a :class:`CII` message object when packing to JSON format."""
struct = {}
struct["timelineSelector"] = self.timelineSelector
substruct={}
substruct["unitsPerTick"]= int(self.unitsPerTick)
substruct["unitsPerSecond"]= int(self.unitsPerSecond)
if self.accuracy != OMIT:
substruct["accuracy"]= float(self.accuracy)
if self.private != OMIT:
struct["private"] = encodeOneOf(self.private,"Not a valid private property.", Transformer.private)
struct["timelineProperties"] = substruct
return struct
def pack(self):
""":returns: string containing JSON presentation of this message."""
return json.dumps(self.encode())
@classmethod
def unpack(cls, msg):
"""\
Convert JSON string representation of this message encoded as a :class:`TimelineOption` object.
:throws ValueError: if not possible.
"""
struct = json.loads(msg)
return cls.decode(struct)
@classmethod
def decode(cls, struct):
"""Internal method used by a :class:`CII` message object when unpacking to JSON format."""
opt={}
if "private" in struct:
opt["private"] = decodeOneOf(struct["private"],"Not a valid private property.", Transformer.private)
substruct = struct["timelineProperties"]
if "accuracy" in substruct:
opt["accuracy"] = decodeOneOf(substruct["accuracy"], "Not a valid accuracy property.", Transformer.float)
try:
return TimelineOption(
timelineSelector = struct["timelineSelector"],
unitsPerTick=substruct["unitsPerTick"],
unitsPerSecond=substruct["unitsPerSecond"],
**opt)
except KeyError:
raise ValueError("Not all fields in TimelineOption present as expected")
def __str__(self):
return self.__repr__()
def __repr__(self):
return 'TimelineOption(timelineSelector="%s", unitsPertick=%d, unitsPerSecond=%d, accuracy=%s private=%s)' %\
(self.timelineSelector, self.unitsPerTick, self.unitsPerSecond, self.accuracy, repr(self.private))
def __eq__(self, other):
"""Equality test, returns true if all properties are equal"""
return self.timelineSelector == other.timelineSelector and \
self.unitsPerTick == other.unitsPerTick and \
self.unitsPerSecond == other.unitsPerSecond and \
self.accuracy == other.accuracy and \
self.private == other.private
def __deepcopy__(self,memo):
properties={}
for name in ["timelineSelector", "unitsPerTick","unitsPerSecond","accuracy","private"]:
original = getattr(self,name)
if original != OMIT:
properties[name] = copy.deepcopy(original,memo)
return TimelineOption(**properties)
class CII(object):
    """\
    Object representing a CII message used in the CSS-CII protocol.
    See :func:`__init__` for the full list of message properties.
    """
    # Map of property names to the list of transformer objects used to encode or decode
    # the various forms each argument can take.  This dict also serves as the
    # definitive list of recognised CII property names (see __init__, pack, unpack).
    _propertyTransform = {
        "protocolVersion" : [Transformer.null, Transformer.matchOneOf("1.1")],
        "mrsUrl" : [Transformer.null, Transformer.uriString],
        "contentId" : [Transformer.null, Transformer.uriString],
        "contentIdStatus" : [Transformer.null, CIITransformer.contentIdStatus],
        "presentationStatus" : [Transformer.null, CIITransformer.presentationStatus],
        "wcUrl" : [Transformer.null, Transformer.uriString],
        "tsUrl" : [Transformer.null, Transformer.uriString],
        "teUrl" : [Transformer.null, Transformer.uriString],
        "timelines" : [Transformer.null, Transformer.listOf(TimelineOption)],
        "private" : [Transformer.null, Transformer.private],
    }
    def __init__(self,**kwargs):
        """\
        Object representing a CII message used in the CSS-CII protocol.

        **Initialisation takes the following parameters, all of which are optional
        keyword arguments that default to** :obj:`~dvbcss.protocol.OMIT` :

        :param protocolVersion: The protocol version being used by the server.
        :param mrsUrl: The URL of an MRS server known to the server.
        :param contentId: Content identifier URI.
        :param contentIdStatus: Content identifier status.
        :param presentationStatus: Presentation status as a :class:`list` of one or more strings, e.g. ``[ "okay" ]``
        :param wcUrl: CSS-WC server endpoint URL in the form "udp://<host>:<port>"
        :param tsUrl: CSS-TS server endpoint WebSocket URL
        :param teUrl: CSS-TE server endpoint WebSocket URL
        :param timelines: List of timeline options.
        :param private: Private data.

        :type protocolVersion: :data:`~dvbcss.protocol.OMIT` or "1.1"
        :type mrsUrl: :data:`~dvbcss.protocol.OMIT` or :class:`str`
        :type contentId: :data:`~dvbcss.protocol.OMIT` or :class:`str`
        :type contentIdStatus: :data:`~dvbcss.protocol.OMIT` or "partial" or "final"
        :type presentationStatus: :data:`~dvbcss.protocol.OMIT` or :class:`list` of :class:`str`
        :type wcUrl: :data:`~dvbcss.protocol.OMIT` or :class:`str`
        :type tsUrl: :data:`~dvbcss.protocol.OMIT` or :class:`str`
        :type teUrl: :data:`~dvbcss.protocol.OMIT` or :class:`str`
        :type timelines: :data:`~dvbcss.protocol.OMIT` or :class:`list` of :class:`TimelineOption`
        :type private: :data:`~dvbcss.protocol.OMIT` or :ref:`private-data`

        :raises ValueError: if a keyword argument name is not a recognised CII property.

        The attributes of the object have the same name as the CII message properties:

        * :data:`protocolVersion`
        * :data:`mrsUrl`
        * :data:`contentId`
        * :data:`contentIdStatus`
        * :data:`presentationStatus`
        * :data:`wcUrl`
        * :data:`tsUrl`
        * :data:`teUrl`
        * :data:`timelines`
        * :data:`private`

        Properties are accessed as attributes of this object using the same name as their JSON property name.
        Converting to and from JSON representation is performed using the :func:`pack` method and :func:`unpack` class method.
        Properties set to equal :data:`~dvbcss.protocol.OMIT` will be omitted when the message
        is packed to a JSON representation.
        """
        super(CII,self).__init__()
        self.log = logging.getLogger("dvbcss.protocol.cii.CII")
        # Every property starts as OMIT; only those passed as keyword
        # arguments (validated below) receive a value.
        self.protocolVersion = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or "1.1") The protocol version being used by the server.
        self.mrsUrl = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`str`) The URL of an MRS server known to the server
        self.contentId = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`str`) Content identifier (URL)
        self.contentIdStatus = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or "partial" or "final") Content identifier status
        self.presentationStatus = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`list` of :class:`str`) Presentation status, e.g. ``[ "okay" ]``
        self.wcUrl = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`str`) CSS-WC server endpoint URL in form "udp://<host>:<port>"
        self.tsUrl = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`str`) CSS-TS server endpoint WebSocket URL
        self.teUrl = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`str`) CSS-TE server endpoint WebSocket URL
        self.timelines = OMIT #: (read/write :data:`~dvbcss.protocol.OMIT` or :class:`list`(:class:`TimelineOption`)) Timeline options
        self.private = OMIT #: (read/write :obj:`~dvbcss.protocol.OMIT` or :ref:`private-data`) Optional private data.
        """\
        (:data:`~dvbcss.protocol.OMIT` or :class:`list` of :class:`dict` )
        Private data as a :class:`list` of :class:`dict` objects that can be converted
        to JSON by :func:`json.dumps`.
        Each dict must contain at least a key called "type" with a URI string as its value.
        """
        for key in kwargs:
            if key in self._propertyTransform:
                setattr(self, key, kwargs[key])
            else:
                raise ValueError("Unrecognised property name provided as parameter: "+key)
    def pack(self):
        """\
        :returns: string containing JSON representation of this message.

        :throws ValueError: if there are values for properties that are not permitted.
        """
        struct = {}
        for name in self._propertyTransform:
            value=getattr(self, name)
            if value != OMIT:
                # Encode with the first transformer in the list that accepts the value.
                transformers=self._propertyTransform[name]
                struct[name]= encodeOneOf(value, "Value of "+name+" property not valid.", *transformers)
        return json.dumps(struct)
    @classmethod
    def unpack(cls, msg):
        """\
        Convert a JSON string representation of this message into a :class:`CII` object.

        :throws ValueError: if not possible.
        """
        struct = json.loads(msg)
        kwargs={}
        # Unrecognised JSON properties are silently ignored; known ones are
        # decoded with the first transformer that accepts them.
        for name in cls._propertyTransform:
            if name in struct:
                value=struct[name]
                transformers=cls._propertyTransform[name]
                kwargs[name] = decodeOneOf(value, "Value of "+name+" property not valid.", *transformers)
        return CII(**kwargs)
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        # Only non-omitted properties appear in the representation.
        return "CII(" + ", ".join(
            [ key+"="+repr(getattr(self, key))
                for key in
                    filter(lambda k:getattr(self,k) != OMIT,
                        self._propertyTransform.keys()
                    )
            ]
        )+")"
    def copy(self):
        """:returns: a copy of this CII object. The copy is a deep copy."""
        properties={}
        for name in self.definedProperties():
            original=getattr(self,name)
            if original != OMIT:
                properties[name] = copy.deepcopy(original)
        return CII(**properties)
    @classmethod
    def diff(cls, old, new):
        """\
        :param old: (:class:`~dvbcss.protocol.cii.CII`) A CII object
        :param new: (:class:`~dvbcss.protocol.cii.CII`) A CII object

        :Returns: CII object representing changes from old to new CII objects.

        If in the new CII object a property is OMITted, that property won't appear in the returned CII object that represents the changes.
        If in the old CII object a property is OMITted, but it has a non-omitted value in the new object, then it is assumed
        to be a change.
        """
        changes=CII()
        for name in cls._propertyTransform:
            if getattr(old,name) != getattr(new,name) and getattr(new,name) != OMIT:
                setattr(changes, name, getattr(new, name))
        return changes
    def definedProperties(self):
        """Returns a list of the names of properties whose value is not OMIT"""
        return [name for name in self._propertyTransform if getattr(self,name) != OMIT]
    @classmethod
    def allProperties(cls):
        """Returns a list of all property names, whether OMITted or not"""
        return [name for name in cls._propertyTransform]
    def update(self, diff):
        """\
        Updates this CII object with the values of any properties (that are not omitted) in the CII object provided as the `diff` argument.

        Note that this changes this object (in place).

        :param diff: (:class:`~dvbcss.protocol.cii.CII`) A CII object whose properties (that are not omitted) will be used to update this CII object.
        """
        for name in self._propertyTransform:
            value = getattr(diff,name)
            if value != OMIT:
                setattr(self,name,value)
    def combine(self, diff):
        """\
        Copies this CII object, and updates that copy with any properties (that are not omitted) in the CII object supplied as the `diff` argument.
        The updated copy is then returned.

        :param diff: (:class:`~dvbcss.protocol.cii.CII`) A CII object whose properties (that are not omitted) will be used to update the copy before it is returned.

        ``new = old.combine(diff)`` is equivalent to the following operations:

        .. code-block:: python

            new = old.copy()
            new.update(diff)
        """
        new=self.copy()
        new.update(diff)
        return new
# Public names exported by ``from dvbcss.protocol.cii import *``.
__all__ = [
    "CII",
    "TimelineOption",
]
| {
"content_hash": "9f710648b356eefc51d5f0b8eb7416d6",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 170,
"avg_line_length": 44.625,
"alnum_prop": 0.6120193531958238,
"repo_name": "bbc/pydvbcss",
"id": "88505f12ae89a3265133980ee9423e4fddf7859f",
"size": "20259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dvbcss/protocol/cii.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "411110"
}
],
"symlink_target": ""
} |
from lib.RequestValidator import RequestValidator
class Test(RequestValidator):
    """Endpoint validation spec for ``GET https://api.coupondunia.in/timestamp``."""

    # Declarative request/response expectations consumed by RequestValidator.
    rules = {
        'request': {
            'method': 'GET',
            'protocol': 'HTTPS',
            'dns': 'api.coupondunia.in',
            'path': '/timestamp',
            'headers': {
                # The value names a method on this class that supplies the header(s).
                'custom_header': 'additional_header'
            },
            'content_type': None
        },
        'response': {
            'content_type': 'json',  # supported: json, xml
            'expected_response': {
                'timestamp': '\d+',
            },
            'headers': {}
        }
    }

    def __init__(self, configuration, logger):
        # No endpoint-specific initialisation; defer entirely to the base validator.
        super().__init__(configuration, logger)

    def additional_header(self):
        """Extra request headers injected via the 'custom_header' rule above."""
        return {'test': 'test'}
| {
"content_hash": "0f3e72911de335ffef09a5f496779280",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 52,
"avg_line_length": 22.54054054054054,
"alnum_prop": 0.44364508393285373,
"repo_name": "ratanphayade/APIValidator",
"id": "c2788c3c452f2a0f3f614203486ede1fcb02757a",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Endpoints/Test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28962"
}
],
"symlink_target": ""
} |
from unittest import TestCase
import json
import os
from werkzeug.wrappers import BaseResponse
from werkzeug.test import Client
from rxjson import Rx
from rest_api_framework.controllers import WSGIDispatcher
from .app import ApiApp
HERE = os.path.dirname(os.path.abspath(__file__))
class TestSpore(TestCase):
    """Checks on the SPORE description document served at ``/spore/``."""

    @staticmethod
    def _spore_client():
        # Build a fresh WSGI test client wrapping the ApiApp dispatcher.
        dispatcher = WSGIDispatcher([ApiApp], name='ApiApp', version='1.0',
                                    base_url='http://apiapp.com')
        return Client(dispatcher, response_wrapper=BaseResponse)

    def test_spore(self):
        resp = self._spore_client().get("/spore/")
        self.assertEqual(resp.status_code, 200)
        spore = json.loads(resp.data)
        # Basic top-level fields.
        self.assertEqual(spore['name'], 'ApiApp')
        self.assertEqual(spore['base_url'], 'http://apiapp.com')
        self.assertEqual(spore['version'], '1.0')
        # Generated method descriptions.
        self.assertIn('list_address', spore['methods'])
        self.assertEqual('/address/:identifier/',
                         spore['methods']['get_address']['path'])
        self.assertIn(
            'identifier',
            spore['methods']['get_address']['required_params'])

    def test_rxjson_spore(self):
        # Validate the served document against the bundled Rx schema.
        rx = Rx.Factory({'register_core_types': True})
        resp = self._spore_client().get("/spore/")
        with open(os.path.join(HERE, 'spore_validation.rx')) as f:
            spore_json_schema = json.loads(f.read())
        spore_schema = rx.make_schema(spore_json_schema)
        self.assertTrue(spore_schema.check(json.loads(resp.data)))
| {
"content_hash": "f02b4a05294d741e14fff48a7bf5274f",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 78,
"avg_line_length": 35.61224489795919,
"alnum_prop": 0.5925501432664756,
"repo_name": "boblefrag/python-rest-api-framework",
"id": "5c7458c663c3eee047f489944b0b0f77dab0671b",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "90887"
}
],
"symlink_target": ""
} |
from federation_api.application.helper import StarFleetsHelper
class PeopleHelper(StarFleetsHelper):
    """Helper for the people resource; currently inherits all behaviour from StarFleetsHelper unchanged."""
    pass
| {
"content_hash": "8d609f069f3d4fe74dc63ea9eaac5a43",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 22.4,
"alnum_prop": 0.8303571428571429,
"repo_name": "practo/federation",
"id": "56b55f36411ab4f03fa2eee6184e6eb431feee44",
"size": "112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "federation_api/people/helper/people_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "44327"
}
],
"symlink_target": ""
} |
"""Base class for the Project Euler problems in Python"""
##@file
#@ingroup
#
# @brief
# Base class for the Project Euler problems in Python
#
#
# @version $$
# @details
#
# @b Description
# This module implements a base class, that can be used
# for all project eurler solutions.
#
# @b Usage
# Usage:
# Extend this class to implement a test
import math
import time
import sys
class ProblemBaseClass(object):
    """Common scaffolding for Project Euler problem solutions.

    Subclasses override :meth:`compute` (and optionally the lifecycle
    hooks) and leave the final answer in ``self.result``.
    """

    def __init__(self):
        # Holds the computed answer once compute() has run.
        self.result = None

    def label(self):
        # NOTE(review): this rebinds the attribute ``label`` over the method
        # itself on first call; preserved as-is to keep behaviour identical.
        self.label = "No label"

    def setup(self):
        """Setup hook for all solutions.

        Override to process options, etc.
        """

    def initiate(self, *_args, **_kwargs):
        """Initiation hook for all solutions.

        Override to load data or initiate connections (e.g. to Spark).
        """

    def compute(self, *_args, **_kwargs):
        """Compute the solution for the given problem.

        Must be overridden by subclasses; the base implementation does nothing.
        """
        return None

    def teardown(self):
        """Teardown hook for all solutions.

        Override to tidy up resources.
        """

    def isPrime(self, number):
        """Return True when *number* is prime, False otherwise.

        Trial division testing only candidates of the form 6k +/- 1
        (see http://en.wikipedia.org/wiki/Primality_test#Naive_methods).
        """
        if number <= 3:
            return number > 1
        if number % 2 == 0 or number % 3 == 0:
            return False
        limit = int(number ** 0.5) + 1
        for candidate in range(5, limit, 6):
            if number % candidate == 0 or number % (candidate + 2) == 0:
                return False
        return True
if __name__ == '__main__':
    # BUG FIX: the original instantiated an undefined name ``problem1`` and
    # used the Python-2-only ``print`` statement.  Run the base class as a
    # smoke test instead (compute() returns None here); ``print(x)`` with a
    # single argument behaves identically on Python 2 and 3.
    problem = ProblemBaseClass()
    problem.compute()
    print(problem.result)
| {
"content_hash": "ee4e4f1e0b0d06d6e81448a87ccf958d",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 99,
"avg_line_length": 23.65432098765432,
"alnum_prop": 0.5652400835073069,
"repo_name": "jakubczaplicki/projecteuler",
"id": "30f2cf89b0ad5a516e768dc0436b47dcd532e78f",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problembaseclass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "538"
},
{
"name": "Python",
"bytes": "31787"
}
],
"symlink_target": ""
} |
from io import BytesIO
from .compat import py2to3
from .platform import IRONPYTHON
from .robottypes import is_string
# Message raised when no usable ElementTree implementation can be imported.
_ERROR = 'No valid ElementTree XML parser module found'
# Pick an ElementTree implementation, preferring the C-accelerated parser and
# falling back through progressively older distributions.
if not IRONPYTHON:
    try:
        from xml.etree import cElementTree as ET
    except ImportError:
        try:
            import cElementTree as ET
        except ImportError:
            try:
                from xml.etree import ElementTree as ET
            except ImportError:
                try:
                    from elementtree import ElementTree as ET
                except ImportError:
                    raise ImportError(_ERROR)
else:
    # Cannot use standard ET available on IronPython because it is broken
    # both in 2.7.0 and 2.7.1:
    # http://ironpython.codeplex.com/workitem/31923
    # http://ironpython.codeplex.com/workitem/21407
    try:
        from elementtree import ElementTree as ET
    except ImportError:
        raise ImportError(_ERROR)
    # StringIO is used by ETSource to feed in-memory XML to this parser.
    from StringIO import StringIO
# cElementTree.VERSION seems to always be 1.0.6. We want real API version.
if ET.VERSION < '1.3' and hasattr(ET, 'tostringlist'):
    ET.VERSION = '1.3'
@py2to3
class ETSource(object):
    """Context manager normalising an XML source for ElementTree parsing.

    ``source`` may be a path to an XML file, an already-open file-like
    object, or a string containing the XML itself (recognised by a leading
    '<' after optional whitespace).  ``__enter__`` returns something that
    ElementTree can parse directly.
    """

    def __init__(self, source):
        self._source = source
        self._opened = None  # file-like object we created and must close ourselves

    def __enter__(self):
        # Only in-memory XML strings need wrapping; file names and
        # caller-supplied file objects pass through untouched.
        self._opened = self._open_source_if_necessary()
        return self._opened or self._source

    def __exit__(self, exc_type, exc_value, exc_trace):
        # Close only what we opened; caller-supplied objects stay open.
        if self._opened:
            self._opened.close()

    def __unicode__(self):
        # Human-readable name of the source (py2to3 presumably maps this to
        # __str__ on Python 3 -- behaviour defined in .compat).
        if self._source_is_file_name():
            return self._source
        if hasattr(self._source, 'name'):
            return self._source.name
        return '<in-memory file>'

    def _source_is_file_name(self):
        # A string that does not start with '<' is treated as a path.
        return is_string(self._source) \
            and not self._source.lstrip().startswith('<')

    def _open_source_if_necessary(self):
        """Wrap an in-memory XML string in a file-like object; None otherwise."""
        if self._source_is_file_name() or not is_string(self._source):
            return None
        if IRONPYTHON:
            # IronPython uses the pure-Python elementtree (see module-level
            # note), which is fed text via StringIO.
            return StringIO(self._source)
        return BytesIO(self._source.encode('UTF-8'))
| {
"content_hash": "acf3708fad4e0475df0d60e246c2c39a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 74,
"avg_line_length": 29.08219178082192,
"alnum_prop": 0.6090438059349976,
"repo_name": "joongh/robotframework",
"id": "5fbd81940fd798fb10b9f31e45625b41e301b5b5",
"size": "2767",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/robot/utils/etreewrapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "57497"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2209566"
},
{
"name": "RobotFramework",
"bytes": "2048926"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
"""
81. Search in Rotated Sorted Array II
Follow up for "Search in Rotated Sorted Array":
What if duplicates are allowed?
Would this affect the run-time complexity? How and why?
Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Write a function to determine if a given target is in the array.
The array may contain duplicates.
http://blog.csdn.net/aliceyangxi1987/article/details/50560697
有重复的话,多了一个判断条件就是三点相等时,左右端点同时变化
影响就是,如果在重复中间截断逆转,
之后再用 nums[start]<=target<nums[mid] 去判断,就找不到这个target
"""
class Solution(object):
    def search(self, nums, target):
        """Return True if target occurs in the rotated sorted array nums.

        Modified binary search: when nums[start] == nums[mid] == nums[end]
        neither half can be ruled out, so both endpoints shrink by one
        (worst case O(n)); otherwise the sorted half is identified and the
        search range is halved as usual.

        :type nums: List[int]
        :type target: int
        :rtype: bool
        """
        start = 0
        end = len(nums) - 1
        while start <= end:
            # FIX: use floor division.  Plain ``/`` yields a float on
            # Python 3 and crashes when indexing; ``//`` is identical on
            # Python 2 for non-negative ints.
            mid = (start + end) // 2
            if nums[mid] == target:
                return True
            if nums[mid] == nums[start] == nums[end]:
                # Duplicates hide which half is sorted; shrink both ends.
                start += 1
                end -= 1
            elif nums[start] <= nums[mid]:
                # Left half is sorted.
                if nums[start] <= target <= nums[mid]:
                    end = mid - 1
                else:
                    start = mid + 1
            else:
                # Right half is sorted.
                if nums[mid] <= target <= nums[end]:
                    start = mid + 1
                else:
                    end = mid - 1
        return False
if __name__ == '__main__':
    # FIX: call print as a function so the demo runs on both Python 2 and 3
    # (the original ``print expr`` statement is a SyntaxError on Python 3).
    print(Solution().search([3, 1, 1, 2], 2))
| {
"content_hash": "a6e3c07f21122688a36b7017f22f8485",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 94,
"avg_line_length": 24.883333333333333,
"alnum_prop": 0.5311453449430676,
"repo_name": "gengwg/leetcode",
"id": "77eed413cc8ed583509659b1b9de7d388ae53ce1",
"size": "1639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "081_search_rotated_sorted_array_ii.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "779"
},
{
"name": "Python",
"bytes": "627348"
},
{
"name": "SQLPL",
"bytes": "779"
},
{
"name": "Shell",
"bytes": "4149"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.  (No extensions are currently enabled.)
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'UCSC TumorMap'
copyright = u'2018, UCSC TumorMap developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# lighter themes: alabaster, nature, pyramid, sphinxdoc, default
html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Customisations for the "agogo" theme chosen above (fonts, colors, layout).
html_theme_options = {
    'headerfont': '"Helvetica Neue", Helvetica, Arial, sans-serif',
    'bodyfont': '"Helvetica Neue", Helvetica, Arial, sans-serif',
    'headerlinkcolor': '#05366B',
    'linkcolor': '#05366B',
    'textalign': 'left',
    'headerbg': 'linear-gradient(#05366B, #dddddd)',
    'footerbg': 'linear-gradient(#dddddd, #05366B)',
    #'bgcolor': '#dddddd', # broken
    #'headercolor1': '#dddddd', # broken
    #'headercolor2': '#dddddd', # broken
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> Documentation".
html_title = "TumorMap Developer Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "TumorMap Developer Documentation"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TumorMapdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'TumorMap.tex', u'TumorMap Documentation',
     u'TumorMap developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'tumormap', u'TumorMap Documentation',
     [u'TumorMap developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'TumorMap', u'TumorMap Documentation',
     u'TumorMap developers', 'TumorMap', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "0017c162b60ac7f93825ceb779fd47f1",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 79,
"avg_line_length": 33.38181818181818,
"alnum_prop": 0.6992374727668845,
"repo_name": "ucscHexmap/hexagram",
"id": "e79fa1fd5c7f9a622749bb467a84ec11d41a7640",
"size": "9601",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/devSource/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93035"
},
{
"name": "HTML",
"bytes": "439112"
},
{
"name": "JavaScript",
"bytes": "1062507"
},
{
"name": "Shell",
"bytes": "18313"
}
],
"symlink_target": ""
} |
from zaqar.tests.functional import base
class TestVersions(base.FunctionalTestBase):
    """Functional checks for the home (versions) resource."""

    server_class = base.ZaqarServer

    def setUp(self):
        super(TestVersions, self).setUp()
        # Point the shared client at the service root ("<url>/").
        self.base_url = self.cfg.zaqar.url + "/"
        self.client.set_base_url(self.base_url)

    def test_get_versions_without_headers(self):
        body = self.client.get('', headers={}).json()
        self.assertIn("versions", body)

    def test_get_versions_with_headers(self):
        body = self.client.get('').json()
        self.assertIn("versions", body)
| {
"content_hash": "5610cae73a6ce2fc29d66cc54ce7f3fd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.6505636070853462,
"repo_name": "openstack/zaqar",
"id": "66540aeb16401084c46b380bc03773d0f5dd9d3e",
"size": "1204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zaqar/tests/functional/wsgi/test_versions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "5002"
},
{
"name": "HTML",
"bytes": "22106"
},
{
"name": "Lua",
"bytes": "4555"
},
{
"name": "Mako",
"bytes": "952"
},
{
"name": "NASL",
"bytes": "15981"
},
{
"name": "Python",
"bytes": "1912931"
},
{
"name": "Shell",
"bytes": "20061"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.