text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
from pathlib import Path
from typing import Any, List, Optional, Tuple
from airflow_breeze import global_constants
from airflow_breeze.console import console
from airflow_breeze.global_constants import BUILD_CACHE_DIR
def check_if_cache_exists(param_name: str) -> bool:
    """Return True if a cache file for ``param_name`` exists in BUILD_CACHE_DIR."""
    cache_path = Path(BUILD_CACHE_DIR) / f".{param_name}"
    return cache_path.exists()
def read_from_cache_file(param_name: str) -> Optional[str]:
    """Return the cached value for ``param_name``, or None if nothing is cached."""
    if not check_if_cache_exists(param_name):
        return None
    cache_path = Path(BUILD_CACHE_DIR) / f".{param_name}"
    return cache_path.read_text().strip()
def touch_cache_file(param_name: str):
    """Create an empty cache file for ``param_name`` (or update its mtime)."""
    cache_path = Path(BUILD_CACHE_DIR) / f".{param_name}"
    cache_path.touch()
def write_to_cache_file(param_name: str, param_value: str, check_allowed_values: bool = True) -> None:
    """Store ``param_value`` for ``param_name`` in the build cache.

    When ``check_allowed_values`` is True, the value is first validated
    against the corresponding ALLOWED_* constant; an invalid value prints
    the allowed choices and aborts the process with a non-zero exit code.
    """
    if check_allowed_values:
        allowed, allowed_values = check_if_values_allowed(param_name, param_value)
        if not allowed:
            console.print(f'[cyan]You have sent the {param_value} for {param_name}')
            console.print(f'[cyan]Allowed value for the {param_name} are {allowed_values}')
            console.print('[cyan]Provide one of the supported params. Write to cache dir failed')
            # Exit with a failure status: the bare sys.exit() previously used
            # here reported success (0) even though the write failed.
            sys.exit(1)
    # Note: the stray debug print of BUILD_CACHE_DIR ("BUID CACHE DIR") that
    # used to live here was a leftover and has been removed.
    Path(BUILD_CACHE_DIR, f".{param_name}").write_text(param_value)
def check_cache_and_write_if_not_cached(
    param_name: str, default_param_value: str
) -> Tuple[bool, Optional[str]]:
    """Return ``(was_cached, value)`` for ``param_name``.

    A valid cached value is returned as-is with ``was_cached`` True.
    A missing or no-longer-allowed cached value is replaced by
    ``default_param_value``, which is written back to the cache.
    """
    cached_value = read_from_cache_file(param_name)
    if cached_value is not None:
        allowed, _ = check_if_values_allowed(param_name, cached_value)
        if allowed:
            return True, cached_value
    write_to_cache_file(param_name, default_param_value)
    return False, default_param_value
def check_if_values_allowed(param_name: str, param_value: str) -> Tuple[bool, List[Any]]:
    """Check ``param_value`` against the ALLOWED_<PARAM_NAME> constant.

    Returns a ``(is_allowed, allowed_values)`` pair.
    """
    allowed_values: List[Any] = getattr(global_constants, f'ALLOWED_{param_name.upper()}')
    return param_value in allowed_values, allowed_values
def delete_cache(param_name: str) -> bool:
    """Delete the cache file for ``param_name``; return True if one was removed."""
    if not check_if_cache_exists(param_name):
        return False
    (Path(BUILD_CACHE_DIR) / f".{param_name}").unlink()
    return True
| {
"content_hash": "a32970deb090b1a85c5bf30f8ccf8322",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 102,
"avg_line_length": 35.16438356164384,
"alnum_prop": 0.666536813400857,
"repo_name": "mistercrunch/airflow",
"id": "bc00b1fe0ea650203ff327e52f7a3d090b4b3a70",
"size": "3353",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dev/breeze/src/airflow_breeze/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
class Schemas(object):
  """Schemas for an API."""

  def __init__(self, discovery):
    """Constructor.

    Args:
      discovery: object, Deserialized discovery document from which we pull
        out the named schema.
    """
    self.schemas = discovery.get('schemas', {})
    # Memoized results of _prettyPrintByName, keyed by schema name.
    self.pretty = {}

  def _prettyPrintByName(self, name, seen=None, dent=0):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
      comments that conforms to the given schema.
    """
    seen = seen if seen is not None else []
    if name in seen:
      # Break the cycle on recursive definitions with a placeholder comment.
      return '# Object with schema name: %s' % name
    seen.append(name)
    if name not in self.pretty:
      converter = _SchemaToStruct(self.schemas[name], seen, dent)
      self.pretty[name] = converter.to_str(self._prettyPrintByName)
    seen.pop()
    return self.pretty[name]

  def prettyPrintByName(self, name):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.

    Returns:
      string, A string that contains a prototype object with
      comments that conforms to the given schema.
    """
    # Strip the trailing comma and newline before returning.
    return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

  def _prettyPrintSchema(self, schema, seen=None, dent=0):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
      comments that conforms to the given schema.
    """
    seen = seen if seen is not None else []
    return _SchemaToStruct(schema, seen, dent).to_str(self._prettyPrintByName)

  def prettyPrintSchema(self, schema):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.

    Returns:
      string, A string that contains a prototype object with
      comments that conforms to the given schema.
    """
    # Strip the trailing comma and newline before returning.
    return self._prettyPrintSchema(schema, dent=1)[:-2]

  def get(self, name):
    """Get deserialized JSON schema from the schema name.

    Args:
      name: string, Schema name.
    """
    return self.schemas[name]
class _SchemaToStruct(object):
  """Convert schema to a prototype object.

  Walks a parsed JSON schema and accumulates, piece by piece, a Python-like
  prototype literal whose values are placeholders and whose comments come
  from the schema's 'description' fields.
  """

  def __init__(self, schema, seen, dent=0):
    """Constructor.

    Args:
      schema: object, Parsed JSON schema.
      seen: list, List of names of schema already seen while parsing. Used to
        handle recursive definitions.
      dent: int, Initial indentation depth.
    """
    # The result of this parsing kept as list of strings.
    self.value = []
    # The final value of the parsing.
    self.string = None
    # The parsed JSON schema.
    self.schema = schema
    # Indentation level.
    self.dent = dent
    # Method that when called returns a prototype object for the schema with
    # the given name.
    self.from_cache = None
    # List of names of schema already seen while parsing.
    self.seen = seen

  def emit(self, text):
    """Add text as a line to the output.

    Args:
      text: string, Text to output.
    """
    self.value.extend([" " * self.dent, text, '\n'])

  def emitBegin(self, text):
    """Add text to the output, but with no line terminator.

    Args:
      text: string, Text to output.
    """
    self.value.extend([" " * self.dent, text])

  def emitEnd(self, text, comment):
    """Add text and comment to the output with line terminator.

    Args:
      text: string, Text to output.
      comment: string, Python comment.
    """
    if comment:
      # Continuation lines of a multi-line description become their own '#'
      # comment lines, indented two spaces past the current level.
      divider = '\n' + ' ' * (self.dent + 2) + '# '
      lines = comment.splitlines()
      lines = [x.rstrip() for x in lines]
      comment = divider.join(lines)
      self.value.extend([text, ' # ', comment, '\n'])
    else:
      self.value.extend([text, '\n'])

  def indent(self):
    """Increase indentation level."""
    self.dent += 1

  def undent(self):
    """Decrease indentation level."""
    self.dent -= 1

  def _to_str_impl(self, schema):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      schema: object, Parsed JSON schema file.

    Returns:
      Prototype object based on the schema, in Python code with comments.
    """
    stype = schema.get('type')
    if stype == 'object':
      self.emitEnd('{', schema.get('description', ''))
      self.indent()
      # NOTE: dict.iteritems() -- this module is Python 2 code.
      for pname, pschema in schema.get('properties', {}).iteritems():
        self.emitBegin('"%s": ' % pname)
        self._to_str_impl(pschema)
      self.undent()
      self.emit('},')
    elif '$ref' in schema:
      # Reference to a named schema: splice in its (possibly cached)
      # prototype via the from_cache callable.
      schemaName = schema['$ref']
      description = schema.get('description', '')
      s = self.from_cache(schemaName, self.seen)
      parts = s.splitlines()
      # The first spliced line carries this property's description; the
      # remaining lines are re-emitted at the current indentation level.
      self.emitEnd(parts[0], description)
      for line in parts[1:]:
        self.emit(line.rstrip())
    elif stype == 'boolean':
      value = schema.get('default', 'True or False')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'string':
      value = schema.get('default', 'A String')
      self.emitEnd('"%s",' % str(value), schema.get('description', ''))
    elif stype == 'integer':
      value = schema.get('default', '42')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'number':
      value = schema.get('default', '3.14')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'null':
      self.emitEnd('None,', schema.get('description', ''))
    elif stype == 'any':
      self.emitEnd('"",', schema.get('description', ''))
    elif stype == 'array':
      # Unlike the branches above, get() has no '' default here, so a
      # missing description is passed through as None (emitEnd treats any
      # falsy comment the same way).
      self.emitEnd('[', schema.get('description'))
      self.indent()
      self.emitBegin('')
      self._to_str_impl(schema['items'])
      self.undent()
      self.emit('],')
    else:
      self.emit('Unknown type! %s' % stype)
      self.emitEnd('', '')
    self.string = ''.join(self.value)
    return self.string

  def to_str(self, from_cache):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      from_cache: callable(name, seen), Callable that retrieves an object
        prototype for a schema with the given name. Seen is a list of schema
        names already seen as we recursively descend the schema definition.

    Returns:
      Prototype object based on the schema, in Python code with comments.
      The lines of the code will all be properly indented.
    """
    self.from_cache = from_cache
    return self._to_str_impl(self.schema)
| {
"content_hash": "d6e15b1e5f87bbca58342ccc91e81850",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 79,
"avg_line_length": 29.24125874125874,
"alnum_prop": 0.6258519669974889,
"repo_name": "gdg-garage/knihovna-db",
"id": "915f5f4811dc761b55336cae217649c5b97373b0",
"size": "8944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "knihovna-server/third_party/apiclient/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152002"
},
{
"name": "Dart",
"bytes": "22745"
},
{
"name": "Python",
"bytes": "44798"
},
{
"name": "Ruby",
"bytes": "5940"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages, Extension
from codecs import open
from os import path

# Release process, sum up:
# mktmpenv (Python version should not matter)
# pip install numpy cython pypandoc
# python setup.py sdist
# twine upload dist/blabla.tar.gz [-r testpypi]

# numpy is required at build time for the C-extension include directories.
try:
    import numpy as np
except ImportError:
    exit('Please install numpy>=1.11.2 first.')

# Cython is optional: with it the .pyx sources are compiled, without it the
# pre-generated .c files shipped in the sdist are used instead.
try:
    from Cython.Build import cythonize
    from Cython.Distutils import build_ext
except ImportError:
    USE_CYTHON = False
else:
    USE_CYTHON = True

__version__ = '1.0.3'

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file and convert it to rst
try:
    import pypandoc
    long_description = pypandoc.convert(path.join(here, 'README.md'), 'rst')
except(IOError, ImportError):
    # pypandoc (or pandoc itself) unavailable: fall back to raw Markdown.
    with open(path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = f.read()

# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]

cmdclass = {}
# Pick source extension: compile .pyx with Cython, else use generated .c.
ext = '.pyx' if USE_CYTHON else '.c'
extensions = [Extension('surprise.similarities',
                        ['surprise/similarities' + ext],
                        include_dirs=[np.get_include()]),
              Extension('surprise.prediction_algorithms.matrix_factorization',
                        ['surprise/prediction_algorithms/matrix_factorization' + ext],
                        include_dirs=[np.get_include()]),
              Extension('surprise.prediction_algorithms.optimize_baselines',
                        ['surprise/prediction_algorithms/optimize_baselines' + ext],
                        include_dirs=[np.get_include()]),
              Extension('surprise.prediction_algorithms.slope_one',
                        ['surprise/prediction_algorithms/slope_one' + ext],
                        include_dirs=[np.get_include()]),
              Extension('surprise.prediction_algorithms.co_clustering',
                        ['surprise/prediction_algorithms/co_clustering' + ext],
                        include_dirs=[np.get_include()]),
              ]

if USE_CYTHON:
    ext_modules = cythonize(extensions)
    cmdclass.update({'build_ext': build_ext})
else:
    ext_modules = extensions

setup(
    name='scikit-surprise',
    author='Nicolas Hug',
    author_email='contact@nicolas-hug.com',
    description=('An easy-to-use library for recommender systems.'),
    long_description=long_description,
    version=__version__,
    url='http://surpriselib.com',
    license='GPLv3+',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        # NOTE(review): this classifier says BSD while license= above says
        # GPLv3+ -- confirm which one is intended.
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 2.7',
    ],
    keywords='recommender recommendation system',
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    ext_modules = ext_modules,
    cmdclass=cmdclass,
    install_requires=install_requires,
    dependency_links=dependency_links,
    entry_points={'console_scripts':
                  ['surprise = surprise.__main__:main']},
)
| {
"content_hash": "05bfa563bf6cdd90e36301ae89850ed0",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 92,
"avg_line_length": 33.66019417475728,
"alnum_prop": 0.6325353331410442,
"repo_name": "charmoniumQ/Surprise",
"id": "7fb85916820f94b01cad8d2aeedc2e22d0cecf86",
"size": "3467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177820"
}
],
"symlink_target": ""
} |
import string
from textwrap import wrap
# Normalization strategies for display_table's norm_meth argument:
MIN = 1       # bring undersized columns up to a minimum width (default path)
UNBIASED = 2  # scale all columns keeping their relative width ratios
def display_table(rows,              # List of tuples of data
                  headings=[],       # Optional headings for columns
                  col_widths=[],     # Column widths
                  col_justs=[],      # Column justifications (str.ljust, etc)
                  screen_width=80,   # Width of terminal
                  col_spacer=2,      # Space between columns
                  fill_char=' ',     # Fill character
                  col_sep='=',       # Separator char
                  row_term='\n',     # row terminator (could be <br />)
                  norm_meth=MIN,     # Screen width normalization method
                  ):
    """Print rows of data as a wrapped, column-aligned text table (Python 2).

    Cell text wider than its column is wrapped onto extra lines. Column
    widths, if not given, are computed from the data and normalized to fit
    screen_width using either MIN or UNBIASED normalization.

    NOTE(review): the mutable default arguments (headings, col_widths,
    col_justs) are never mutated here -- local copies are taken below --
    but they are a known Python pitfall. Also assumes rows is non-empty
    (rows[0] is read when guessing justifications) -- confirm with callers.
    """
    # Work on copies so the caller's lists are never modified.
    _col_justs = list(col_justs)
    _col_widths = list(col_widths)
    # String-ify everything
    rows = [tuple((str(col) for col in row)) for row in rows]
    # Compute appropriate col_widths if not given
    if not col_widths:
        if headings:
            # Each column's width is its longest cell, headings included.
            _col_widths = [max(row) for row in (map(len, col)
                           for col in zip(headings, *rows))]
        else:
            _col_widths = [max(row) for row in (map(len, col)
                           for col in zip(*rows))]
    num_cols = len(_col_widths)
    col_spaces = col_spacer * (num_cols - 1)

    # Compute the size a row in our table would be in chars
    def _get_row_size(cw):
        return sum(cw) + col_spaces

    row_size = _get_row_size(_col_widths)

    def _unbiased_normalization():
        """ Normalize keeping the ratio of column sizes the same """
        __col_widths = [int(col_width *
                            (float(screen_width - col_spaces) / row_size))
                        for col_width in _col_widths]
        # Distribute epsilon underage (int() truncation) to the columns,
        # round-robin, until the row exactly fills the screen.
        for x in xrange(screen_width - _get_row_size(__col_widths)):
            __col_widths[x % num_cols] += 1
        return __col_widths

    def _min_normalization():
        """ Bring all columns up to the minimum """
        __col_widths = _unbiased_normalization()
        # A made up heuristic -- hope it looks good
        col_min = int(0.5 * min(row_size, screen_width) / float(num_cols))
        # Bring all the columns up to the minimum
        norm_widths = []
        for col_width, org_width in zip(__col_widths, _col_widths):
            if col_width < col_min:
                # Widen, but never beyond the column's natural width.
                col_width = min(org_width, col_min)
            norm_widths.append(col_width)
        # Distribute epsilon overage to the columns: shrink round-robin,
        # touching only columns that are still above the minimum.
        count = _get_row_size(norm_widths) - screen_width
        x = 0
        while count > 0:
            if norm_widths[x % num_cols] > col_min:
                norm_widths[x % num_cols] -= 1
                count -= 1
            x += 1
        return norm_widths

    if not col_widths:
        # Normalize columns to screen size
        if row_size > screen_width:
            if norm_meth is UNBIASED:
                _col_widths = _unbiased_normalization()
            else:
                _col_widths = _min_normalization()
            row_size = _get_row_size(_col_widths)
    # If col_justs are not specified then guess the justification from
    # the appearance of the first row of data.
    # Numbers and money are right justified, alpha beginning strings are left
    if not _col_justs:
        for col_datum in rows[0]:
            if isinstance(col_datum, str):
                if col_datum.startswith(tuple(string.digits + '$')):
                    _col_justs.append(str.rjust)
                else:
                    _col_justs.append(str.ljust)
            else:
                _col_justs.append(str.rjust)
    # Calculate the minimum screen width needed based on col_spacer and number
    # of columns
    min_screen_width = num_cols + col_spaces
    assert screen_width >= min_screen_width, "Screen Width is set too small, must be >= %d" % min_screen_width
    row_size = _get_row_size(_col_widths)

    def _display_wrapped_row(row, heading=False):
        """ Take a row, wrap it, and then display in proper tabular format
        """
        wrapped_row = [wrap(col_datum, col_width)
                       for col_datum, col_width in zip(row, _col_widths)]
        row_lines = []
        # map(None, ...) is Python 2's izip_longest: it pads the shorter
        # wrapped columns with None (hence the "or ''" below).
        for cols in map(None, *wrapped_row):
            if heading:
                partial = (str.center((partial_col or ''), col_width, fill_char)
                           for partial_col, col_width in zip(cols, _col_widths))
            else:
                partial = (col_just((partial_col or ''), col_width, fill_char)
                           for partial_col, col_width, col_just in zip(cols,
                                                                       _col_widths,
                                                                       _col_justs))
            row_lines.append((fill_char * col_spacer).join(partial))
        print row_term.join(row_lines)

    if headings:
        # Print out the headings
        _display_wrapped_row(headings, heading=True)
        # Print separator
        print col_sep * row_size
    # Print out the rows of data
    for row in rows:
        _display_wrapped_row(row)
| {
"content_hash": "aa73ae2c879d66914e26b4624cae7d29",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 110,
"avg_line_length": 37.56521739130435,
"alnum_prop": 0.5302854938271605,
"repo_name": "ActiveState/code",
"id": "9a77c483576aeeb361726d2615c1c8b2300ab5cd",
"size": "5184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/519618_Robust_Textual_Tables/recipe-519618.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
try:
# Try using ez_setup to install setuptools if not already installed.
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
# Ignore import error and assume Python 3 which already has setuptools.
pass
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
from setuptools import setup, find_packages
classifiers = ['Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: System :: Hardware']
setup(name = 'Adafruit_BMP',
version = '1.5.4',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Library for accessing the BMP series pressure and temperature sensors like the BMP085/BMP180 on a Raspberry Pi or Beaglebone Black.',
license = 'MIT',
classifiers = classifiers,
url = 'https://github.com/adafruit/Adafruit_Python_BMP/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.6.5'],
install_requires = ['Adafruit-GPIO>=0.6.5'],
packages = find_packages(),
long_description = long_description,
long_description_content_type = 'text/markdown')
| {
"content_hash": "f82f478a0cd14e4ae72cd8b5f4aa2ab5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 160,
"avg_line_length": 41.214285714285715,
"alnum_prop": 0.6158290005777007,
"repo_name": "adafruit/Adafruit_Python_BMP",
"id": "aa9a270351bd5bede6d4b7a61e543eb8fbae7217",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23241"
}
],
"symlink_target": ""
} |
import math
import Pyro4, time, psutil
#import sqlite3 as sqlite
#import datetime as dt
#perhaps needs to be set somewhere else
# Shared secret for Pyro4's HMAC message signing.
# NOTE(review): hard-coded key in source ("perhaps needs to be set somewhere
# else" per the original author) -- confirm whether this should come from
# configuration instead.
Pyro4.config.HMAC_KEY='pRivAt3Key'
# Module-level result lists, filled in by functionCount()/splitMemMalloc()
# and returned to the caller:
stressValues = []    # stress amount per job
runStartTimeList=[]  # start time per job
runDurations = []    # duration per job
RESTYPE = "null"     # resource type currently processed ('mem' or 'cpu')
MALLOC_LIMIT = 4095  # upper bound on a single memory job's stress value; larger jobs are split (see splitMemMalloc)
import sys
from collections import OrderedDict
import Library
from Library import getHomepath
sys.path.insert(0, getHomepath() + '/distributions/')
from abstract_dist import *
class dist_real_trace1(abstract_dist):
    # Empty marker subclass of abstract_dist. The real-trace distribution's
    # behaviour lives in the module-level functions below rather than in
    # methods on this class.
    pass
def functionCount(emulationID,emulationName,emulationLifetimeID,startTimesec,duration, distributionGranularity,distributionArg,resType,HOMEPATH):
    """Parse a recorded resource-usage trace file into stress jobs.

    Only some of the parameters are read here; the rest appear to follow a
    common distribution-plugin signature (TODO confirm against the caller).

    Args (those actually used):
        startTimesec: base offset for the jobs' start times.
        distributionArg: dict with 'trace' (local path to the trace file)
            and 'groupingrange' (range used to merge similar values).
        resType: 'mem' or 'cpu' -- which trace column to replay.

    Returns:
        (stressValues, runStartTimeList, runDurations, 'time') on success,
        or an error string if the trace could not be processed.
    """
    #startLoad = int(distributionArg["startload"])
    #stopLoad = int(distributionArg["stopload"])
    trace = distributionArg["trace"]
    groupingRange = int(distributionArg["groupingrange"])
    global RESTYPE
    RESTYPE = resType
    global stressValues
    global runStartTimeList
    global runDurations
    # memReading=psutil.phymem_usage()
    # allMemoryPc =(memReading.total/1048576.00)/100.00
    try:
        f = open(trace, 'r')
        #print f
        # Trace header: four tab-separated "NAME\tvalue" lines written by the
        # rec_res_usage script (cpu count, total memory, timestamp, poll
        # frequency), followed by a column-header line.
        line = f.readline()
        HEAD, NCPUS = line.strip().split("\t")
        line = f.readline()
        HEAD, MEMTOTAL = line.strip().split("\t")
        line = f.readline()
        HEAD, TIMESTAMP = line.strip().split("\t")
        line = f.readline()
        HEAD, POLLFR = line.strip().split("\t")
        FR = int(POLLFR)
        # skip the header (kept: it tells us whether memory is in % or bytes)
        prevline = f.readline()
        memArray = []
        cpuArray = []
        # Data lines: one cpu and one memory sample per line.
        for line in f:
            cpuValue, memValue = line.split()
            memArray.append(memValue)
            cpuArray.append(cpuValue)
        if RESTYPE == "mem":
            if ("MEMUSED%" in prevline):
                memArray = Library.memToInt(memArray) #Convert memory stressValues from % to real values
            else:
                memArray = map(lambda stressVal: int(stressVal) // (1024**2), memArray)
                # for memVal in memArray: memVal = memVal / (1024**2) #Convert from Bytes into MegaBytes
            groupingRange = (Library.getTotalMem() // 100) * groupingRange #Convert from % to real value
            (stressValues, runStartTimeList, runDurations) = Library.realTraceSmoothing(memArray, startTimesec, FR, groupingRange)
            # Memory jobs above MALLOC_LIMIT must be split into parallel jobs.
            splitMemMalloc()
        if RESTYPE == "cpu":
            (stressValues, runStartTimeList, runDurations) = Library.realTraceSmoothing(cpuArray, startTimesec, FR, groupingRange)
    except Exception, e:
        # Any parse/IO failure is reported back as a message string.
        return "Unable to create distribution:\n" + str(e)
    triggerType = "time"
    return stressValues, runStartTimeList, runDurations, triggerType
#Checks if mem stressvalue is higher than MALLOC_LIMIT, splits jobs if so
def splitMemMalloc():
    """Cap every memory stress value at MALLOC_LIMIT, splitting the excess.

    Each job whose value exceeds MALLOC_LIMIT is clamped to the limit and
    the remainder is inserted as a new job right after it, with the same
    start time and duration. Repeats until no value exceeds the limit.
    Mutates the module-level stressValues / runStartTimeList / runDurations
    lists in place.
    """
    global stressValues
    global runStartTimeList
    global runDurations
    mallocReached = True
    while (mallocReached):
        mallocReached = False
        tempJobs = [] #Holds details of split jobs, to be merged with main arrays/lists
        for i, stressValue in enumerate(stressValues):
            if stressValue > MALLOC_LIMIT:
                stressValues[i] = MALLOC_LIMIT
                # Record (index, excess, start time, duration) for insertion below.
                tempJob = [i, stressValue-MALLOC_LIMIT, runStartTimeList[i], runDurations[i]]
                tempJobs.append(tempJob)
                mallocReached = True
        # Insert in reverse order so earlier insertions do not shift the
        # indexes of later ones.
        for tempJob in reversed(tempJobs): #Add the values into the lists at position tempJob[0]
            stressValues.insert(tempJob[0]+1,tempJob[1])
            runStartTimeList.insert(tempJob[0]+1,tempJob[2])
            runDurations.insert(tempJob[0]+1,tempJob[3])
    print "stressValues out ", stressValues #REMOVE: leftover debug output
def distHelp():
    '''
    Help method that gives a description of real-trace distribution usage
    (printed to stdout and also returned as a string).
    '''
    print "Real Trace takes a locally stored trace file (generated by the rec_res_usage script) and a grouping range. The grouping range is used to group together stressvalues in a specified range.\nSee documentation for further help."
    return "Real Trace takes a locally stored trace file (generated by the rec_res_usage script) and a grouping range. The grouping range is used to group together stressvalues in a specified range.\nSee documentation for further help."
def argNames(Rtype=None):
    '''
    Describe the arguments a distribution instance requires to run properly.

    Args:
        Rtype: resource type ('mem' or 'cpu', case-insensitive), or None to
            discover which resource types this distribution supports.

    Returns:
        A list of supported resource types when Rtype is None; an
        OrderedDict mapping argument name -> {upperBound, lowerBound,
        argHelp} for a supported Rtype; None for an unsupported Rtype
        (preserving the original fall-through behaviour).

    IMPORTANT: All argument variable names must be in lower case
    '''
    # discovery of supported resources
    if Rtype is None:  # 'is None', not '== None' (PEP 8)
        return ["mem", "cpu"]
    # The 'cpu' and 'mem' argument specs were byte-identical duplicates in
    # the original two branches, so they are defined once here.
    if Rtype.lower() in ("cpu", "mem"):
        arg_specs = [
            ("minJobTime", {"upperBound": 10000000, "lowerBound": 2,
                            "argHelp": "Minimum time a single job's duration can be (any jobs under will be deleted).\nUnits: seconds (Min 2)"}),
            ("trace", {"upperBound": 999999, "lowerBound": 0,
                       "argHelp": "Local path to trace file (must be lower-case).\nSee documentation for additional help"}),
            ("groupingRange", {"upperBound": 99, "lowerBound": 1,
                               "argHelp": "Range to group jobs together by.\nSee documentation for additional help"}),
        ]
        return OrderedDict(arg_specs)
"content_hash": "39324112203dbff92fc8d679817d74a9",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 236,
"avg_line_length": 41.05555555555556,
"alnum_prop": 0.6562922868741543,
"repo_name": "cragusa/cocoma",
"id": "b477c8b57feb7d4de811578641ae16a770ccc6bb",
"size": "6625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributions/dist_real_trace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21251"
},
{
"name": "JavaScript",
"bytes": "100005"
},
{
"name": "Python",
"bytes": "351175"
},
{
"name": "Ruby",
"bytes": "19866"
},
{
"name": "Shell",
"bytes": "9935"
},
{
"name": "TeX",
"bytes": "310637"
}
],
"symlink_target": ""
} |
import random
import places
import persons
import actions
import options
from state import GameState
from character import Character
from multiple_choice import MultipleChoice
def lords_victory(state):
    """Anarchy ending: triggers once all four lords are dead."""
    lords = ("lord_arthur", "lord_bartholomew", "lord_carlos", "lord_daniel")
    if any(state.persons.persons_dict[lord].alive for lord in lords):
        return
    state.character.alone = False
    print("You have destroyed the establishment and brought about "
          "a uptopian anarchy... more or less.")
    print("You win!")
def pyro_victory(state):
    """Arson ending: triggers once more than eight places have burned."""
    # increase this number for each new burnable place we add
    if len(state.places.burned) <= 8:
        return
    # might need to add a master_pyro boolean for the character instead
    # of using .alone (which makes no sense)
    state.character.alone = False
    victory_lines = ["Some people just like to watch the world "
                     "burn. You are one of them.",
                     "You are satisfied with how everything has "
                     "been burned.",
                     ]
    print(random.choice(victory_lines))
    print("You win!")
def main():
    """
    Game entry point. The loop works as follows:

    Set up the initial state
    Display the initial message
    Display the initial options
    Choose an action
    Get an outcome
    Display results of the outcomes
    Outcome changes game state
    """
    all_persons = persons.Persons()
    all_places = places.Places()
    hero = Character(place=all_places.places_dict["tavern"])
    state = GameState(hero, all_places, all_persons)
    choices = MultipleChoice()
    options.set_initial_actions(choices, state)
    print("\n---The St. George Game---\n")
    print("You are in a tavern. The local assassins hate you.")
    while state.character.alive and \
            state.character.alone and not \
            state.character.lose:
        action = choices.choose_action()
        if state.character.threatened and not action.combat_action:
            # Being threatened forces a fight unless the chosen action is combat.
            outcome = \
                actions.Attack(state, state.character.person).get_outcome(state)
        else:
            outcome = action.get_outcome(state)
        outcome.execute()
        options.add_actions(choices, state, outcome)
        choices.generate_actions(state.character)
    lords_victory(state)
    pyro_victory(state)


if __name__ == "__main__":
    main()
| {
"content_hash": "8526522fb5a5c665f267750db1c823a9",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 38.85507246376812,
"alnum_prop": 0.6105930622901903,
"repo_name": "SageBerg/St.GeorgeGame",
"id": "e9e0f8ce621c9b6ecb8092b79289882a42d016ce",
"size": "2681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python_version/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1611"
},
{
"name": "JavaScript",
"bytes": "477193"
},
{
"name": "Python",
"bytes": "269761"
},
{
"name": "Shell",
"bytes": "391"
}
],
"symlink_target": ""
} |
from .sql import (
alias,
all_,
and_,
any_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
func,
funcfilter,
insert,
intersect,
intersect_all,
join,
lateral,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
tablesample,
text,
true,
tuple_,
type_coerce,
union,
union_all,
update,
within_group,
)
from .types import (
ARRAY,
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
Binary,
Boolean,
CHAR,
CLOB,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
JSON,
LargeBinary,
NCHAR,
NVARCHAR,
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
)
from .schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
DDL,
BLANK_SCHEMA
)
from .inspection import inspect
from .engine import create_engine, engine_from_config
__version__ = '1.1.0'
def __go(lcls):
    """Finalize the package namespace at import time.

    Args:
        lcls: the package's ``locals()``, scanned to build ``__all__``.
    """
    global __all__

    # Imported here (not at top of file) so they run after the rest of the
    # package namespace above is fully populated.
    from . import events
    from . import util as _sa_util

    import inspect as _inspect

    # Export every public name defined so far: skip underscore-prefixed
    # names and module objects.
    __all__ = sorted(name for name, obj in lcls.items()
                     if not (name.startswith('_') or _inspect.ismodule(obj)))

    # Resolve deferred cross-module dependencies now that import is complete.
    _sa_util.dependencies.resolve_all("sqlalchemy")

__go(locals())
| {
"content_hash": "27b8781181fe27f81d3787e25729d66c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 77,
"avg_line_length": 14.297101449275363,
"alnum_prop": 0.5631018753167765,
"repo_name": "franekp/millandict",
"id": "e7810239c1af809ba120bd9b8a78a95c64476b4c",
"size": "2217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ankidict/thirdparty/sqlalchemy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "913"
},
{
"name": "CSS",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "86639"
},
{
"name": "QMake",
"bytes": "158"
},
{
"name": "VimL",
"bytes": "31"
}
],
"symlink_target": ""
} |
from cliff import show
# noinspection PyAbstractClass
class HealthCheck(show.ShowOne):
    """Check api health status"""

    def take_action(self, parsed_args):
        # Query the healthcheck endpoint and format the reply as columns.
        status = self.app.client.healthcheck.get()
        return self.dict2columns(status)
| {
"content_hash": "326d6338d1dbb7234e61ba66c816189b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.728448275862069,
"repo_name": "openstack/python-vitrageclient",
"id": "9838dbd3839a06876e95d516753f2c4fc55d484e",
"size": "815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrageclient/v1/cli/healthcheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "108144"
}
],
"symlink_target": ""
} |
# ____________list_____________
# Worked examples of Python list basics; expected values are shown in the
# trailing comments.
classmates = ["Michael", "Bob", "Tracy"]
classmates           # ['Michael', 'Bob', 'Tracy']
len(classmates)      # 3

classmates[0]        # 'Michael' -- indexing starts from 0
classmates[1]        # 'Bob'

# Indexing past the end raises IndexError; the largest valid index is
# len(classmates) - 1.  Bug fix: the original script crashed here, so the
# remaining examples never ran -- demonstrate the error without aborting.
try:
    classmates[3]
except IndexError:
    pass             # list index out of range

classmates[-1]       # 'Tracy' -- negative indices count from the end
classmates[-2]       # 'Bob'

## add one more element to the end
classmates.append("Adam")
classmates           # ['Michael', 'Bob', 'Tracy', 'Adam']

## insert an element at a particular position
classmates.insert(1, "Jack")
classmates           # ['Michael', 'Jack', 'Bob', 'Tracy', 'Adam']

## pop() removes and returns the element at the given index (default -1,
## i.e. the last element)
classmates.pop()     # 'Adam'
classmates           # ['Michael', 'Jack', 'Bob', 'Tracy']
classmates.pop(2)    # 'Bob'
classmates           # ['Michael', 'Jack', 'Tracy']

## replace an element in place
classmates[1] = "Sarah"     # ['Michael', 'Sarah', 'Tracy']

## unlike R, one list can mix element types
L = ["Apple", 123, True]

## a list can contain another list
s = ["python", "java", ["asp", "php"], 'scheme']
len(s)               # 4 -- the inner list counts as one element
s[2][1]              # 'php'

p = ['asp', 'php']
s = ['python', 'java', p, 'scheme']
# s is a 2-dimensional list

## create an empty list
L = []
len(L)               # 0

# ____________tuple_____________
## tuples are immutable, which makes them safer than lists
classmates = ('Michael', 'Bob', 'Tracy')

## tuples have no append()/insert(); both raise AttributeError.
## Bug fix: as with the IndexError above, demonstrate without aborting.
try:
    classmates.append("Adam")
except AttributeError:
    pass             # 'tuple' object has no attribute 'append'
try:
    classmates.insert(1, "Adam")
except AttributeError:
    pass             # 'tuple' object has no attribute 'insert'

## reading elements works the same as for lists
classmates[1]        # 'Bob'
classmates[0]        # 'Michael'

## since a tuple cannot change, supply all elements at creation time
t = (1, 2)
t                    # (1, 2)

## an empty tuple
t = ()
t

## a one-element tuple needs a trailing comma:
t = (1)              # WRONG: this is just the number 1 (parens group an expression)
t
t = (1,)             # RIGHT
t                    # (1,)

## can we change a list inside a tuple?  Yes: the tuple only fixes WHICH
## objects it refers to, not the contents of a mutable element.
t = ("a", "b", ["c", "d"])
t[2][0] = "x"
t[2][1] = "y"
t                    # ('a', 'b', ['x', 'y'])
| {
"content_hash": "c85f12c17b4499e7ee6c9b2625006df8",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 22.225490196078432,
"alnum_prop": 0.6479929422143802,
"repo_name": "yujingma45/PythonLearning_Liaoxuefeng",
"id": "6daaa6e499c1a00b7e0eb255a5cf8eb39f265014",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlearning3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61326"
}
],
"symlink_target": ""
} |
"""This module exports the FileExists plugin class."""
from SublimeLinter.lint import Linter, util
import re
import os
import sublime
import sublime_plugin
import json
import logging
# PLUGIN_SETTINGS = sublime.load_settings("fileExists.sublime-settings")
# SYNTAX = PLUGIN_SETTINGS.get("syntax")
# DEBUG = PLUGIN_SETTINGS.get("debug", False)
# logging.basicConfig(format='[fileExists] %(message)s ')
# felogger = logging.getLogger(__name__)
# if (DEBUG):
# felogger.setLevel(logging.DEBUG)
# else:
# felogger.setLevel(logging.WARNING)
# SYNTAX = "source.shell"
# Plugin settings, loaded once at import time.
PLUGIN_SETTINGS = sublime.load_settings("fileExists.sublime-settings")
# Bug fix: the old code assigned 'source.shell' and then unconditionally
# overwrote it with PLUGIN_SETTINGS.get("syntax"), which yields None when the
# settings file defines no "syntax" key (and tuple(None) then crashes below).
# Use the intended value as the .get() default instead.
SYNTAX = PLUGIN_SETTINGS.get("syntax", 'source.shell')
def plugin_loaded():
    """Configure the module logger once Sublime Text has loaded the plugin."""
    global SYNTAX
    global felogger

    print(sublime.find_resources("fileExists.sublime-settings"))

    debug_enabled = PLUGIN_SETTINGS.get("debug", False)
    logging.basicConfig(format='[fileExists] %(message)s ')
    felogger = logging.getLogger(__name__)
    # Verbose output only when the "debug" setting is on.
    if debug_enabled:
        felogger.setLevel(logging.DEBUG)
    else:
        felogger.setLevel(logging.WARNING)
# sublime_plugin.reload_plugin("SublimeLinter")
class FileExists(Linter):
    """SublimeLinter linter that checks whether file arguments mentioned in a
    script exist on disk.

    Input files that are missing produce errors; output files that already
    exist produce overwrite errors.  Keyword/flag associations are read from
    *.fileArgs resource files (see readFileArgs).  All lint records are
    produced internally by run(); no external command is executed.
    """

    # Syntaxes this linter applies to, from the plugin settings.
    syntax = tuple(SYNTAX)

    cmd = None  # no external binary; run() generates the output itself

    # Parses the internally generated "X:line:col:severity:message" records.
    regex = (
        r'^.+?:(?P<line>\d+):(?P<col>\d+):'
        r'(?:(?P<error>error)|(?P<warning>(warning|note))):'
        r'(?P<message>.+)$'
    )
    word_re = r'(^[-\w\.\/]+)'
    multiline = False
    line_col_base = (1, 1)
    defaults = {}
    inline_settings = None
    inline_overrides = None
    comment_re = None

    @classmethod
    def posToRowCol(cls, pos, code):
        """Convert a flat character offset in *code* to a 1-based (row, col).

        NOTE(review): implicitly returns None when pos lies past the end of
        code -- callers currently never pass such offsets; confirm before
        relying on this for arbitrary positions.
        """
        row = 1
        currentlen = 0
        lastlen = 0
        for line in code.split('\n'):
            lastlen = currentlen
            currentlen += len(line) + 1  # +1 for the newline removed by split
            if currentlen >= pos:
                return (row, pos - lastlen + 1)
            row += 1

    def splitInterruptedLint(self, lint):
        """Split a linted area interrupted by non-word characters into
        multiple warning/error records, one pair per "/" in the file name.

        NOTE(review): currently unused -- every call site is commented out in
        the original code; kept for parity.
        """
        linted = ""
        fname = re.search(r"(?P<open>\()(?P<file>[\w\.\/_-]+)(?P<close>\))",
                          lint).group('file')
        felogger.debug(fname)
        positions = lint.split(":")
        slashes = [m.start() for m in re.finditer("/", fname)]
        for s in slashes:
            linted += '\n%s:%s:%s:%s:%s' \
                % (positions[0], positions[1], int(positions[2]) + s,
                   positions[3], positions[4])
            linted += '\n%s:%s:%s:%s:%s' \
                % (positions[0], positions[1], int(positions[2]) + s + 1,
                   positions[3], positions[4])
        return linted

    def checkForFile(self, code, path, filename_instance, prog_instance,
                     inputfile=True):
        """Build a lint record for the file named by *filename_instance*.

        filename_instance -- regex match with a 'fname' group, found inside
            prog_instance (a match over the whole keyword region).
        inputfile -- True: the file is read by the program, so a missing
            file is an error and an existing one is a note.  False: the file
            is written, so an existing file is an overwrite error.
        """
        filename = filename_instance.group('fname')
        filenameStart = filename_instance.start('fname')
        pos = self.posToRowCol(prog_instance.start(0) + filenameStart, code)
        if filename[0] == "/":
            fullpath = filename  # absolute path used as-is
        else:
            fullpath = path + "/" + filename
        if os.path.isfile(fullpath):
            if inputfile:
                linted = 'W:%s:%s:note:File exists (%s)\n' \
                    % (pos[0], pos[1], filename)
            else:
                linted = 'E:%s:%s:error:File exists, will be overwritten (%s)\n' \
                    % (pos[0], pos[1], filename)
        else:
            if inputfile:
                linted = 'E:%s:%s:error:File not found (%s)\n' \
                    % (pos[0], pos[1], filename)
            else:
                linted = ""  # a missing output file is fine
        return linted

    def scanUnflagged(self, prog, ext, code, inputfile=True):
        """Scan for file arguments NOT preceded by a -/-- style flag and lint
        each one via checkForFile()."""
        path = os.path.dirname(self.view.file_name())
        all_lints = ''
        # First match "prog ... <something><ext>" regions, then extract the
        # full file names from each region.
        regex = re.compile(
            r'(?<!#.)(?<=\s)%s'           # keyword exists and not in a comment
            r'(.+?\\\n)*?'                # including broken by newlines
            r'(.+?([\w\._-]+%s).'         # argument to keyword
            r'*?(\n|\Z))' % (prog, ext)   # match end of line or end of file
            , re.M)
        for prog_instance in regex.finditer(code):
            felogger.debug('FileExists, unflagged argument found')
            file_regex = re.compile(
                r'(?<!-)(?P<preceding>[\']?\w[\w_\.-]+[\']?)[\n\\\s]+'  # preceding junk
                r'(?P<fname>[/\w_\.-]+%s)' % ext                        # full file name
            )
            for file_instance in file_regex.finditer(prog_instance.group(0)):
                felogger.debug("FileExists, check for %s" % file_instance.group(0))
                # A leading "-" flag is excluded by the (?<!-) lookbehind.
                linted = self.checkForFile(code, path, file_instance,
                                           prog_instance, inputfile)
                all_lints += linted
        return all_lints

    def scanFlagged(self, prog, flag, code, inputfile=True):
        """Scan for file arguments preceded by -/-- style flags and lint each
        one via checkForFile()."""
        # Regex to find all instances of 'prog(ram) (flag) argument'.
        regex = re.compile(
            r'(?<!#.)%s'                   # keyword exists, but not in a comment
            r'(.+\\\n)*?'                  # allow for linebreaks before flag
            r'.*?%s\s+?'                   # flag
            r'(\s*?[\w\._-]+?\s*?)'        # argument to keyword
            r'(.*(\n|\Z))' % (prog, flag)  # account for newline / eof
            , re.M)
        all_lints = ''
        path = os.path.dirname(self.view.file_name())
        for prog_instance in regex.finditer(code):
            felogger.debug("FileExists, flagged argument found" + prog_instance.group(0))
            # Extract the filename following the flag.
            file_regex = re.compile(
                r'(?P<flag>%s)[\s\\]+(?P<fname>[/\w\.\/_-]+)' % flag,
                re.M)
            for file_instance in file_regex.finditer(prog_instance.group(0)):
                felogger.debug("FileExists, check for %s" % file_instance.group(0))
                # Bug fix: checkForFile() used to be invoked twice per match
                # (duplicated call), doubling the filesystem work for no gain.
                linted = self.checkForFile(code, path, file_instance,
                                           prog_instance, inputfile)
                all_lints += linted
        return all_lints

    def readFileArgs(self, scope):
        """Read predefined program-name/flag associations from all *.fileArgs
        resources in the Sublime Text 3 config folder.

        Returns the first parsed dict whose 'scope' entry is contained in
        *scope*, or False when none matches.
        """
        flagFiles = sublime.find_resources("*.fileArgs")
        for flaglist in flagFiles:
            flagdata = json.loads(sublime.load_resource(flaglist))
            if flagdata['scope'] in scope:
                return flagdata
        return False

    def run(self, cmd, code):
        """Produce lint records internally instead of running an external
        linter.  Returns the concatenated warning/error lines."""
        scope_name = self.view.scope_name(0)
        felogger.debug(scope_name)
        fromFile = self.readFileArgs(scope_name)
        all_lints = ''
        # Bug fix: readFileArgs() returns False when no .fileArgs resource
        # matches this scope; iterating over False raised TypeError before.
        if not fromFile:
            return all_lints
        for entry in fromFile['keywords']:
            for inputFlag in entry['inputflags']:
                all_lints += self.scanFlagged(entry['key'], inputFlag, code)
            for outputFlag in entry['outputflags']:
                all_lints += self.scanFlagged(entry['key'], outputFlag, code, False)
            for ext in entry['unflaggedInputs']:
                all_lints += self.scanUnflagged(entry['key'], ext, code)
            for ext in entry['unflaggedOutputs']:
                all_lints += self.scanUnflagged(entry['key'], ext, code, False)
        return all_lints
| {
"content_hash": "9b5da97145493845a36a8b2b7db622e1",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 116,
"avg_line_length": 35.3971119133574,
"alnum_prop": 0.5525752167261602,
"repo_name": "gawells/SublimeLinter-contrib-fileExists",
"id": "9185ac3c042a1a5416c34ba4d856307328f91aad",
"size": "9949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "399"
},
{
"name": "Python",
"bytes": "9949"
}
],
"symlink_target": ""
} |
from JapaneseTokenizer.common import sever_handler
# client module
import six
if six.PY2:
from JapaneseTokenizer.jumanpp_wrapper.__jumanpp_wrapper_python2 import JumanppWrapper
else:
from JapaneseTokenizer.jumanpp_wrapper.__jumanpp_wrapper_python3 import JumanppWrapper
# else
import sys
import unittest
import os
import time
__author__ = 'kensuke-mi'
class TestServerHandler(unittest.TestCase):
    """Integration tests for sever_handler.JumanppHnadler against a local
    jumanpp binary (expected at /usr/local/bin/jumanpp)."""

    @classmethod
    def setUpClass(cls):
        # Unicode literal handling differs between Python 2 and 3.
        if six.PY3:
            cls.test_senetence = '紗倉 まな(さくらまな、1993年3月23日 - )は、日本のAV女優。'
        else:
            cls.test_senetence = u'紗倉 まな(さくらまな、1993年3月23日 - )は、日本のAV女優。'
        cls.jumanpp_command = "/usr/local/bin/jumanpp"

    def test_jumanpp_process_hanlder_normal(self):
        """It tests jumanpp process handler"""
        jumanpp_process_handler = sever_handler.JumanppHnadler(jumanpp_command=self.jumanpp_command)
        result_jumanpp_analysis = jumanpp_process_handler.query(input_string=self.test_senetence)
        self.assertTrue(isinstance(result_jumanpp_analysis, six.text_type))
        ## stop the subprocess before discarding the handler ##
        jumanpp_process_handler.stop_process()
        del jumanpp_process_handler

    def test_jumanpp_process_handler_timeout_exception(self):
        """It tests the case which causes timeout exception"""
        jumanpp_process_handler = None
        with self.assertRaises(Exception) as exc:
            jumanpp_process_handler = sever_handler.JumanppHnadler(jumanpp_command=self.jumanpp_command,
                                                                   timeout_second=1)
            result_jumanpp_analysis = jumanpp_process_handler.query(input_string=self.test_senetence*100)
        exception_message = exc.exception
        # Bug fix: stop_process() previously raised NameError whenever the
        # constructor itself (rather than the query) raised, because the
        # handler name was never bound.  Guard against an unbound handler.
        if jumanpp_process_handler is not None:
            jumanpp_process_handler.stop_process()

    def test_jumanpp_process_handler_init_exception(self):
        """Constructing the handler with a bogus command must raise."""
        with self.assertRaises(Exception) as exc:
            jumanpp_process_handler = sever_handler.JumanppHnadler(jumanpp_command='hoge',
                                                                   timeout_second=1)
        exception_message = exc.exception

    def test_jumanpp_process_handler_huge_request(self):
        """It tests the case where a user sends too much request"""
        input_huge_request = [self.test_senetence] * 100
        jumanpp_process_handler = sever_handler.JumanppHnadler(jumanpp_command=self.jumanpp_command)
        seq_result_jumanpp_analysis = [jumanpp_process_handler.query(input_string=sentence)
                                       for sentence in input_huge_request]
        self.assertTrue(isinstance(seq_result_jumanpp_analysis, list))
        # Bug fix: the subprocess was previously leaked at the end of this test.
        jumanpp_process_handler.stop_process()

if __name__ == '__main__':
    unittest.main()
"content_hash": "1be389565caf22f976dbc1382fcf9c06",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 105,
"avg_line_length": 41.890625,
"alnum_prop": 0.659828422230511,
"repo_name": "Kensuke-Mitsuzawa/JapaneseTokenizers",
"id": "65a15045275ee1d6bffe818db1fe3a1ee806f648",
"size": "2812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/common/test_server_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4465"
},
{
"name": "Makefile",
"bytes": "409"
},
{
"name": "Python",
"bytes": "111827"
},
{
"name": "Shell",
"bytes": "4194"
}
],
"symlink_target": ""
} |
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from pimp_board import app, db
# Hook Flask-Migrate into the app so Alembic migrations track `db`.
migrate = Migrate(app, db)
# Flask-Script manager: exposes CLI commands for the application.
manager = Manager(app)
# Registers the migration commands under `python manage.py db <subcommand>`.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
"content_hash": "5be1f3e7f78e61fdcaf93a9c1941f027",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 49,
"avg_line_length": 18.928571428571427,
"alnum_prop": 0.7245283018867924,
"repo_name": "peterprokop/PimpBoard",
"id": "374bf032cb652b3c3d988bc4f5123ec085a069c6",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pimp_board/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "3362"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "10499"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
from flask_testing import TestCase
from mock import Mock
import yawt.utils
import yawtext
from yawt.cli import create_manager
from yawtext.sync import Sync
from yawtext.test import TempGitFolder
class TestFolder(TempGitFolder):
    """Temporary git-backed site folder seeded with a single index file."""

    def __init__(self):
        super(TestFolder, self).__init__()
        # Repo-relative path -> file contents, written on initialize().
        self.files = {'content/index.txt': 'index text'}
class TestSync(TestCase):
    """Tests for the yawt 'sync' command.

    This TestCase doubles as the yawt config object (create_app passes
    ``config=self``), so class attributes act as yawt configuration.
    """
    # Extensions loaded by the app under test.
    YAWT_EXTENSIONS = ['yawtext.vc.YawtVersionControl',
                       'yawtext.sync.YawtSync']
    def create_app(self):
        # Build a throwaway git-backed site folder and a yawt app rooted there.
        self.site = TestFolder()
        self.site.initialize()
        return yawt.create_app(self.site.site_root, config=self)
    def setUp(self):
        """Monkey-patch every version-control helper in yawtext.sync with a
        Mock, keeping the originals so tearDown can restore them."""
        self.site.initialize_git()
        self.old_vc_add_tracked = yawtext.sync.vc_add_tracked
        self.old_vc_add_tracked_and_new = yawtext.sync.vc_add_tracked_and_new
        yawtext.sync.vc_add_tracked = Mock()
        yawtext.sync.vc_add_tracked_and_new = Mock()
        self.old_vc_status = yawtext.sync.vc_status
        yawtext.sync.vc_status = Mock()
        self.old_vc_commit = yawtext.sync.vc_commit
        yawtext.sync.vc_commit = Mock()
        self.old_vc_push = yawtext.sync.vc_push
        yawtext.sync.vc_push = Mock()
        self.old_call_plugins = yawtext.sync.call_plugins
        yawtext.sync.call_plugins = Mock()
    def test_sync_is_added_to_commands(self):
        # The sync extension should register a 'sync' CLI command.
        self.app.preprocess_request()
        manager = create_manager(self.app)
        self.assertTrue('sync' in manager._commands)
    def test_sync_commits_in_strict_mode(self):
        syncCmd = Sync()
        syncCmd.run(strict=True,
                    addnew=False,
                    push=False,
                    message='commit message')
        yawtext.sync.vc_commit.assert_called_with('commit message')
    def test_sync_supplies_commit_message(self):
        # A default message is used when none is supplied.
        syncCmd = Sync()
        syncCmd.run(strict=True,
                    addnew=False,
                    push=False,
                    message=None)
        yawtext.sync.vc_commit.assert_called_with('synced changes')
    def test_sync_pushes_if_asked(self):
        syncCmd = Sync()
        syncCmd.run(strict=True,
                    addnew=False,
                    push=True,
                    message='commit message')
        yawtext.sync.vc_push.assert_called_with()
    def test_sync_skips_push_when_in_nopush_mode(self):
        syncCmd = Sync()
        syncCmd.run(strict=True,
                    addnew=False,
                    push=False,
                    message='commit message')
        yawtext.sync.vc_push.assert_not_called()
    def test_sync_skips_adding_and_status_in_strict_mode(self):
        # Strict mode commits only what is already staged: no adds, no status.
        syncCmd = Sync()
        syncCmd.run(strict=True,
                    addnew=False,
                    push=False,
                    message='commit message')
        yawtext.sync.vc_add_tracked.assert_not_called()
        yawtext.sync.vc_add_tracked_and_new.assert_not_called()
        yawtext.sync.vc_status.assert_not_called()
    def test_sync_adds_new_when_asked(self):
        syncCmd = Sync()
        syncCmd.run(strict=False,
                    addnew=True,
                    push=False,
                    message='commit message')
        yawtext.sync.vc_add_tracked_and_new.assert_called_with()
        yawtext.sync.vc_add_tracked.assert_not_called()
    def test_sync_adds_only_tracked_when_asked(self):
        syncCmd = Sync()
        syncCmd.run(strict=False,
                    addnew=False,
                    push=False,
                    message='commit message')
        yawtext.sync.vc_add_tracked_and_new.assert_not_called()
        yawtext.sync.vc_add_tracked.assert_called_with()
    def test_call_plugins_called_with_status_results(self):
        # The vc_status result is forwarded to plugins via 'on_pre_sync'.
        changed = yawt.utils.ChangedFiles(modified=['content/index.txt'])
        yawtext.sync.vc_status.return_value = changed
        syncCmd = Sync()
        syncCmd.run(strict=False,
                    addnew=False,
                    push=False,
                    message='commit message')
        yawtext.sync.call_plugins.assert_called_with('on_pre_sync',
                                                     changed)
    def tearDown(self):
        # Restore the original module-level helpers patched in setUp.
        yawtext.sync.vc_add_tracked = self.old_vc_add_tracked
        yawtext.sync.vc_add_tracked_and_new = self.old_vc_add_tracked_and_new
        yawtext.sync.vc_status = self.old_vc_status
        yawtext.sync.vc_commit = self.old_vc_commit
        yawtext.sync.vc_push = self.old_vc_push
        yawtext.sync.call_plugins = self.old_call_plugins
        self.site.remove()
| {
"content_hash": "58f9099dab36d43202b2cb9095a1be9c",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 77,
"avg_line_length": 35.37692307692308,
"alnum_prop": 0.583605131550337,
"repo_name": "drivet/yawt",
"id": "ab80dd549509cfca37958e7b7f426ceda0b7827d",
"size": "4618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yawtext/test/test_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "192131"
},
{
"name": "Shell",
"bytes": "1034"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Packaging metadata for the `glu` distribution.
setup(
    name='glu',
    packages=find_packages(),  # auto-discover all packages in the project
    url='https://github.com/chongkong/glu',
    license='MIT',
    version='0.0.18',
    description='Glue for DRY configurations',
    author='Park Jong Bin',
    author_email='chongkong94@gmail.com',
    keywords=['glue', 'glu', 'dry', 'config', 'dry-config', 'dry-configurable'],
    zip_safe=False,  # do not install as a zipped egg
    install_requires=[
        'future==0.15.2',
        'PyYAML==3.11'
    ],
    entry_points={
        'console_scripts': [
            # Installs a `glu` command that dispatches to glu.cli:main.
            'glu = glu.cli:main'
        ]
    },
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License'
    ]
)
| {
"content_hash": "d0103ab24aec95d85dde6f1c2e9a61e6",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 28.35483870967742,
"alnum_prop": 0.5699658703071673,
"repo_name": "chongkong/glu",
"id": "fe212568bc06a4a6bafb3ca5f7a94fe7d5e498fb",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14118"
}
],
"symlink_target": ""
} |
from dataclasses import dataclass
from typing import Any
from pants.backend.codegen.protobuf.lint.buf.skip_field import SkipBufLintField
from pants.backend.codegen.protobuf.lint.buf.subsystem import BufSubsystem
from pants.backend.codegen.protobuf.target_types import (
ProtobufDependenciesField,
ProtobufSourceField,
)
from pants.core.goals.lint import LintResult, LintTargetsRequest, Partitions
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.core.util_rules.source_files import SourceFilesRequest
from pants.core.util_rules.stripped_source_files import StrippedSourceFiles
from pants.engine.fs import Digest, MergeDigests
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import FieldSet, Target, TransitiveTargets, TransitiveTargetsRequest
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
from pants.util.strutil import pluralize
@dataclass(frozen=True)
class BufFieldSet(FieldSet):
    """Field set selecting protobuf source targets for `buf lint`."""
    required_fields = (ProtobufSourceField,)
    sources: ProtobufSourceField
    dependencies: ProtobufDependenciesField
    @classmethod
    def opt_out(cls, tgt: Target) -> bool:
        # Targets that set the skip field are excluded from linting.
        return tgt.get(SkipBufLintField).value
class BufLintRequest(LintTargetsRequest):
    """Lint request wiring BufFieldSet targets to the buf subsystem."""
    field_set_type = BufFieldSet
    tool_subsystem = BufSubsystem  # type: ignore[assignment]
    @classproperty
    def tool_name(cls) -> str:
        # Name shown in lint output and summaries.
        return "buf-lint"
@rule
async def partition_buf(
    request: BufLintRequest.PartitionRequest[BufFieldSet], buf: BufSubsystem
) -> Partitions[BufFieldSet, Any]:
    """Produce no partitions when buf lint is skipped, else one partition
    containing every requested field set."""
    if buf.lint_skip:
        return Partitions()
    return Partitions.single_partition(request.field_sets)
@rule(desc="Lint with buf lint", level=LogLevel.DEBUG)
async def run_buf(
    request: BufLintRequest.Batch[BufFieldSet, Any], buf: BufSubsystem, platform: Platform
) -> LintResult:
    """Download buf, stage the proto sources, and run `buf lint` on a batch."""
    # The transitive closure is needed so buf can resolve proto imports.
    transitive_targets = await Get(
        TransitiveTargets,
        TransitiveTargetsRequest((field_set.address for field_set in request.elements)),
    )
    # All protobuf sources in the closure, with source roots stripped.
    all_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            tgt[ProtobufSourceField]
            for tgt in transitive_targets.closure
            if tgt.has_field(ProtobufSourceField)
        ),
    )
    # Only the sources actually being linted in this batch.
    target_stripped_sources_request = Get(
        StrippedSourceFiles,
        SourceFilesRequest(
            (field_set.sources for field_set in request.elements),
            for_sources_types=(ProtobufSourceField,),
            enable_codegen=True,
        ),
    )
    download_buf_get = Get(DownloadedExternalTool, ExternalToolRequest, buf.get_request(platform))
    # Resolve the three requests concurrently.
    target_sources_stripped, all_sources_stripped, downloaded_buf = await MultiGet(
        target_stripped_sources_request, all_stripped_sources_request, download_buf_get
    )
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                target_sources_stripped.snapshot.digest,
                all_sources_stripped.snapshot.digest,
                downloaded_buf.digest,
            )
        ),
    )
    # `--path` restricts linting to the batch's files even though the whole
    # closure is present in the sandbox for import resolution.
    process_result = await Get(
        FallibleProcessResult,
        Process(
            argv=[
                downloaded_buf.exe,
                "lint",
                *buf.lint_args,
                "--path",
                ",".join(target_sources_stripped.snapshot.files),
            ],
            input_digest=input_digest,
            description=f"Run buf lint on {pluralize(len(request.elements), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.create(request, process_result)
def rules():
    """Return every rule registration for the buf lint backend."""
    registrations = []
    registrations.extend(collect_rules())
    registrations.extend(BufLintRequest.rules())
    return registrations
| {
"content_hash": "9114147f492a581a452fba95ddbdb9a1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 98,
"avg_line_length": 32.92307692307692,
"alnum_prop": 0.6905503634475597,
"repo_name": "pantsbuild/pants",
"id": "7449bf5575559f575eeb2c9f9463a052cd052881",
"size": "3983",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/codegen/protobuf/lint/buf/lint_rules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "97190"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3771"
},
{
"name": "Python",
"bytes": "7582858"
},
{
"name": "Rust",
"bytes": "1657282"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31400"
},
{
"name": "Starlark",
"bytes": "76892"
}
],
"symlink_target": ""
} |
import unittest
import json
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User
class APITestCase(unittest.TestCase):
    """Exercise the JSON API's authentication and error handling.

    Improvement: the original used assertTrue(a == b) everywhere, which hides
    the mismatched values on failure; assertEqual reports them.
    """

    def setUp(self):
        # Fresh app + database for each test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
        self.client = self.app.test_client()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def get_api_headers(self, username, password):
        """Build HTTP Basic auth + JSON content-negotiation headers."""
        return {
            'Authorization': 'Basic ' + b64encode(
                (username + ':' + password).encode('utf-8')).decode('utf-8'),
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

    def test_404(self):
        response = self.client.get(
            '/wrong/url',
            headers=self.get_api_headers('email', 'password'))
        self.assertEqual(response.status_code, 404)
        json_response = json.loads(
            response.data.decode('utf-8'))
        self.assertEqual(json_response['error'], 'not found')

    def test_no_auth(self):
        # Missing Authorization header must be rejected.
        response = self.client.get(
            url_for('api.get_notes'),
            content_type='application/json')
        self.assertEqual(response.status_code, 401)

    def test_bad_auth(self):
        # add a user
        u = User(email='test@example.com', password='password', confirmed=True)
        db.session.add(u)
        db.session.commit()
        # attempt to authenticate with bad password
        response = self.client.get(
            url_for('api.get_notes'),
            headers=self.get_api_headers(
                'test@example.com', 'wrong password'))
        self.assertEqual(response.status_code, 401)

    def test_token_auth(self):
        # add a user
        u = User(email='test@example.com', password='password', confirmed=True)
        db.session.add(u)
        db.session.commit()
        # issue request with bad token
        response = self.client.get(
            url_for('api.get_notes'),
            headers=self.get_api_headers('bad-token', ''))
        self.assertEqual(response.status_code, 401)
        # get a token
        response = self.client.get(
            url_for('api.get_token'),
            headers=self.get_api_headers('test@example.com', 'password'))
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(
            response.data.decode('utf-8'))
        self.assertIsNotNone(json_response.get('token'))
        token = json_response['token']
        # issue a request with the new token
        response = self.client.get(
            url_for('api.get_notes'),
            headers=self.get_api_headers(token, ''))
        self.assertEqual(response.status_code, 200)

    def test_anonymous(self):
        # Try to get notes
        response = self.client.get(
            url_for('api.get_notes'),
            headers=self.get_api_headers('', ''))
        self.assertEqual(response.status_code, 401)
        # Try to get a token
        response = self.client.get(
            url_for('api.get_token'),
            headers=self.get_api_headers('', ''))
        self.assertEqual(response.status_code, 401)

    # NOTE(review): method name keeps the original "acount" typo so any
    # external test selection by name keeps working.
    def test_unconfirmed_acount(self):
        # add an unconfirmed user
        u = User(
            email='test@example.com',
            password='password2',
            confirmed=False)
        db.session.add(u)
        db.session.commit()
        # get notes from unconfirmed account
        response = self.client.get(
            url_for('api.get_notes'),
            headers=self.get_api_headers(
                'test@example.com', 'password2'))
        self.assertEqual(response.status_code, 403)
| {
"content_hash": "b465695b31baff8d1cbb2c4a31be05d7",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 33.324561403508774,
"alnum_prop": 0.5740984469597262,
"repo_name": "iamgroot42/braindump",
"id": "102a3e4941e75fa3c664d7d5b24caa70513d48bc",
"size": "3799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4967"
},
{
"name": "HTML",
"bytes": "24306"
},
{
"name": "JavaScript",
"bytes": "31273"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "60355"
},
{
"name": "Shell",
"bytes": "3723"
}
],
"symlink_target": ""
} |
"""Assign soccer players from a CSV roster to three balanced teams.

Reads soccer_players.csv, distributes experienced players first and then
inexperienced players round-robin across the teams, writes a teams.txt
roster file, and writes one welcome letter per player.
"""
import csv
import random

# Checkout the README.md file for some other notes.

# Team names in roster-file order.
TEAM_NAMES = ('Sharks', 'Dragons', 'Raptors')


def load_players(csv_path='soccer_players.csv'):
    """Read the roster CSV and return {name: {height, experience, guardian}}."""
    players = {}
    with open(csv_path, 'r') as csv_file:
        for row in csv.DictReader(csv_file):
            # Key each player by name and give the columns cleaner names.
            players[row['Name']] = {
                'height': row['Height (inches)'],
                'experience': row['Soccer Experience'],
                'guardian': row['Guardian Name(s)'],
            }
    return players


def players_who_have_experience(players, do_they_have_it):
    """Return names whose 'experience' value equals do_they_have_it ('YES'/'NO')."""
    return [name for name, data in players.items()
            if data['experience'] == do_they_have_it]


def put_players_on_team_randomly(teams, player_list):
    """Distribute player_list round-robin over teams, picking randomly.

    player_list is consumed (emptied) in the process; team sizes end up
    balanced to within one player.
    """
    while player_list:
        for team_data in teams.values():
            if player_list:
                pick = random.randint(0, len(player_list) - 1)
                team_data['players'].append(player_list.pop(pick))


def output_team_file(teams, players, file_name='teams.txt'):
    """Write each team's roster (name, experience, guardian) to file_name."""
    with open(file_name, 'w') as teams_file:
        for team_name, team_data in teams.items():
            teams_file.write(team_name + '\n')
            for player in team_data['players']:
                teams_file.write('{}, {}, {}'.format(
                    player,
                    players[player]['experience'],
                    players[player]['guardian']) + '\n')
            teams_file.write('\n')


def output_welcome_letters(teams, players):
    """Write one welcome letter per player, named after the player."""
    for team_name, team_data in teams.items():
        for player_name in team_data['players']:
            file_name = player_name.lower().replace(' ', '_') + '.txt'
            with open(file_name, 'w') as letter:
                letter.write('Dear {},\n\n'.format(players[player_name]['guardian']))
                letter.write('Welcome to the new season!\n\n')
                letter.write('This letter provides soccer seasons details for your player:\n\n {}\n\n'.format(player_name))
                letter.write('This season, they will play for:\n\n The {}\n\n'.format(team_name))
                letter.write('The first practice is:\n\n 4:45pm on Jan. 10 at Community Field 7\n\n')
                letter.write('Looking forward to seeing you then!\n\n')
                letter.write('-- Alan "The Commish" Smith\n')


def main():
    """Load the roster, assign teams (experienced first), and write outputs."""
    players = load_players()
    teams = {name: {'players': []} for name in TEAM_NAMES}
    put_players_on_team_randomly(teams, players_who_have_experience(players, 'YES'))
    put_players_on_team_randomly(teams, players_who_have_experience(players, 'NO'))
    output_team_file(teams, players)
    output_welcome_letters(teams, players)


if __name__ == "__main__":
    main()
| {
"content_hash": "35a69d508f4cf95a93f0b199a44118b5",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 132,
"avg_line_length": 42.55714285714286,
"alnum_prop": 0.5589123867069486,
"repo_name": "alanwsmith/treehouse-soccer-league",
"id": "1e995c426623f86633bf08e0bd711c2a1cf1aa61",
"size": "3003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "league_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3003"
}
],
"symlink_target": ""
} |
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
   surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
   segment represents the major version, the second represents the minor
   version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
from __future__ import print_function
import sys
import re
# Read the command line arguments (input and output directories for Version.h).
if len(sys.argv) < 3:
  print("Usage: versiongenerate.py input_dir output_dir")
  sys.exit(1)
else:
  input_dir = sys.argv[1]
  output_dir = sys.argv[2]

# Read the first 1024 characters of the configure.ac file; AC_INIT is assumed
# to appear within this prefix (see the module docstring).
buffer_size = 1024
with open("%s/configure.ac" % input_dir, 'r') as config_file:
  opening_string = config_file.read(buffer_size)

# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
version_values = version_expression.search(opening_string)
if version_values is None:
  # Fail with a clear message instead of crashing on .group() below.
  print("Could not find an AC_INIT version string in configure.ac")
  sys.exit(1)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)

# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
with open("%s/Version.h" % output_dir, 'w') as version_file:
  version_file.write(file_data)
| {
"content_hash": "54889e8bd832a0a1cba5793fbde502d0",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 42.57746478873239,
"alnum_prop": 0.7158451869004301,
"repo_name": "AOSPU/external_chromium_org_testing_gtest",
"id": "88ac186ffdc3ad8f14de4a138a6e86516f2859c8",
"size": "4576",
"binary": false,
"copies": "1",
"ref": "refs/heads/android-5.0/py3",
"path": "xcode/Scripts/versiongenerate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1925888"
},
{
"name": "Makefile",
"bytes": "4555"
},
{
"name": "Python",
"bytes": "254120"
},
{
"name": "Shell",
"bytes": "4941"
}
],
"symlink_target": ""
} |
from google.cloud import memcache_v1beta2
async def sample_get_instance():
    """Fetch a single Memcached instance by resource name and print it."""
    # Build the async client.
    client = memcache_v1beta2.CloudMemcacheAsyncClient()

    # Describe which instance to fetch.
    request = memcache_v1beta2.GetInstanceRequest(name="name_value")

    # Await the RPC and show the result.
    response = await client.get_instance(request=request)
    print(response)
# [END memcache_v1beta2_generated_CloudMemcache_GetInstance_async]
| {
"content_hash": "34721af3fd7dd37b7644120151cee7bf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 25,
"alnum_prop": 0.72,
"repo_name": "googleapis/python-memcache",
"id": "16fbc6d6f8b8b154c11476b091c105b4c317019d",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/memcache_v1beta2_generated_cloud_memcache_get_instance_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "680016"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
} |
__author__ = 'Taio'
# itertools.chain
import itertools
a = [1, 2, 3, 4]
# itertools.chain stitches several iterables into one stream: here all
# 2-element followed by all 3-element combinations of `a`.
# BUG FIX: `print p` (Python 2-only syntax) replaced by `print(p)`, which
# behaves identically for a single argument and is valid Python 3.
for p in itertools.chain(itertools.combinations(a, 2), itertools.combinations(a, 3)):
    print(p)
# chain.from_iterable flattens a generator of iterables: every subset of `a`,
# from the empty tuple up to the full set (i.e. the power set).
for subset in itertools.chain.from_iterable(itertools.combinations(a, n) for n in range(len(a) + 1)):
    print(subset)
"content_hash": "96ed78e01182df1ed8199a0d9e7adbcf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 101,
"avg_line_length": 22.53846153846154,
"alnum_prop": 0.6928327645051194,
"repo_name": "jiasir/python-examples",
"id": "e0a1d646f631bf959b47aa1d862964f357b75281",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chaining_iterables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7113"
}
],
"symlink_target": ""
} |
from typing import List
from pystarling.api_objects.transactions.TransactionDirectDebit import TransactionDirectDebit
from pystarling.api_services.BaseApiService import BaseApiService
class TransactionDirectDebitService(BaseApiService):
    """API service for Starling direct-debit transactions."""

    ENDPOINT = 'transactions/direct-debit'

    def __init__(self, config):
        BaseApiService.__init__(self, config)

    def get(self, transaction_id) -> TransactionDirectDebit:
        """Fetch a single direct-debit transaction by its id."""
        response = self.get_parsed_response(self.ENDPOINT + '/' + transaction_id)
        return TransactionDirectDebit(response)

    def list(self, from_date=None, to_date=None) -> List[TransactionDirectDebit]:
        """List direct-debit transactions, optionally bounded by a date range."""
        parsed = self.get_embedded_key_from_parsed_response_with_date_range(
            self.ENDPOINT, from_date, to_date
        )
        return [TransactionDirectDebit(item) for item in parsed['transactions']]
| {
"content_hash": "9c90b741b98c816ac6f56f6db5ceaf74",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 105,
"avg_line_length": 38.81818181818182,
"alnum_prop": 0.7295081967213115,
"repo_name": "rdcrt/pystarling",
"id": "6ae2d78b9ce4aa511009b23b9a1e7ac77c2312e1",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystarling/api_services/transactions/TransactionDirectDebitService.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71012"
}
],
"symlink_target": ""
} |
"""Eval libraries."""
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
from tensorflow_examples.lite.model_maker.third_party.efficientdet import coco_metric
from tensorflow_examples.lite.model_maker.third_party.efficientdet import dataloader
from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import anchors
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import efficientdet_keras
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import postprocess
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import util_keras
# Cloud TPU Cluster Resolvers
flags.DEFINE_string('tpu', None, 'The Cloud TPU name.')
flags.DEFINE_string('gcp_project', None, 'Project name.')
flags.DEFINE_string('tpu_zone', None, 'GCE zone name.')
# Evaluation inputs, model selection and hyperparameter overrides.
flags.DEFINE_integer('eval_samples', None, 'Number of eval samples.')
flags.DEFINE_string('val_file_pattern', None,
                    'Glob for eval tfrecords, e.g. coco/val-*.tfrecord.')
# BUG FIX: typo 'Groudtruth' -> 'Groundtruth' in user-visible help text.
flags.DEFINE_string('val_json_file', None,
                    'Groundtruth, e.g. annotations/instances_val2017.json.')
flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.')
flags.DEFINE_string('model_dir', None, 'Location of the checkpoint to run.')
# BUG FIX: typo 'GLobal' -> 'Global' in user-visible help text.
flags.DEFINE_integer('batch_size', 8, 'Global batch size.')
flags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file')
FLAGS = flags.FLAGS
def main(_):
  """Evaluate an EfficientDet checkpoint on a COCO-style validation set."""
  # Build the model config from the model name, then apply CLI overrides.
  config = hparams_config.get_efficientdet_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  config.val_json_file = FLAGS.val_json_file
  config.nms_configs.max_nms_inputs = anchors.MAX_DETECTION_POINTS
  config.drop_remainder = False  # eval all examples w/o drop.
  config.image_size = utils.parse_image_size(config['image_size'])
  # Pick the distribution strategy: TPU, mirrored multi-GPU, or one device.
  if config.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
    tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
    logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
  elif config.strategy == 'gpus':
    ds_strategy = tf.distribute.MirroredStrategy()
    logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
  else:
    if tf.config.list_physical_devices('GPU'):
      ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
    else:
      ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')
  with ds_strategy.scope():
    # Network: build the model and restore the latest checkpoint weights
    # (optionally from the EMA shadow variables via moving_average_decay).
    model = efficientdet_keras.EfficientDetNet(config=config)
    model.build((None, *config.image_size, 3))
    util_keras.restore_ckpt(model,
                            tf.train.latest_checkpoint(FLAGS.model_dir),
                            config.moving_average_decay,
                            skip_mismatch=False)
    @tf.function
    def model_fn(images, labels):
      # Forward pass + NMS post-processing; results are fed to the COCO
      # evaluator. `evaluator` is defined below — the closure resolves the
      # name at call time, after the evaluator has been created.
      cls_outputs, box_outputs = model(images, training=False)
      detections = postprocess.generate_detections(config,
                                                   cls_outputs,
                                                   box_outputs,
                                                   labels['image_scales'],
                                                   labels['source_ids'])
      tf.numpy_function(evaluator.update_state,
                        [labels['groundtruth_data'],
                         postprocess.transform_detections(detections)], [])
    # Evaluator for AP calculation.
    label_map = label_util.get_label_map(config.label_map)
    evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)
    # dataset
    batch_size = FLAGS.batch_size  # global batch size.
    ds = dataloader.InputReader(
        FLAGS.val_file_pattern,
        is_training=False,
        max_instances_per_image=config.max_instances_per_image)(
            config, batch_size=batch_size)
    if FLAGS.eval_samples:
      # Round up so a partial final batch is still evaluated.
      ds = ds.take((FLAGS.eval_samples + batch_size - 1) // batch_size)
    ds = ds_strategy.experimental_distribute_dataset(ds)
    # evaluate all images.
    eval_samples = FLAGS.eval_samples or 5000
    pbar = tf.keras.utils.Progbar((eval_samples + batch_size - 1) // batch_size)
    for i, (images, labels) in enumerate(ds):
      ds_strategy.run(model_fn, (images, labels))
      pbar.update(i)
  # compute the final eval results.
  metrics = evaluator.result()
  metric_dict = {}
  for i, name in enumerate(evaluator.metric_names):
    metric_dict[name] = metrics[i]
  if label_map:
    # Per-class AP values follow the aggregate metrics in `metrics`.
    for i, cid in enumerate(sorted(label_map.keys())):
      name = 'AP_/%s' % label_map[cid]
      metric_dict[name] = metrics[i + len(evaluator.metric_names)]
  print(FLAGS.model_name, metric_dict)
if __name__ == '__main__':
  # These two flags have no usable defaults; fail fast if missing.
  flags.mark_flag_as_required('val_file_pattern')
  flags.mark_flag_as_required('model_dir')
  logging.set_verbosity(logging.ERROR)
  app.run(main)
| {
"content_hash": "c9cc4b49964e1e851db18b56b7ac2cf1",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 98,
"avg_line_length": 45.55084745762712,
"alnum_prop": 0.6792558139534883,
"repo_name": "tensorflow/examples",
"id": "84fd95f7f4345b64d659d239862cb832aba61ce7",
"size": "6057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/third_party/efficientdet/keras/eval.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
from rauth.service import OAuth1Service, OAuth1Session
import xmltodict
class GRSessionError(Exception):
    """Custom exception for Goodreads session/request failures."""

    def __init__(self, error_msg):
        # Keep the raw message available to callers.
        self.error_msg = error_msg

    def __str__(self):
        # Render with a trailing newline, matching the original behaviour.
        return f'{self.error_msg}\n'
class GRSession:
    """Handles OAuth sessions.

    Wraps rauth's OAuth1Service/session objects for the Goodreads API and
    exposes simple get/post helpers.
    """

    def __init__(self, client_key, client_secret,
                 access_token=None, access_token_secret=None):
        """
        :param client_key: Goodreads developer (consumer) key.
        :param client_secret: Goodreads developer (consumer) secret.
        :param access_token: Previously obtained access token, if any.
        :param access_token_secret: Secret matching `access_token`.
        """
        self.session = None
        self.client_key = client_key
        self.client_secret = client_secret
        self.access_token = access_token
        self.access_token_secret = access_token_secret
        self.request_token = None
        self.goodreads_service = OAuth1Service(
            consumer_key=self.client_key,
            consumer_secret=self.client_secret,
            name='goodreads',
            request_token_url='http://www.goodreads.com/oauth/request_token',
            authorize_url='http://www.goodreads.com/oauth/authorize',
            access_token_url='http://www.goodreads.com/oauth/access_token',
            base_url='http://www.goodreads.com/'
        )

    def oauth_start(self):
        """ Start oauth, get tokens return authorization url"""
        # BUG FIX: the original referenced the bare name `goodreads_service`
        # (NameError at runtime); the service object lives on the instance.
        self.request_token, self.request_token_secret = \
            self.goodreads_service.get_request_token(header_auth=True)
        authorize_url = self.goodreads_service.get_authorize_url(self.request_token)
        # BUG FIX: Python 2 print statement converted to a function call,
        # consistent with the print() calls used elsewhere in this class.
        print('To authorize access visit: ' + authorize_url)
        return authorize_url

    def oauth_finish(self):
        """ Finish creating session after user authorized access.
        save access tokens as instance members. """
        self.session = self.goodreads_service.get_auth_session(self.request_token,
                                                               self.request_token_secret)
        # TODO: Check session valid
        self.access_token = self.session.access_token
        self.access_token_secret = self.session.access_token_secret

    def oauth_resume(self):
        """ Create a session when access tokens are already available """
        print("resuming with: " + self.access_token)
        self.session = self.goodreads_service.get_auth_session(self.access_token, self.access_token_secret)
        return self.session

    def post(self, url, data=None):
        """POST to `url`; return the parsed GRResponse dict on HTTP 201.

        :raises Exception: when the resource could not be created.
        """
        # BUG FIX: mutable default argument `data={}` replaced with a None
        # sentinel to avoid sharing one dict across calls.
        if data is None:
            data = {}
        # Are there parameters?
        if len(data) > 0:
            url += '?'
        response = self.session.post(url, params=data)
        if response.status_code == 201:
            data_dict = xmltodict.parse(response.content)
            return data_dict['GRResponse']
        else:
            raise Exception('Cannot create resource: %s' % response.status_code)

    def get(self, url, data=None):
        """GET `url`; return the raw response object on HTTP 200.

        :raises Exception: on any non-200 status code.
        """
        # BUG FIX: mutable default argument `data={}` replaced with a None
        # sentinel to avoid sharing one dict across calls.
        if data is None:
            data = {}
        # Are there parameters?
        if len(data) > 0:
            url += '?'
        print(url)
        response = self.session.get(url, params=data)
        if response.status_code == 200:
            # NOTE(review): the original contained two unreachable lines after
            # this return that parsed the XML body; the live behaviour
            # (returning the raw response) is preserved, dead code removed.
            return response
        else:
            raise Exception('Unable to GET: %s' % response.status_code)
| {
"content_hash": "6ddae8fcb5c72e6b52328c064d6a12e7",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 107,
"avg_line_length": 37.09473684210526,
"alnum_prop": 0.5956299659477866,
"repo_name": "solvire/goodreads_api",
"id": "1ae5a845789f71c52cb08476aaf7605f16bf8988",
"size": "3524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30013"
}
],
"symlink_target": ""
} |
import collections.abc
import contextlib
import copy
import itertools
import random
import string
import threading
from functools import total_ordering, wraps
from typing import TYPE_CHECKING, Iterable, List, Optional, Union
from loguru import logger
from sqlalchemy import Column, Integer, String, Unicode
from flexget import config_schema, db_schema
from flexget.db_schema import VersionedBaseMeta
from flexget.entry import Entry, EntryState, EntryUnicodeError
from flexget.event import event, fire_event
from flexget.manager import Session
from flexget.plugin import (
DependencyError,
PluginError,
PluginWarning,
get_plugins,
phase_methods,
plugin_schemas,
)
from flexget.plugin import plugins as all_plugins
from flexget.plugin import task_phases
from flexget.terminal import capture_console
from flexget.utils import requests
from flexget.utils.database import with_session
from flexget.utils.simple_persistence import SimpleTaskPersistence
from flexget.utils.sqlalchemy_utils import ContextSession
from flexget.utils.template import FlexGetTemplate, render_from_task
from flexget.utils.tools import MergeException, get_config_hash, merge_dict_from_to
# Bind a named logger so all log lines from this module carry the 'task' name.
logger = logger.bind(name='task')
if TYPE_CHECKING:
    Base = VersionedBaseMeta
else:
    # At runtime, use the versioned declarative base for the legacy 'feed' schema.
    Base = db_schema.versioned_base('feed', 0)
class TaskConfigHash(Base):
    """Stores the config hash for tasks so that we can tell if the config has changed since last run."""

    __tablename__ = 'feed_config_hash'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Task name (persisted in the legacy 'name' column).
    task = Column('name', Unicode, index=True, nullable=False)
    # Hash of the task's config, as computed by get_config_hash().
    hash = Column('hash', String)

    def __repr__(self) -> str:
        return f'<TaskConfigHash(task={self.task},hash={self.hash})>'
@with_session
def config_changed(task: Optional[str] = None, session: Optional[ContextSession] = None) -> None:
    """
    Forces config_modified flag to come out true on next run of `task`. Used when the db changes, and all
    entries need to be reprocessed.

    .. WARNING: DO NOT (FURTHER) USE FROM PLUGINS

    :param task: Name of the task. If `None`, will be set for all tasks.
    :param session: sqlalchemy Session instance (injected by @with_session when omitted)
    """
    logger.debug('Marking config for {} as changed.', (task or 'all tasks'))
    # Deleting the stored hash guarantees the next hash comparison reports a change.
    task_hash = session.query(TaskConfigHash)
    if task:
        task_hash = task_hash.filter(TaskConfigHash.task == task)
    task_hash.delete()
def use_task_logging(func):
    """Decorator for Task methods: attaches task log context and optional console capture."""

    @wraps(func)
    def wrapper(self, *args, **kw):
        # Set the appropriate logger context while running task
        cms = [logger.contextualize(task=self.name, task_id=self.id, session_id=self.session_id)]
        # Capture console output if configured to do so
        if self.output:
            cms.append(capture_console(self.output))
        # Enter all context managers for the duration of the wrapped call.
        with contextlib.ExitStack() as stack:
            for cm in cms:
                stack.enter_context(cm)
            return func(self, *args, **kw)

    return wrapper
class EntryIterator:
    """An iterator over a subset of entries to emulate old task.accepted/rejected/failed/entries properties."""

    def __init__(self, entries: List[Entry], states: Union[EntryState, Iterable[EntryState]]):
        # Live view over the shared entry list; filtering happens lazily on iteration.
        self.all_entries = entries
        if isinstance(states, EntryState):
            states = [states]
        self.filter = lambda e: e._state in states

    def __iter__(self) -> Iterable[Entry]:
        return filter(self.filter, self.all_entries)

    def __bool__(self):
        # True if at least one entry matches the state filter.
        return any(e for e in self)

    def __len__(self):
        return sum(1 for _e in self)

    def __add__(self, other):
        return itertools.chain(self, other)

    def __radd__(self, other):
        return itertools.chain(other, self)

    def __getitem__(self, item) -> Union[Entry, Iterable[Entry]]:
        # Supports slices (returns a list) and non-negative integer indexes.
        if isinstance(item, slice):
            return list(itertools.islice(self, item.start, item.stop))
        if not isinstance(item, int):
            raise ValueError('Index must be integer.')
        for index, entry in enumerate(self):
            if index == item:
                return entry
        else:
            raise IndexError(f'{item} is out of bounds')

    def reverse(self):
        # NOTE(review): despite the name, this sorts the backing list in
        # descending order rather than reversing it — confirm intent.
        self.all_entries.sort(reverse=True)

    def sort(self, *args, **kwargs):
        # Sorts the underlying shared list in place (affects all sibling views).
        self.all_entries.sort(*args, **kwargs)
class EntryContainer(list):
    """Container for a list of entries, also contains accepted, rejected failed iterators over them."""

    def __init__(self, iterable: Optional[list] = None):
        list.__init__(self, iterable or [])

        # State-filtered live views over this same list.
        self._entries = EntryIterator(self, [EntryState.UNDECIDED, EntryState.ACCEPTED])
        self._accepted = EntryIterator(
            self, EntryState.ACCEPTED
        )  # accepted entries, can still be rejected
        self._rejected = EntryIterator(
            self, EntryState.REJECTED
        )  # rejected entries, can not be accepted
        self._failed = EntryIterator(self, EntryState.FAILED)  # failed entries
        self._undecided = EntryIterator(self, EntryState.UNDECIDED)  # undecided entries (default)

    # Make these read-only properties
    entries: EntryIterator = property(lambda self: self._entries)
    accepted: EntryIterator = property(lambda self: self._accepted)
    rejected: EntryIterator = property(lambda self: self._rejected)
    failed: EntryIterator = property(lambda self: self._failed)
    undecided: EntryIterator = property(lambda self: self._undecided)

    def __repr__(self) -> str:
        return f'<EntryContainer({list.__repr__(self)})>'
class TaskAbort(Exception):
    """Raised (via Task.abort) to stop task execution immediately."""

    def __init__(self, reason: str, silent: bool = False) -> None:
        # Intentionally does not call super().__init__ so Exception.args
        # keeps the original constructor arguments.
        self.reason = reason
        self.silent = silent

    def __repr__(self):
        return 'TaskAbort(reason={}, silent={})'.format(self.reason, self.silent)
@total_ordering
class Task:
    """
    Represents one task in the configuration.

    **Fires events:**

    * task.execute.before_plugin

      Before a plugin is about to be executed. Note that since this will also include all
      builtin plugins the amount of calls can be quite high

      ``parameters: task, keyword``

    * task.execute.after_plugin

      After a plugin has been executed.

      ``parameters: task, keyword``

    * task.execute.started

      Before a task starts execution

    * task.execute.completed

      After task execution has been completed

      ``parameters: task``
    """

    # Used to determine task order, when priority is the same
    _counter = itertools.count()

    # Default and hard ceiling for reruns requested via rerun().
    RERUN_DEFAULT = 5
    RERUN_MAX = 100
def __init__(
self,
manager,
name,
config=None,
options=None,
output=None,
session_id=None,
priority=None,
suppress_warnings=None,
):
"""
:param Manager manager: Manager instance.
:param string name: Name of the task.
:param dict config: Task configuration.
:param options: dict or argparse namespace with options for this task
:param output: A filelike that all console output will be sent to for this task.
:param session_id: Session id that will be attached to all log messages for filtering
:param priority: If multiple tasks are waiting to run, the task with the lowest priority will be run first.
The default is 0, if the cron option is set though, the default is lowered to 10.
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
"""
self.name = str(name)
self.id = ''.join(random.choice(string.digits) for _ in range(6))
self.manager = manager
if config is None:
config = manager.config['tasks'].get(name, {})
self.config = copy.deepcopy(config)
self.prepared_config = None
if options is None:
options = copy.copy(self.manager.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.manager.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
# If execution hasn't specifically set the `allow_manual` flag, set it to False by default
if not hasattr(options, 'allow_manual'):
setattr(options, 'allow_manual', False)
self.options = options
self.output = output
self.session_id = session_id
self.suppress_warnings = suppress_warnings or []
if priority is None:
self.priority = 10 if self.options.cron else 0
else:
self.priority = priority
self.priority = priority
self._count = next(self._counter)
self.finished_event = threading.Event()
# simple persistence
self.simple_persistence = SimpleTaskPersistence(self)
# rerun related flags and values
self._rerun_count = 0
self._max_reruns = Task.RERUN_DEFAULT
self._reruns_locked = False
self.config_modified = None
self.enabled = not self.name.startswith('_')
# These are just to query what happened in task. Call task.abort to set.
self.aborted = False
self.abort_reason = None
self.silent_abort = False
self.session = None
self.requests = requests.Session()
# List of all entries in the task
self._all_entries = EntryContainer()
self._rerun = False
self.disabled_phases = []
self.disabled_plugins = []
# current state
self.current_phase = None
self.current_plugin = None
self.traceback: Optional[str] = None
    @property
    def max_reruns(self):
        """How many times task can be rerunned before stopping"""
        return self._max_reruns

    @max_reruns.setter
    def max_reruns(self, value):
        """Set new maximum value for reruns unless property has been locked"""
        if not self._reruns_locked:
            self._max_reruns = value
        else:
            # Silently ignore the change; a plugin holds the rerun lock.
            logger.debug('max_reruns is locked, {} tried to modify it', self.current_plugin)
    def lock_reruns(self):
        """Prevent modification of max_reruns property"""
        logger.debug('Enabling rerun lock')
        self._reruns_locked = True

    def unlock_reruns(self):
        """Allow modification of max_reruns property"""
        logger.debug('Releasing rerun lock')
        self._reruns_locked = False

    @property
    def reruns_locked(self):
        """True while a plugin holds the rerun lock."""
        return self._reruns_locked

    @property
    def is_rerun(self):
        """True when the current execution is a rerun (rerun count is non-zero)."""
        return bool(self._rerun_count)

    @property
    def rerun_count(self):
        """Number of reruns performed so far in this execution cycle."""
        return self._rerun_count
    @property
    def undecided(self):
        """
        Entries that are neither accepted, rejected nor failed.

        .. deprecated:: Use API v3

        .. note:: We did not migrate to v3

           If I remember correctly the idea was to make v3 signature
           on_task_xxx(task, config, entries)

           Param entries would be EntryContainer, which has convenience
           iterator methods:

           - entries.accepted
           - entries.failed
           - etc, which you see here
        """
        return self.all_entries.undecided

    @property
    def failed(self):
        """Entries in the FAILED state.

        .. deprecated:: Use API v3
        """
        return self.all_entries.failed

    @property
    def rejected(self):
        """Entries in the REJECTED state.

        .. deprecated:: Use API v3
        """
        return self.all_entries.rejected

    @property
    def accepted(self):
        """Entries in the ACCEPTED state.

        .. deprecated:: Use API v3
        """
        return self.all_entries.accepted

    @property
    def entries(self):
        """Undecided and accepted entries (the "workable" set).

        .. deprecated:: Use API v3
        """
        return self.all_entries.entries

    @property
    def all_entries(self):
        """The full EntryContainer backing this task.

        .. deprecated:: Use API v3
        """
        return self._all_entries
    def __lt__(self, other):
        # Ordering: lower priority runs first; creation order breaks ties.
        return (self.priority, self._count) < (other.priority, other._count)

    def __eq__(self, other):
        # NOTE(review): defining __eq__ makes Task unhashable (implicit
        # __hash__ = None) — confirm no caller relies on hashing tasks.
        return (self.priority, self._count) == (other.priority, other._count)

    def __str__(self):
        return '<Task(name=%s,aborted=%s)>' % (self.name, self.aborted)
    def disable_phase(self, phase):
        """Disable ``phase`` from execution.

        :param string phase: Name of ``phase``
        :raises ValueError: *phase* could not be found.
        """
        if phase not in task_phases:
            raise ValueError('%s is not a valid phase' % phase)
        # Guard against duplicate entries in the disabled list.
        if phase not in self.disabled_phases:
            logger.debug('Disabling {} phase', phase)
            self.disabled_phases.append(phase)
def disable_plugin(self, plugin):
"""Disable ``plugin`` from execution.
:param string plugin: Name of ``plugin``
:raises ValueError: *plugin* could not be found.
"""
if plugin not in all_plugins:
raise ValueError(f'`{plugin}` is not a valid plugin.')
self.disabled_plugins.append(plugin)
    def abort(self, reason='Unknown', silent=False, traceback: Optional[str] = None):
        """Abort this task execution, no more plugins will be executed except the abort handling ones."""
        self.aborted = True
        self.abort_reason = reason
        self.silent_abort = silent
        self.traceback = traceback
        if not self.silent_abort:
            logger.warning('Aborting task (plugin: {})', self.current_plugin)
        else:
            logger.debug('Aborting task (plugin: {})', self.current_plugin)
        # Raising unwinds out of the current plugin/phase; the executor catches it.
        raise TaskAbort(reason, silent=silent)
def find_entry(self, category='entries', **values):
"""
Find and return :class:`~flexget.entry.Entry` with given attributes from task or None
:param string category: entries, accepted, rejected or failed. Defaults to entries.
:param values: Key values of entries to be searched
:return: Entry or None
"""
cat = getattr(self, category)
if not isinstance(cat, EntryIterator):
raise TypeError('category must be a EntryIterator')
for entry in cat:
for k, v in values.items():
if not (k in entry and entry[k] == v):
break
else:
return entry
return None
    def plugins(self, phase=None):
        """Get currently enabled plugins.

        :param string phase:
            Optional, limits to plugins currently configured on given phase, sorted in phase order.
        :return:
            An iterator over configured :class:`flexget.plugin.PluginInfo` instances enabled on this task.
        """
        if phase:
            # Higher phase-handler priority runs first within a phase.
            plugins = sorted(
                get_plugins(phase=phase), key=lambda p: p.phase_handlers[phase], reverse=True
            )
        else:
            plugins = iter(all_plugins.values())
        # A plugin is enabled when it is configured on this task or is builtin.
        return (p for p in plugins if p.name in self.config or p.builtin)
    def __run_task_phase(self, phase):
        """Executes task phase, ie. call all enabled plugins on the task.

        Fires events:

        * task.execute.before_plugin
        * task.execute.after_plugin

        :param string phase: Name of the phase
        """
        if phase not in phase_methods:
            raise Exception('%s is not a valid task phase' % phase)
        # warn if no inputs, filters or outputs in the task
        if phase in ['input', 'filter', 'output']:
            if not self.manager.unit_test:
                # Check that there is at least one manually configured plugin for these phases
                for p in self.plugins(phase):
                    if not p.builtin:
                        break
                else:
                    if phase not in self.suppress_warnings:
                        if phase == 'filter':
                            logger.warning(
                                'Task does not have any filter plugins to accept entries. '
                                'You need at least one to accept the entries you want.'
                            )
                        else:
                            logger.warning(
                                'Task doesn\'t have any {} plugins, you should add (at least) one!',
                                phase,
                            )
        for plugin in self.plugins(phase):
            # Abort this phase if one of the plugins disables it
            if phase in self.disabled_phases:
                return
            if plugin.name in self.disabled_plugins:
                continue
            # store execute info, except during entry events
            self.current_phase = phase
            self.current_plugin = plugin.name
            if plugin.api_ver == 1:
                # backwards compatibility
                # pass method only task (old behaviour)
                args = (self,)
            else:
                # pass method task, copy of config (so plugin cannot modify it)
                args = (self, copy.copy(self.config.get(plugin.name)))
            # Hack to make task.session only active for a single plugin
            with Session() as session:
                self.session = session
                try:
                    fire_event('task.execute.before_plugin', self, plugin.name)
                    response = self.__run_plugin(plugin, phase, args)
                    if phase == 'input' and response:
                        # add entries returned by input to self.all_entries
                        for e in response:
                            e.task = self
                            self.all_entries.append(e)
                finally:
                    # after_plugin always fires, even when the plugin aborted.
                    fire_event('task.execute.after_plugin', self, plugin.name)
                self.session = None
        # check config hash for changes at the end of 'prepare' phase
        if phase == 'prepare':
            self.check_config_hash()
    def __run_plugin(self, plugin, phase, args=None, kwargs=None):
        """
        Execute given plugins phase method, with supplied args and kwargs.

        If plugin throws unexpected exceptions :meth:`abort` will be called.

        :param PluginInfo plugin: Plugin to be executed
        :param string phase: Name of the phase to be executed
        :param args: Passed to the plugin
        :param kwargs: Passed to the plugin
        """
        keyword = plugin.name
        method = plugin.phase_handlers[phase]
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        # log.trace('Running %s method %s' % (keyword, method))
        # call the plugin
        try:
            result = method(*args, **kwargs)
            # We exhaust any iterator inputs here to make sure we catch exceptions properly.
            if isinstance(result, collections.abc.Iterable):
                result = list(result)
            return result
        except TaskAbort:
            # Deliberate abort: propagate unchanged to the executor.
            raise
        except PluginWarning as warn:
            # check if this warning should be logged only once (may keep repeating)
            if warn.kwargs.get('log_once', False):
                from flexget.utils.log import log_once

                log_once(warn.value, warn.logger)
            else:
                warn.logger.warning(warn)
        except EntryUnicodeError as eue:
            msg = 'Plugin %s tried to create non-unicode compatible entry (key: %s, value: %r)' % (
                keyword,
                eue.key,
                eue.value,
            )
            logger.critical(msg)
            self.abort(msg)
        except PluginError as err:
            err.logger.critical(err.value)
            self.abort(err.value)
        except DependencyError as e:
            msg = 'Plugin `%s` cannot be used because dependency `%s` is missing.' % (
                keyword,
                e.missing,
            )
            logger.critical(e.message)
            self.abort(msg)
        except Warning as e:
            # If warnings have been elevated to errors
            msg = 'Warning during plugin %s: %s' % (keyword, e)
            logger.exception(msg)
            self.abort(msg)
        except Exception as e:
            # Unexpected failure: file a crash report and abort the task.
            msg = 'BUG: Unhandled error in plugin %s: %s' % (keyword, e)
            logger.opt(exception=True).critical(msg)
            traceback = self.manager.crash_report()
            self.abort(msg, traceback=traceback)
def rerun(self, plugin=None, reason=None):
"""
Immediately re-run the task after execute has completed,
task can be re-run up to :attr:`.max_reruns` times.
:param str plugin: Plugin name
:param str reason: Why the rerun is done
"""
msg = (
'Plugin {0} has requested task to be ran again after execution has completed.'.format(
self.current_plugin if plugin is None else plugin
)
)
if reason:
msg += ' Reason: {0}'.format(reason)
# Only print the first request for a rerun to the info log
if self._rerun:
logger.debug(msg)
else:
logger.info(msg)
self._rerun = True
def config_changed(self):
"""
Sets config_modified flag to True for the remainder of this run.
Used when the db changes, and all entries need to be reprocessed.
"""
self.config_modified = True
def merge_config(self, new_config):
try:
merge_dict_from_to(new_config, self.config)
except MergeException as e:
raise PluginError('Failed to merge configs for task %s: %s' % (self.name, e))
    def check_config_hash(self):
        """
        Checks the task's config hash and updates the hash if necessary.

        Compares the current config hash against the one stored in the DB;
        on any difference (or first run) the stored hash is refreshed and
        :meth:`config_changed` is called so all entries get reprocessed.
        """
        # Save current config hash and set config_modified flag
        config_hash = get_config_hash(self.config)
        if self.is_rerun:
            # Restore the config to state right after start phase
            if self.prepared_config:
                self.config = copy.deepcopy(self.prepared_config)
            else:
                logger.error('BUG: No prepared_config on rerun, please report.')
        with Session() as session:
            last_hash = (
                session.query(TaskConfigHash).filter(TaskConfigHash.task == self.name).first()
            )
            if not last_hash:
                # First execution of this task: record the hash and flag the
                # config as changed so everything is processed.
                session.add(TaskConfigHash(task=self.name, hash=config_hash))
                self.config_changed()
            elif last_hash.hash != config_hash:
                # Config differs from last run: update the stored hash.
                last_hash.hash = config_hash
                self.config_changed()
    def _execute(self):
        """Executes the task without rerunning.

        Runs every phase in ``task_phases`` in order, honoring disabled
        phases, rerun-skipped phases ('start'/'prepare'), and the special
        'exit' phase handling. On :class:`TaskAbort` the 'abort' phase
        handlers run before the abort is re-raised.
        """
        if not self.enabled:
            logger.debug('Not running disabled task {}', self.name)
            return

        logger.debug('executing {}', self.name)

        # Handle keyword args
        if self.options.learn:
            logger.info('Disabling download and output phases because of --learn')
            self.disable_phase('download')
            self.disable_phase('output')
        if self.options.disable_phases:
            list(map(self.disable_phase, self.options.disable_phases))
        if self.options.inject:
            # If entries are passed for this execution (eg. rerun), disable the input phase
            self.disable_phase('input')
            self.all_entries.extend(copy.deepcopy(self.options.inject))

        # run phases
        try:
            for phase in task_phases:
                if phase in self.disabled_phases:
                    # log keywords not executed
                    if phase not in self.suppress_warnings:
                        for plugin in self.plugins(phase):
                            if plugin.name in self.config:
                                logger.info(
                                    'Plugin {} is not executed in {} phase because the phase is disabled '
                                    '(e.g. --test, --inject)',
                                    plugin.name,
                                    phase,
                                )
                    continue
                if phase in ('start', 'prepare') and self.is_rerun:
                    logger.debug('skipping phase {} during rerun', phase)
                    continue
                if phase == 'exit':
                    # Make sure we run the entry complete hook before exit phase. These hooks may call for a rerun,
                    # which would mean we should skip the exit phase during this run.
                    for entry in self.all_entries:
                        entry.complete()
                    if self._rerun and self._rerun_count < self.max_reruns:
                        logger.debug('not running task_exit yet because task will rerun')
                        continue
                # run all plugins with this phase
                self.__run_task_phase(phase)
                if phase == 'start':
                    # Store a copy of the config state after start phase to restore for reruns
                    self.prepared_config = copy.deepcopy(self.config)
        except TaskAbort:
            try:
                self.__run_task_phase('abort')
            except TaskAbort as e:
                logger.exception('abort handlers aborted: {}', e)
            raise
    @use_task_logging
    def execute(self):
        """
        Executes the task.

        If :attr:`.enabled` is False task is not executed. Certain :attr:`.options`
        affect how execution is handled.

        - :attr:`.options.disable_phases` is a list of phases that are not enabled
          for this execution.
        - :attr:`.options.inject` is a list of :class:`Entry` instances used instead
          of running input phase.
        """
        self.finished_event.clear()
        try:
            if self.options.cron:
                self.manager.db_cleanup()
            fire_event('task.execute.started', self)
            while True:
                self._execute()
                # rerun task
                if (
                    self._rerun
                    and self._rerun_count < self.max_reruns
                    and self._rerun_count < Task.RERUN_MAX
                ):
                    logger.info('Rerunning the task in case better resolution can be achieved.')
                    self._rerun_count += 1
                    # Fresh entry container for the rerun.
                    self._all_entries = EntryContainer()
                    self._rerun = False
                    continue
                elif self._rerun:
                    logger.info(
                        'Task has been re-run {} times already, stopping for now',
                        self._rerun_count,
                    )
                break
            fire_event('task.execute.completed', self)
        finally:
            # Always release waiters, even when execution aborted.
            self.finished_event.set()
@staticmethod
def validate_config(config):
schema = plugin_schemas(interface='task')
# Don't validate commented out plugins
schema['patternProperties'] = {'^_': {}}
return config_schema.process_config(config, schema)
def __copy__(self):
new = type(self)(self.manager, self.name, self.config, self.options)
# Update all the variables of new instance to match our own
new.__dict__.update(self.__dict__)
# Some mutable objects need to be copies
new.options = copy.copy(self.options)
new.config = copy.deepcopy(self.config)
return new
copy = __copy__
def render(self, template):
"""
Renders a template string based on fields in the entry.
:param template: A template string or FlexGetTemplate that uses jinja2 or python string replacement format.
:return: The result of the rendering.
:rtype: string
:raises RenderError: If there is a problem.
"""
if not isinstance(template, (str, FlexGetTemplate)):
raise ValueError(
'Trying to render non string template or unrecognized template format, got %s'
% repr(template)
)
logger.trace('rendering: {}', template)
return render_from_task(template, self)
@event('config.register')
def register_config_key():
    """Register the top-level 'tasks' config key and its plugin schema."""
    config_schema.register_config_key(
        'tasks',
        {
            'type': 'object',
            'additionalProperties': plugin_schemas(interface='task'),
        },
        required=True,
    )
| {
"content_hash": "1b37ef369fb297618dcacf8fdb72292e",
"timestamp": "",
"source": "github",
"line_count": 792,
"max_line_length": 115,
"avg_line_length": 35.87626262626262,
"alnum_prop": 0.5811571760399803,
"repo_name": "crawln45/Flexget",
"id": "b7a033bf57a7c961efe329c9f3500b727554c615",
"size": "28414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "flexget/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1233"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3761134"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1586"
}
],
"symlink_target": ""
} |
from django.core import management

# Entry point for the django-admin command-line utility: delegate argument
# parsing and command dispatch entirely to Django's management framework.
if __name__ == "__main__":
    management.execute_from_command_line()
| {
"content_hash": "389e1aa9fe1d5a60e949299b911b8b5e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 26.5,
"alnum_prop": 0.6792452830188679,
"repo_name": "ossdemura/django-miniblog",
"id": "ab2b104a7548229bace859c2e9e1f3f9c888b08e",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "Scripts/django-admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2481"
},
{
"name": "C",
"bytes": "848092"
},
{
"name": "C++",
"bytes": "281626"
},
{
"name": "CSS",
"bytes": "141281"
},
{
"name": "HTML",
"bytes": "180909"
},
{
"name": "JavaScript",
"bytes": "328362"
},
{
"name": "PowerShell",
"bytes": "16350"
},
{
"name": "Python",
"bytes": "17418850"
},
{
"name": "Tcl",
"bytes": "2590140"
}
],
"symlink_target": ""
} |
def extractFictionweeklyNet(item):
    '''
    Parser for 'fictionweekly.net'

    Returns a release message for known tags, None for previews or items
    without a volume/chapter, and False when no tag matches.
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    # Tag -> (series name, translation type), checked in priority order.
    for tag, series, release_type in (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ):
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series, vol, chp, frag=frag, postfix=postfix, tl_type=release_type
            )
    return False
| {
"content_hash": "edbe58f467f2d6cbf16b2db657c09aa6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26,
"alnum_prop": 0.6282051282051282,
"repo_name": "fake-name/ReadableWebProxy",
"id": "4030e34de28cee5e22867f7563405e2a28c38cd0",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractFictionweeklyNet.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import flask
import pytest
from dash import Dash, Input, Output, State, MATCH, ALL, ALLSMALLER, html, dcc
from dash.testing import wait
# Dev-tools settings shared by every test app in this module: full debugging
# with the reloader and hot reload disabled so the browser session is stable.
debugging = dict(
    debug=True, use_reloader=False, use_debugger=True, dev_tools_hot_reload=False
)
def check_errors(dash_duo, specs):
    """Order-agnostic check of all the errors shown in the devtools UI.

    :param dash_duo: dash testing fixture driving the browser session.
    :param specs: list of ``(message, snippets)`` pairs; each expected error
        must match one displayed error by exact title and by containing
        every snippet in its detail text. Matches are consumed, so
        duplicated specs require distinct displayed errors.
    :raises AssertionError: if any expected error is not found.

    This is not fully general - despite the selectors below, it only applies
    to front-end errors with no back-end errors in the list.
    """
    cnt = len(specs)
    dash_duo.wait_for_text_to_equal(dash_duo.devtools_error_count_locator, str(cnt))

    found = []
    for i in range(cnt):
        msg = dash_duo.find_elements(".dash-fe-error__title")[i].text
        # Expand the error entry to read its detail text, then collapse it
        # again so the next iteration sees a clean DOM.
        dash_duo.find_elements(".test-devtools-error-toggle")[i].click()
        dash_duo.wait_for_element(".dash-backend-error,.dash-fe-error__info")
        has_BE = dash_duo.driver.execute_script(
            "return document.querySelectorAll('.dash-backend-error').length"
        )
        txt_selector = ".dash-backend-error" if has_BE else ".dash-fe-error__info"
        txt = dash_duo.wait_for_element(txt_selector).text
        dash_duo.find_elements(".test-devtools-error-toggle")[i].click()
        dash_duo.wait_for_no_elements(".dash-backend-error")
        found.append((msg, txt))
    orig_found = found[:]

    for i, (message, snippets) in enumerate(specs):
        for j, (msg, txt) in enumerate(found):
            if msg == message and all(snip in txt for snip in snippets):
                # Consume the matched error so each spec matches a distinct
                # displayed error. (Removed leftover debug print of `j`.)
                found.pop(j)
                break
        else:
            raise AssertionError(
                (
                    "error {} ({}) not found with text:\n"
                    "  {}\nThe found messages were:\n---\n{}"
                ).format(
                    i,
                    message,
                    "\n  ".join(snippets),
                    "\n---\n".join(
                        "{}\n{}".format(msg, txt) for msg, txt in orig_found
                    ),
                )
            )

    # ensure the errors didn't leave items in the pendingCallbacks queue
    assert dash_duo.driver.execute_script("return document.title") == "Dash"
def test_dvcv001_blank(dash_duo):
    """A callback with neither Inputs nor Outputs reports both omissions."""
    app = Dash(__name__)
    app.layout = html.Div()

    @app.callback([], [])
    def x():
        return 42

    dash_duo.start_server(app, **debugging)

    check_errors(
        dash_duo,
        [
            ["A callback is missing Inputs", ["there are no `Input` elements."]],
            [
                "A callback is missing Outputs",
                ["Please provide an output for this callback:"],
            ],
        ],
    )
def test_dvcv002_blank_id_prop(dash_duo):
    """Blank component ids and blank property names each raise an error."""
    # TODO: remove suppress_callback_exceptions after we move that part to FE
    app = Dash(__name__, suppress_callback_exceptions=True)
    app.layout = html.Div([html.Div(id="a")])

    @app.callback([Output("a", "children"), Output("", "")], [Input("", "")])
    def x(a):
        return a

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Callback item missing ID",
            ['Input[0].id = ""', "Every item linked to a callback needs an ID"],
        ],
        [
            "Callback property error",
            [
                'Input[0].property = ""',
                "expected `property` to be a non-empty string.",
            ],
        ],
        [
            "Callback item missing ID",
            ['Output[1].id = ""', "Every item linked to a callback needs an ID"],
        ],
        [
            "Callback property error",
            [
                'Output[1].property = ""',
                "expected `property` to be a non-empty string.",
            ],
        ],
    ]
    check_errors(dash_duo, specs)
def test_dvcv003_duplicate_outputs_same_callback(dash_duo):
    """Duplicate and wildcard-overlapping outputs in one callback error out."""
    app = Dash(__name__)
    app.layout = html.Div([html.Div(id="a"), html.Div(id="b")])

    @app.callback(
        [Output("a", "children"), Output("a", "children")], [Input("b", "children")]
    )
    def x(b):
        return b, b

    @app.callback(
        [Output({"a": 1}, "children"), Output({"a": ALL}, "children")],
        [Input("b", "children")],
    )
    def y(b):
        return b, b

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Overlapping wildcard callback outputs",
            [
                'Output 1 ({"a":ALL}.children)',
                'overlaps another output ({"a":1}.children)',
                "used in this callback",
            ],
        ],
        [
            "Duplicate callback Outputs",
            ["Output 1 (a.children) is already used by this callback."],
        ],
    ]
    check_errors(dash_duo, specs)
def test_dvcv004_duplicate_outputs_across_callbacks(dash_duo):
    """Duplicate and wildcard-overlapping outputs across callbacks error out."""
    app = Dash(__name__)
    app.layout = html.Div([html.Div(id="a"), html.Div(id="b"), html.Div(id="c")])

    @app.callback(
        [Output("a", "children"), Output("a", "style")], [Input("b", "children")]
    )
    def x(b):
        return b, b

    @app.callback(Output("b", "children"), [Input("b", "style")])
    def y(b):
        return b

    @app.callback(Output("a", "children"), [Input("b", "children")])
    def x2(b):
        return b

    @app.callback(
        [Output("b", "children"), Output("b", "style")], [Input("c", "children")]
    )
    def y2(c):
        return c

    @app.callback(
        [Output({"a": 1}, "children"), Output({"b": ALL, "c": 1}, "children")],
        [Input("b", "children")],
    )
    def z(b):
        return b, b

    @app.callback(
        [Output({"a": ALL}, "children"), Output({"b": 1, "c": ALL}, "children")],
        [Input("b", "children")],
    )
    def z2(b):
        return b, b

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Overlapping wildcard callback outputs",
            [
                # depending on the order callbacks get reported to the
                # front end, either of these could have been registered first.
                # so we use this order-independent form that just checks for
                # both prop_id's and the string "overlaps another output"
                '({"b":1,"c":ALL}.children)',
                "overlaps another output",
                '({"b":ALL,"c":1}.children)',
                "used in a different callback.",
            ],
        ],
        [
            "Overlapping wildcard callback outputs",
            [
                '({"a":ALL}.children)',
                "overlaps another output",
                '({"a":1}.children)',
                "used in a different callback.",
            ],
        ],
        ["Duplicate callback outputs", ["Output 0 (b.children) is already in use."]],
        ["Duplicate callback outputs", ["Output 0 (a.children) is already in use."]],
    ]
    check_errors(dash_duo, specs)
def test_dvcv005_input_output_overlap(dash_duo):
    """A callback may use the same prop as both Input and Output (no error)."""
    app = Dash(__name__)
    app.layout = html.Div([html.Div(id="a"), html.Div(id="b"), html.Div(id="c")])

    @app.callback(Output("a", "children"), [Input("a", "children")])
    def x(a):
        return a

    @app.callback(
        [Output("b", "children"), Output("c", "children")], [Input("c", "children")]
    )
    def y(c):
        return c, c

    @app.callback(Output({"a": ALL}, "children"), [Input({"a": 1}, "children")])
    def x2(a):
        return [a]

    @app.callback(
        [Output({"b": MATCH}, "children"), Output({"b": MATCH, "c": 1}, "children")],
        [Input({"b": MATCH, "c": 1}, "children")],
    )
    def y2(c):
        return c, c

    dash_duo.start_server(app, **debugging)

    # input/output overlap is now legal, shouldn't throw any errors
    wait.until(lambda: ~dash_duo.redux_state_is_loading, 2)
    assert dash_duo.get_logs() == []
def test_dvcv006_inconsistent_wildcards(dash_duo):
    """MATCH/ALLSMALLER keys in Inputs/States must appear in the Outputs."""
    app = Dash(__name__)
    app.layout = html.Div()

    @app.callback(
        [Output({"b": MATCH}, "children"), Output({"b": ALL, "c": 1}, "children")],
        [Input({"b": MATCH, "c": 2}, "children")],
    )
    def x(c):
        return c, [c]

    @app.callback(
        [Output({"a": MATCH}, "children")],
        [Input({"b": MATCH}, "children"), Input({"c": ALLSMALLER}, "children")],
        [State({"d": MATCH, "dd": MATCH}, "children"), State({"e": ALL}, "children")],
    )
    def y(b, c, d, e):
        return b + c + d + e

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "`Input` / `State` wildcards not in `Output`s",
            [
                'State 0 ({"d":MATCH,"dd":MATCH}.children)',
                "has MATCH or ALLSMALLER on key(s) d, dd",
                'where Output 0 ({"a":MATCH}.children)',
            ],
        ],
        [
            "`Input` / `State` wildcards not in `Output`s",
            [
                'Input 1 ({"c":ALLSMALLER}.children)',
                "has MATCH or ALLSMALLER on key(s) c",
                'where Output 0 ({"a":MATCH}.children)',
            ],
        ],
        [
            "`Input` / `State` wildcards not in `Output`s",
            [
                'Input 0 ({"b":MATCH}.children)',
                "has MATCH or ALLSMALLER on key(s) b",
                'where Output 0 ({"a":MATCH}.children)',
            ],
        ],
        [
            "Mismatched `MATCH` wildcards across `Output`s",
            [
                'Output 1 ({"b":ALL,"c":1}.children)',
                "does not have MATCH wildcards on the same keys as",
                'Output 0 ({"b":MATCH}.children).',
            ],
        ],
    ]
    check_errors(dash_duo, specs)
def test_dvcv007_disallowed_ids(dash_duo):
    """Wildcard ids reject empty keys, non-scalar values, and bad wildcards."""
    app = Dash(__name__)
    app.layout = html.Div()

    @app.callback(
        Output({"": 1, "a": [4], "c": ALLSMALLER}, "children"),
        [Input({"b": {"c": 1}}, "children")],
    )
    def y(b):
        return b

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Callback wildcard ID error",
            [
                'Input[0].id["b"] = {"c":1}',
                "Wildcard callback ID values must be either wildcards",
                "or constants of one of these types:",
                "string, number, boolean",
            ],
        ],
        [
            "Callback wildcard ID error",
            [
                'Output[0].id["c"] = ALLSMALLER',
                "Allowed wildcards for Outputs are:",
                "ALL, MATCH",
            ],
        ],
        [
            "Callback wildcard ID error",
            [
                'Output[0].id["a"] = [4]',
                "Wildcard callback ID values must be either wildcards",
                "or constants of one of these types:",
                "string, number, boolean",
            ],
        ],
        [
            "Callback wildcard ID error",
            ['Output[0].id has key ""', "Keys must be non-empty strings."],
        ],
    ]
    check_errors(dash_duo, specs)
def bad_id_app(**kwargs):
    """Build an app whose callbacks reference ids missing from the layout.

    Used by test_dvcv008/009; ``kwargs`` are forwarded to the Dash
    constructor (e.g. ``suppress_callback_exceptions=True``).
    """
    app = Dash(__name__, **kwargs)
    app.layout = html.Div(
        [
            html.Div(
                [html.Div(id="inner-div"), dcc.Input(id="inner-input")], id="outer-div"
            ),
            dcc.Input(id="outer-input"),
        ],
        id="main",
    )

    # Output id "nuh-uh" does not exist in the layout.
    @app.callback(Output("nuh-uh", "children"), [Input("inner-input", "value")])
    def f(a):
        return a

    # Input id "yeah-no" does not exist in the layout.
    @app.callback(Output("outer-input", "value"), [Input("yeah-no", "value")])
    def g(a):
        return a

    # Output "nope" and State "what" do not exist in the layout.
    @app.callback(
        [Output("inner-div", "children"), Output("nope", "children")],
        [Input("inner-input", "value")],
        [State("what", "children")],
    )
    def g2(a):
        return [a, a]

    # the right way
    @app.callback(Output("inner-div", "style"), [Input("inner-input", "value")])
    def h(a):
        return a

    return app
# This one is raised by bad_id_app whether suppressing callback exceptions or not
# yeah-no no longer raises an error on dispatch due to the no-input regression fix
# for issue #1200
dispatch_specs = [
    [
        "A nonexistent object was used in an `Output` of a Dash callback. "
        "The id of this object is `nope` and the property is `children`. "
        "The string ids in the current layout are: "
        "[main, outer-div, inner-div, inner-input, outer-input]",
        # Empty snippet list: only the error title above is matched.
        [],
    ],
]
def test_dvcv008_wrong_callback_id(dash_duo):
    """Each missing-id callback reference produces an 'ID not found' error."""
    dash_duo.start_server(bad_id_app(), **debugging)

    specs = [
        [
            "ID not found in layout",
            [
                "Attempting to connect a callback Input item to component:",
                '"yeah-no"',
                "but no components with that id exist in the layout.",
                "If you are assigning callbacks to components that are",
                "generated by other callbacks (and therefore not in the",
                "initial layout), you can suppress this exception by setting",
                "`suppress_callback_exceptions=True`.",
                "This ID was used in the callback(s) for Output(s):",
                "outer-input.value",
            ],
        ],
        [
            "ID not found in layout",
            [
                "Attempting to connect a callback Output item to component:",
                '"nope"',
                "but no components with that id exist in the layout.",
                "This ID was used in the callback(s) for Output(s):",
                "inner-div.children, nope.children",
            ],
        ],
        [
            "ID not found in layout",
            [
                "Attempting to connect a callback State item to component:",
                '"what"',
                "but no components with that id exist in the layout.",
                "This ID was used in the callback(s) for Output(s):",
                "inner-div.children, nope.children",
            ],
        ],
        [
            "ID not found in layout",
            [
                "Attempting to connect a callback Output item to component:",
                '"nuh-uh"',
                "but no components with that id exist in the layout.",
                "This ID was used in the callback(s) for Output(s):",
                "nuh-uh.children",
            ],
        ],
    ]
    check_errors(dash_duo, dispatch_specs + specs)
def test_dvcv009_suppress_callback_exceptions(dash_duo):
    """With suppress_callback_exceptions only the dispatch error remains."""
    dash_duo.start_server(bad_id_app(suppress_callback_exceptions=True), **debugging)

    check_errors(dash_duo, dispatch_specs)
def test_dvcv010_bad_props(dash_duo):
    """Props unsupported by the target component raise 'Invalid prop' errors."""
    app = Dash(__name__)
    app.layout = html.Div(
        [
            html.Div(
                [html.Div(id="inner-div"), dcc.Input(id="inner-input")], id="outer-div"
            ),
            dcc.Input(id={"a": 1}),
        ],
        id="main",
    )

    @app.callback(
        Output("inner-div", "xyz"),
        # "data-xyz" is OK, does not give an error
        [Input("inner-input", "pdq"), Input("inner-div", "data-xyz")],
        [State("inner-div", "value")],
    )
    def xyz(a, b, c):
        a if b else c

    @app.callback(
        Output({"a": MATCH}, "no"),
        [Input({"a": MATCH}, "never")],
        # "boo" will not error because we don't check State MATCH/ALLSMALLER
        [State({"a": MATCH}, "boo"), State({"a": ALL}, "nope")],
    )
    def f(a, b, c):
        return a if b else c

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Invalid prop for this component",
            [
                'Property "never" was used with component ID:',
                '{"a":1}',
                "in one of the Input items of a callback.",
                "This ID is assigned to a dash_core_components.Input component",
                "in the layout, which does not support this property.",
                "This ID was used in the callback(s) for Output(s):",
                '{"a":MATCH}.no',
            ],
        ],
        [
            "Invalid prop for this component",
            [
                'Property "nope" was used with component ID:',
                '{"a":1}',
                "in one of the State items of a callback.",
                "This ID is assigned to a dash_core_components.Input component",
                '{"a":MATCH}.no',
            ],
        ],
        [
            "Invalid prop for this component",
            [
                'Property "no" was used with component ID:',
                '{"a":1}',
                "in one of the Output items of a callback.",
                "This ID is assigned to a dash_core_components.Input component",
                '{"a":MATCH}.no',
            ],
        ],
        [
            "Invalid prop for this component",
            [
                'Property "pdq" was used with component ID:',
                '"inner-input"',
                "in one of the Input items of a callback.",
                "This ID is assigned to a dash_core_components.Input component",
                "inner-div.xyz",
            ],
        ],
        [
            "Invalid prop for this component",
            [
                'Property "value" was used with component ID:',
                '"inner-div"',
                "in one of the State items of a callback.",
                "This ID is assigned to a dash_html_components.Div component",
                "inner-div.xyz",
            ],
        ],
        [
            "Invalid prop for this component",
            [
                'Property "xyz" was used with component ID:',
                '"inner-div"',
                "in one of the Output items of a callback.",
                "This ID is assigned to a dash_html_components.Div component",
                "inner-div.xyz",
            ],
        ],
    ]
    check_errors(dash_duo, specs)
def test_dvcv011_duplicate_outputs_simple(dash_duo):
    """Two callbacks targeting the same output prop report a duplicate."""
    app = Dash(__name__)

    @app.callback(Output("a", "children"), [Input("c", "children")])
    def c(children):
        return children

    @app.callback(Output("a", "children"), [Input("b", "children")])
    def c2(children):
        return children

    @app.callback([Output("a", "style")], [Input("c", "style")])
    def s(children):
        return (children,)

    @app.callback([Output("a", "style")], [Input("b", "style")])
    def s2(children):
        return (children,)

    app.layout = html.Div(
        [
            html.Div([], id="a"),
            html.Div(["Bye"], id="b", style={"color": "red"}),
            html.Div(["Hello"], id="c", style={"color": "green"}),
        ]
    )

    dash_duo.start_server(app, **debugging)

    specs = [
        ["Duplicate callback outputs", ["Output 0 (a.children) is already in use."]],
        ["Duplicate callback outputs", ["Output 0 (a.style) is already in use."]],
    ]
    check_errors(dash_duo, specs)
def test_dvcv012_circular_2_step(dash_duo):
    """A two-callback dependency cycle is detected and reported."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Div([], id="a"), html.Div(["Bye"], id="b"), html.Div(["Hello"], id="c")]
    )

    @app.callback(Output("a", "children"), [Input("b", "children")])
    def callback(children):
        return children

    @app.callback(Output("b", "children"), [Input("a", "children")])
    def c2(children):
        return children

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Circular Dependencies",
            [
                "Dependency Cycle Found:",
                "a.children -> b.children",
                "b.children -> a.children",
            ],
        ]
    ]
    check_errors(dash_duo, specs)
def test_dvcv013_circular_3_step(dash_duo):
    """A three-callback dependency cycle is detected and reported."""
    app = Dash(__name__)
    app.layout = html.Div(
        [html.Div([], id="a"), html.Div(["Bye"], id="b"), html.Div(["Hello"], id="c")]
    )

    @app.callback(Output("b", "children"), [Input("a", "children")])
    def callback(children):
        return children

    @app.callback(Output("c", "children"), [Input("b", "children")])
    def c2(children):
        return children

    @app.callback([Output("a", "children")], [Input("c", "children")])
    def c3(children):
        return (children,)

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Circular Dependencies",
            [
                "Dependency Cycle Found:",
                "a.children -> b.children",
                "b.children -> c.children",
                "c.children -> a.children",
            ],
        ]
    ]
    check_errors(dash_duo, specs)
def multipage_app(validation=False):
    """Build a three-page app whose sub-page callbacks start outside the layout.

    :param validation: how missing-id exceptions are handled:
        ``"function"`` — layout is a function returning the full validation
        layout outside a request; ``"attribute"`` — sets
        ``app.validation_layout``; ``"suppress"`` — passes
        ``suppress_callback_exceptions=True``; ``False`` — no mitigation.
    """
    app = Dash(__name__, suppress_callback_exceptions=(validation == "suppress"))

    skeleton = html.Div(
        [dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
    )
    layout_index = html.Div(
        [
            dcc.Link('Navigate to "/page-1"', id="index_p1", href="/page-1"),
            dcc.Link('Navigate to "/page-2"', id="index_p2", href="/page-2"),
        ]
    )
    layout_page_1 = html.Div(
        [
            html.H2("Page 1"),
            dcc.Input(id="input-1-state", type="text", value="Montreal"),
            dcc.Input(id="input-2-state", type="text", value="Canada"),
            html.Button(id="submit-button", n_clicks=0, children="Submit"),
            html.Div(id="output-state"),
            html.Br(),
            dcc.Link('Navigate to "/"', id="p1_index", href="/"),
            dcc.Link('Navigate to "/page-2"', id="p1_p2", href="/page-2"),
        ]
    )
    layout_page_2 = html.Div(
        [
            html.H2("Page 2"),
            dcc.Input(id="page-2-input", value="LA"),
            html.Div(id="page-2-display-value"),
            html.Br(),
            dcc.Link('Navigate to "/"', id="p2_index", href="/"),
            dcc.Link('Navigate to "/page-1"', id="p2_p1", href="/page-1"),
        ]
    )
    validation_layout = html.Div([skeleton, layout_index, layout_page_1, layout_page_2])

    def validation_function():
        # Full layout only outside a request context, i.e. during validation.
        return skeleton if flask.has_request_context() else validation_layout

    app.layout = validation_function if validation == "function" else skeleton
    if validation == "attribute":
        app.validation_layout = validation_layout

    # Index callbacks
    @app.callback(Output("page-content", "children"), [Input("url", "pathname")])
    def display_page(pathname):
        if pathname == "/page-1":
            return layout_page_1
        elif pathname == "/page-2":
            return layout_page_2
        else:
            return layout_index

    # Page 1 callbacks
    @app.callback(
        Output("output-state", "children"),
        [Input("submit-button", "n_clicks")],
        [State("input-1-state", "value"), State("input-2-state", "value")],
    )
    def update_output(n_clicks, input1, input2):
        return (
            "The Button has been pressed {} times,"
            'Input 1 is "{}",'
            'and Input 2 is "{}"'
        ).format(n_clicks, input1, input2)

    # Page 2 callbacks
    @app.callback(
        Output("page-2-display-value", "children"), [Input("page-2-input", "value")]
    )
    def display_value(value):
        print("display_value")
        return 'You have selected "{}"'.format(value)

    return app
def test_dvcv014_multipage_errors(dash_duo):
    """Without any validation mitigation every sub-page id raises an error."""
    app = multipage_app()
    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "ID not found in layout",
            ['"page-2-input"', "page-2-display-value.children"],
        ],
        ["ID not found in layout", ['"submit-button"', "output-state.children"]],
        [
            "ID not found in layout",
            ['"page-2-display-value"', "page-2-display-value.children"],
        ],
        ["ID not found in layout", ['"output-state"', "output-state.children"]],
    ]
    check_errors(dash_duo, specs)
@pytest.mark.parametrize("validation", ("function", "attribute", "suppress"))
def test_dvcv015_multipage_validation_layout(validation, dash_duo):
    """Each validation mitigation lets the multipage app run without errors."""
    app = multipage_app(validation)
    dash_duo.start_server(app, **debugging)

    dash_duo.wait_for_text_to_equal("#index_p1", 'Navigate to "/page-1"')
    dash_duo.find_element("#index_p1").click()

    dash_duo.find_element("#submit-button").click()
    dash_duo.wait_for_text_to_equal(
        "#output-state",
        "The Button has been pressed 1 times,"
        'Input 1 is "Montreal",and Input 2 is "Canada"',
    )

    dash_duo.find_element("#p1_p2").click()
    dash_duo.wait_for_text_to_equal("#page-2-display-value", 'You have selected "LA"')

    assert not dash_duo.get_logs()
def test_dvcv016_circular_with_input_output(dash_duo):
    """Self-input callbacks still participate in cross-callback cycle checks."""
    app = Dash(__name__)

    app.layout = html.Div(
        [html.Div([], id="a"), html.Div(["Bye"], id="b"), html.Div(["Hello"], id="c")]
    )

    @app.callback(
        [Output("a", "children"), Output("b", "children")],
        [Input("a", "children"), Input("b", "children"), Input("c", "children")],
    )
    def c1(a, b, c):
        return a, b

    @app.callback(Output("c", "children"), [Input("a", "children")])
    def c2(children):
        return children

    dash_duo.start_server(app, **debugging)

    specs = [
        [
            "Circular Dependencies",
            [
                "Dependency Cycle Found:",
                "a.children__output -> c.children",
                "c.children -> a.children__output",
            ],
        ]
    ]
    check_errors(dash_duo, specs)
| {
"content_hash": "eaf3d2f29142767a0413ccdbecf64101",
"timestamp": "",
"source": "github",
"line_count": 821,
"max_line_length": 88,
"avg_line_length": 30.89890377588307,
"alnum_prop": 0.5056764427625354,
"repo_name": "plotly/dash",
"id": "0e758a6889bfb2911ec0e8eb0604b7e899ace913",
"size": "25368",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/integration/devtools/test_callback_validation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17191"
},
{
"name": "HTML",
"bytes": "1729"
},
{
"name": "JavaScript",
"bytes": "638735"
},
{
"name": "Less",
"bytes": "22320"
},
{
"name": "Python",
"bytes": "1304969"
},
{
"name": "Shell",
"bytes": "224"
},
{
"name": "TypeScript",
"bytes": "840257"
}
],
"symlink_target": ""
} |
from gcp_common import BaseTest
class AppEngineAppTest(BaseTest):
    """Query and get coverage for the gcp.app-engine resource."""

    def test_app_query(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        factory = self.replay_flight_data(
            'app-engine-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-dryrun',
             'resource': 'gcp.app-engine'},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(resources[0]['name'], app_name)

    def test_app_get(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        factory = self.replay_flight_data(
            'app-engine-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-dryrun',
             'resource': 'gcp.app-engine'},
            session_factory=factory)
        fetched = policy.resource_manager.get_resource(
            {'resourceName': app_name})
        self.assertEqual(fetched['name'], app_name)
class AppEngineCertificateTest(BaseTest):
    """Query and get coverage for gcp.app-engine-certificate."""

    def test_certificate_query(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        certificate_id = '12277184'
        certificate_name = f'{app_name}/authorizedCertificates/{certificate_id}'
        factory = self.replay_flight_data(
            'app-engine-certificate-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-certificate-dryrun',
             'resource': 'gcp.app-engine-certificate'},
            session_factory=factory)
        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], certificate_name)
        # Parent annotation must point back at the owning app.
        self.assertEqual(resources[0][parent_key]['name'], app_name)

    def test_certificate_get(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        certificate_id = '12277184'
        certificate_name = f'{app_name}/authorizedCertificates/{certificate_id}'
        factory = self.replay_flight_data(
            'app-engine-certificate-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-certificate-dryrun',
             'resource': 'gcp.app-engine-certificate'},
            session_factory=factory)
        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        fetched = policy.resource_manager.get_resource(
            {'resourceName': certificate_name})
        self.assertEqual(fetched['name'], certificate_name)
        self.assertEqual(fetched[parent_key]['name'], app_name)
class AppEngineDomainTest(BaseTest):
    """Query coverage for gcp.app-engine-domain."""

    def test_domain_query(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        domain_id = 'gcp-li.ga'
        domain_name = f'{app_name}/authorizedDomains/{domain_id}'
        factory = self.replay_flight_data(
            'app-engine-domain-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-dryrun',
             'resource': 'gcp.app-engine-domain'},
            session_factory=factory)
        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], domain_name)
        # Parent annotation must point back at the owning app.
        self.assertEqual(resources[0][parent_key]['name'], app_name)
class AppEngineDomainMappingTest(BaseTest):
    """Query and get coverage for gcp.app-engine-domain-mapping."""

    def test_domain_mapping_query(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        mapping_id = 'alex.gcp-li.ga'
        mapping_name = f'{app_name}/domainMappings/{mapping_id}'
        factory = self.replay_flight_data(
            'app-engine-domain-mapping-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-mapping-dryrun',
             'resource': 'gcp.app-engine-domain-mapping'},
            session_factory=factory)
        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['name'], mapping_name)
        # Parent annotation must point back at the owning app.
        self.assertEqual(resources[0][parent_key]['name'], app_name)

    def test_domain_mapping_get(self):
        project_id = 'cloud-custodian'
        app_name = f'apps/{project_id}'
        mapping_id = 'alex.gcp-li.ga'
        mapping_name = f'{app_name}/domainMappings/{mapping_id}'
        factory = self.replay_flight_data(
            'app-engine-domain-mapping-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-domain-mapping-dryrun',
             'resource': 'gcp.app-engine-domain-mapping'},
            session_factory=factory)
        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        fetched = policy.resource_manager.get_resource(
            {'resourceName': mapping_name})
        self.assertEqual(fetched['name'], mapping_name)
        self.assertEqual(fetched[parent_key]['name'], app_name)
class AppEngineFirewallIngressRuleTest(BaseTest):
    """Tests for the gcp.app-engine-firewall-ingress-rule resource."""

    def test_firewall_ingress_rule_query(self):
        """Enumerate ingress rules and check priority plus parent annotation."""
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        rule_priority = 2147483647

        factory = self.replay_flight_data(
            'app-engine-firewall-ingress-rule-query', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-firewall-ingress-rule-dryrun',
             'resource': 'gcp.app-engine-firewall-ingress-rule'},
            session_factory=factory)

        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        resources = policy.run()
        self.assertEqual(resources[0]['priority'], rule_priority)
        self.assertEqual(resources[0][parent_key]['name'], app_name)

    def test_firewall_ingress_rule_get(self):
        """Fetch one ingress rule by its full resource name (priority-based)."""
        project_id = 'cloud-custodian'
        app_name = 'apps/{}'.format(project_id)
        rule_priority = 2147483647
        rule_priority_full = '{}/firewall/ingressRules/{}'.format(app_name, rule_priority)

        factory = self.replay_flight_data(
            'app-engine-firewall-ingress-rule-get', project_id=project_id)
        policy = self.load_policy(
            {'name': 'gcp-app-engine-firewall-ingress-rule-dryrun',
             'resource': 'gcp.app-engine-firewall-ingress-rule'},
            session_factory=factory)

        parent_key = policy.resource_manager.resource_type.get_parent_annotation_key()
        rule = policy.resource_manager.get_resource(
            {'resourceName': rule_priority_full})
        self.assertEqual(rule['priority'], rule_priority)
        self.assertEqual(rule[parent_key]['name'], app_name)
| {
"content_hash": "a71c5a437144eea62b80692d2eacdef8",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 97,
"avg_line_length": 42.358381502890175,
"alnum_prop": 0.6368722707423581,
"repo_name": "thisisshi/cloud-custodian",
"id": "8e616529d0b286522cb4896a5be2338bb6198709",
"size": "7408",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_gcp/tests/test_appengine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "62085"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6684814"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
} |
import numpy as np
from .query import PWEQuery
from .helper import pw_slicer
class PWEFeatureCalculation:
    """Static helpers for computing per-possible-world (PW) feature values."""

    @staticmethod
    def custom_feature_calculation(pws_rel_dfs, rel_schemas, pw_objs, func=lambda pw_dfs, rel_schemas, pw_obj: 0):
        """Apply ``func`` to each possible world's slice of the relation
        dataframes and return {pw_id: func result}."""
        features = {}
        for pw in pw_objs:
            sliced_dfs, _ = pw_slicer(pws_rel_dfs, None, [pw.pw_id])
            features[pw.pw_id] = func(sliced_dfs, rel_schemas, pw)
        return features

    @staticmethod
    def euler_complexity_analysis(expected_pws, dfs, rl_name, col_name, pws_to_consider: list = None, do_print=True):
        """Score each PW by the frequency of '"><"' in ``col_name`` of
        ``rl_name``, min-max normalized; optionally print a ranking."""
        if not pws_to_consider:
            pws_to_consider = [j for j in range(1, expected_pws + 1)]
        complexities = np.zeros(len(pws_to_consider))
        for idx, pw in enumerate(pws_to_consider):
            complexities[idx] = PWEQuery.freq(
                expected_pws, dfs, rl_name, [col_name], ['"><"'], [pw], False)[1][0]
        lo, hi = np.min(complexities), np.max(complexities)
        if hi != lo:
            # Min-max normalize to [0, 1] only when scores are not all equal.
            complexities = (complexities - lo) / (hi - lo)
        if do_print:
            ranked = sorted(zip(pws_to_consider, complexities),
                            key=lambda pair: pair[1], reverse=True)
            print('PWs: ', str([pair[0] for pair in ranked]))
            print('Complexities:', str([pair[1].round(2) for pair in ranked]))
        return complexities
| {
"content_hash": "20ae49080bb2a34fabd1a550c2d87706",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 117,
"avg_line_length": 39.62162162162162,
"alnum_prop": 0.6084583901773534,
"repo_name": "idaks/PW-explorer",
"id": "00fac08395e5cf392bb8e453aecfdf6cdf80883c",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PW_explorer/pw_feature_calc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "3147"
},
{
"name": "Dockerfile",
"bytes": "631"
},
{
"name": "Python",
"bytes": "247807"
}
],
"symlink_target": ""
} |
import functools
from wadl2rst.nodes.base import BaseNode
from wadl2rst.nodes.parameters import ParametersNode
def collapse_resources(tree):
    """ In the input wadl, the resource uris are split out into a nested
    structure with each resource slug having it's own level. For the output,
    we only care about the methods that can be done on resources. """
    # grab all the resources nodes with method nodes under them.
    resource_nodes = []
    resource_visitor = functools.partial(get_resource_nodes, resource_nodes)
    tree.visit(resource_visitor)
    # get resource types and add methods to resource nodes
    resource_type_nodes = []
    resource_type_visitor = functools.partial(get_resource_type_nodes, resource_type_nodes)
    tree.visit(resource_type_visitor)
    # append each resource_type's method node to the matching resource node
    for rtnode in resource_type_nodes:
        # remove the resource_type node from the tree
        # this prevents an id conflict down the line
        rtnode.parent.remove_child(rtnode)
        # extract method node
        # NOTE(review): if a resource_type has several "method" children only
        # the last one is kept (and method_node stays unbound when there is
        # none) -- confirm this is intended.
        for child in rtnode.children:
            if child.name == "method":
                method_node = child
        # look up resource node
        # this is really inefficient but I can't figure out how to get the node while keeping it in scope
        for rnode in resource_nodes:
            type_name = '#' + rtnode.attributes['id']
            found_node = find_resource_nodes_with_type(type_name, rnode)
            if not found_node:
                # fall back to the resource's direct children
                for rn in rnode.children:
                    found_node = find_resource_nodes_with_type(type_name, rn)
                    if found_node:
                        break
            if not found_node:
                continue
            # change the parent of the method node to the resource node
            method_node.parent = found_node
            # add the method node to the resource node's children
            found_node.add_child(method_node)
    # setup the path for each node properly
    for node in resource_nodes:
        setup_node_path(node)
    # remove each node from the tree (reversed so children detach before parents)
    for node in reversed(resource_nodes):
        node.parent.remove_child(node)
        node.parent = None
    resources_node = tree.find_first("resources")
    # if there is no <resources> node on the page, add one.
    if resources_node is None:
        resources_node = BaseNode(tree, "resources", {})
        tree.add_child(resources_node)
    # discard whatever the resources node previously held
    resources_node.children = []
    # setup the resources nodes in their proper place
    for node in resource_nodes:
        resources_node.add_child(node)
        node.parent = resources_node
    # remove any param nodes not nested in params
    param_nodes = []
    param_visitor = functools.partial(get_param_nodes, param_nodes)
    tree.visit(param_visitor)
    for node in set(param_nodes):
        node.parent.remove_child(node)
        node.parent = None
    # remove any resource nodes with no method children
    empty_resources = []
    empty_resource_visitor = functools.partial(get_empty_resource_nodes, empty_resources)
    tree.visit(empty_resource_visitor)
    for node in empty_resources:
        node.parent.remove_child(node)
def setup_node_path(node):
    """Prepare *node* to live outside its resource hierarchy.

    Joins the 'path' attributes of the node and its resource ancestors into
    attributes['full_path'], and prepends a params node containing clones of
    every param child found along the way.
    """
    segments = []
    inherited = []
    current = node
    while current is not None:
        attrs = current.attributes
        # handle the path segment for this level
        if 'path' in attrs:
            segments.insert(0, attrs['path'].strip('/'))
        # params of this level go in front of those collected so far,
        # reversed within the level (matches the original insert-at-0 order)
        level_params = [c for c in current.children if c.name == "param"]
        inherited = list(reversed(level_params)) + inherited
        # keep climbing only while the parent is itself a resource
        parent = current.parent
        current = parent if parent.name == "resource" else None
    node.attributes['full_path'] = '/' + '/'.join(segments)
    if inherited:
        params_node = ParametersNode(node, 'params', {})
        node.children.insert(0, params_node)
        for param in inherited:
            clone = param.clone()
            params_node.add_child(clone)
            clone.parent = params_node
def get_param_nodes(memory, node):
    """Visitor: collect param nodes sitting directly under a resource node."""
    is_resource_param = node.name == "param" and node.parent.name == "resource"
    if is_resource_param:
        memory.append(node)
def get_empty_resource_nodes(memory, node):
    """Visitor: collect resource nodes that have no method child."""
    child_names = {child.name for child in node.children}
    if node.name == "resource" and "method" not in child_names:
        memory.append(node)
def get_resource_nodes(memory, node):
    """Visitor: collect resource nodes that have at least one method child."""
    has_method = any(child.name == "method" for child in node.children)
    if node.name == "resource" and has_method:
        memory.append(node)
def get_resource_type_nodes(memory, node):
    """Visitor: collect resource_type nodes that carry a method child."""
    has_method = any(child.name == "method" for child in node.children)
    if node.name == "resource_type" and has_method:
        memory.append(node)
def find_resource_nodes_with_type(type_name, node):
    """Return *node* when its 'type' attribute equals *type_name*, else None."""
    attrs = node.attributes
    matches = "type" in attrs and attrs["type"] == type_name
    return node if matches else None
"content_hash": "3f6d93cdbb97ab2b52f108480051dfc8",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 114,
"avg_line_length": 33.7625,
"alnum_prop": 0.62217697149204,
"repo_name": "annegentle/wadl2rst",
"id": "5c86a229ed26546c211d9153eb69f50389e82ba4",
"size": "5403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wadl2rst/transformations/collapse_resources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26080"
},
{
"name": "HTML",
"bytes": "1362"
},
{
"name": "JavaScript",
"bytes": "1273"
},
{
"name": "Python",
"bytes": "62349"
}
],
"symlink_target": ""
} |
from rosidl_adapter.parser import parse_service_string
from rosidl_adapter.resource import expand_template
def convert_srv_to_idl(package_dir, package_name, input_file, output_dir):
    """Convert a .srv interface definition into its .idl equivalent.

    :param package_dir: absolute Path of the package root
    :param package_name: name of the ROS package
    :param input_file: Path of the .srv file, relative to ``package_dir``
    :param output_dir: directory Path where the .idl file is written
    :returns: Path of the generated .idl file
    """
    assert package_dir.is_absolute()
    assert not input_file.is_absolute()
    assert input_file.suffix == '.srv'

    abs_input_file = package_dir / input_file
    print(f'Reading input file: {abs_input_file}')
    # Fix: the original computed abs_input_file twice (duplicated line).
    content = abs_input_file.read_text(encoding='utf-8')
    srv = parse_service_string(package_name, input_file.stem, content)

    output_file = output_dir / input_file.with_suffix('.idl').name
    abs_output_file = output_file.absolute()
    print(f'Writing output file: {abs_output_file}')
    data = {
        'pkg_name': package_name,
        'relative_input_file': input_file.as_posix(),
        'srv': srv,
    }
    expand_template('srv.idl.em', data, output_file, encoding='iso-8859-1')
    return output_file
| {
"content_hash": "898150e3d71a5241c87c13cd16c3a61d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 37.11538461538461,
"alnum_prop": 0.6787564766839378,
"repo_name": "ros2/rosidl",
"id": "c57b7013fc5052e0e3c72e4f4fdc330c314de8ad",
"size": "1567",
"binary": false,
"copies": "1",
"ref": "refs/heads/rolling",
"path": "rosidl_adapter/rosidl_adapter/srv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148503"
},
{
"name": "C++",
"bytes": "460037"
},
{
"name": "CMake",
"bytes": "95353"
},
{
"name": "EmberScript",
"bytes": "154541"
},
{
"name": "Python",
"bytes": "266797"
},
{
"name": "Shell",
"bytes": "1728"
}
],
"symlink_target": ""
} |
import psycopg2
import sys
def drop_if_exists(tbl_name, conn):
    """
    delete a table if exists
    """
    # NOTE(review): tbl_name is interpolated straight into the SQL text;
    # this is only safe because callers pass hard-coded table names.
    # Never expose it to untrusted input (SQL injection risk).
    cur = conn.cursor()
    cur.execute("drop table if exists %s" % tbl_name)
    conn.commit()
def summarize(conn, tbl_name):
    # Print every (src_id, dst_id, weight) edge stored in tbl_name.
    # This file is Python 2 (statement-form print).
    cur = conn.cursor()
    cur.execute("select * from %s" % tbl_name)
    tree = cur.fetchall()
    print "=" * 30
    print "edges in minimum spanning tree"
    print "=" * 30
    print "src_id\tdst_id\tweight"
    for i in range(len(tree)):
        src_id = tree[i][0]
        dst_id = tree[i][1]
        weight = tree[i][2]
        print "%d\t%d\t%f" %(src_id, dst_id, weight)
def mst(conn, edge_table, dataset):
    """
    Prim's algorithm
    """
    # Computes a minimum spanning tree of `edge_table` entirely inside the
    # database: `target_table` accumulates chosen edges, `node_table` the
    # visited node set, `tmp_table` the single cheapest frontier edge per
    # iteration.
    # NOTE(review): table names are %-interpolated into SQL; callers must
    # pass trusted identifiers only (SQL injection risk otherwise).
    print "Calculating Minimum Spanning Tree"
    cur = conn.cursor()
    target_table = "mst_" + dataset
    node_table = "node_mst"
    tmp_table = "tmp_table"
    drop_if_exists(target_table, conn)
    drop_if_exists(node_table, conn)
    drop_if_exists(tmp_table, conn)
    #cur.execute("drop index if exists e_index")
    #cur.execute("create index e_index on %s(src_id)" % edge_table)
    cur.execute("create table %s(src_id int, dst_id int, weight float)" % target_table)
    cur.execute("create table %s(nid int)" % node_table)
    cur.execute("create table %s(src_id int, dst_id int, weight float)" % tmp_table);
    conn.commit()
    # A spanning tree over n nodes needs exactly n - 1 edges.
    cur.execute('select count(distinct src_id)from %s' % edge_table)
    num_nodes = cur.fetchone()[0]
    # randomly insert an initial node
    cur.execute('insert into %s select src_id from %s limit 1' % (node_table, edge_table))
    conn.commit()
    # Classic Prim step: each iteration picks the cheapest edge from a
    # visited node to an unvisited one, then marks its endpoint visited.
    for i in range(num_nodes - 1):
        print "iteration %d" % (i + 1)
        cur.execute("""insert into %s select src_id, dst_id, weight from %s as A, %s as B
        where A.src_id = B.nid AND A.dst_id not in (select nid from %s) order by weight limit 1""" % (tmp_table, edge_table, node_table, node_table))
        cur.execute('insert into %s select dst_id from %s' % (node_table, tmp_table))
        cur.execute('insert into %s select * from %s' % (target_table, tmp_table))
        cur.execute('delete from %s' % tmp_table)
        conn.commit()
    summarize(conn, target_table)
    print "done"
if __name__ == "__main__":
    # Usage: mst.py <edge_table> <dataset_suffix>; connects to local Postgres.
    conn = psycopg2.connect(database="mydb", host="127.0.0.1")
    mst(conn, sys.argv[1], sys.argv[2])
| {
"content_hash": "9bd394f9d524219f811add82951799d6",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 144,
"avg_line_length": 33.13846153846154,
"alnum_prop": 0.6573816155988857,
"repo_name": "spininertia/graph-mining-rdbms",
"id": "096184dacc5d73125aeb6bd4c1615ad6fe64f00f",
"size": "2154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/phase-3/src/mst/mst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106216"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
} |
import json
import logging
from io import BytesIO
from urllib.parse import urlencode
import tornado
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse
from tornado.httputil import HTTPHeaders
from smart_sentinel.tornado_client import TornadoStrictRedis
logger = logging.getLogger(__name__)
class StaleHTTPClient(object):
    """HTTP client with a two-tier (primary + stale) Redis response cache.

    Successful responses are stored twice: under a short-lived primary key
    and under a longer-lived stale key. Fetches are served from the primary
    cache when possible; when the origin fails, the stale copy (if any) is
    returned as a fallback before raising.
    """

    def __init__(self, cache=None, client=None,
                 primary_key_prefix='primary_http',
                 stale_key_prefix='stale_http',
                 ttl=5, stale_ttl=None):
        # ttl / stale_ttl are in seconds; stale_ttl=None means "never expire".
        self.cache = cache or TornadoStrictRedis()
        self.client = client or tornado.httpclient.AsyncHTTPClient()
        self.primary_key_prefix = primary_key_prefix
        self.stale_key_prefix = stale_key_prefix
        self.ttl = ttl
        self.stale_ttl = stale_ttl

    @gen.coroutine
    def fetch(self, request, vary=None, **kwargs):
        """Fetch ``request`` (URL string or HTTPRequest), serving from the
        primary cache, then the origin, then the stale cache on failure.

        ``vary`` lists header names that become part of the cache key.
        ``ttl`` / ``stale_ttl`` / ``raise_error`` may be overridden per call.
        """
        should_raise_error = kwargs.pop('raise_error', True)
        ttl = kwargs.pop('ttl', self.ttl)
        stale_ttl = kwargs.pop('stale_ttl', self.stale_ttl)

        # Convert to HTTPRequest if fetching a bare URL
        if not isinstance(request, HTTPRequest):
            request = HTTPRequest(url=request, **kwargs)

        # Try the primary cache first
        cached_response = yield self.get_primary_cache(request, vary=vary)
        if cached_response is not None:
            raise gen.Return(cached_response)

        # Cache miss: hit the origin (errors handled manually below)
        real_response = yield self.client.fetch(
            request, raise_error=False, **kwargs)

        # On success, populate both cache tiers and return
        if real_response.error is None:
            yield self.set_cache(request, vary, real_response, ttl, stale_ttl)
            raise gen.Return(real_response)

        # Origin failed: fall back to the stale tier
        stale_response = yield self.get_stale_cache(request, vary=vary)
        if stale_response is not None:
            raise gen.Return(stale_response)

        # No stale copy either: raise or hand back the failed response
        if should_raise_error:
            real_response.rethrow()
        raise gen.Return(real_response)

    def get_key(self, request, vary):
        # Cache key = URL plus url-encoded values of the vary headers.
        vary = vary or []
        vary_headers = {
            k.lower(): v for k, v in request.headers.items() if k in vary}
        return request.url + "#" + urlencode(vary_headers)

    def get_primary_key(self, request, vary):
        return '%s:%s' % (self.primary_key_prefix, self.get_key(request, vary))

    def get_stale_key(self, request, vary):
        return '%s:%s' % (self.stale_key_prefix, self.get_key(request, vary))

    @gen.coroutine
    def get_cache(self, request, key):
        """Return the HTTPResponse cached under ``key``, or None on a miss."""
        raw_data = yield self.cache.get(key)
        if raw_data is None:
            return None
        logger.debug('Loaded cache: %s', key)
        response = self.deserialize_response(request, raw_data)
        return response

    @gen.coroutine
    def get_primary_cache(self, request, vary):
        key = self.get_primary_key(request, vary)
        result = yield self.get_cache(request, key)
        return result

    @gen.coroutine
    def get_stale_cache(self, request, vary):
        key = self.get_stale_key(request, vary)
        result = yield self.get_cache(request, key)
        return result

    @gen.coroutine
    def set_cache(self, request, vary, response, ttl, stale_ttl):
        """Store ``response`` under both primary and stale keys in one pipeline."""
        primary_key = self.get_primary_key(request, vary)
        stale_key = self.get_stale_key(request, vary)
        logger.debug('Caching response: %s', request.url)
        serialized_response = self.serialize_response(request, response)
        pipe = yield self.cache.pipeline()
        with pipe:
            # Redis PX expiry is expressed in milliseconds.
            # (Renamed from the original's misleading "microseconds".)
            milliseconds = int(ttl * 1000)
            pipe.set(primary_key, serialized_response, px=milliseconds)
            # stale_ttl of None propagates as px=None, i.e. no expiry.
            milliseconds = stale_ttl and int(stale_ttl * 1000)
            pipe.set(stale_key, serialized_response, px=milliseconds)
            pipe.execute()

    def serialize_response(self, request, response):
        """Serialize the parts of an HTTPResponse needed to replay it."""
        return json.dumps({
            'headers': dict(response.headers),
            'body': response.body.decode(),
            'code': response.code,
        })

    def deserialize_response(self, request, raw_data):
        """Rebuild an HTTPResponse from its cached JSON form."""
        # Fix: json.loads() no longer accepts an ``encoding`` argument
        # (deprecated in 3.1, removed in Python 3.9 -> TypeError); bytes
        # input is decoded automatically per RFC-style detection.
        data = json.loads(raw_data)
        buffer = BytesIO(data['body'].encode('utf-8'))
        request.headers = HTTPHeaders(request.headers)
        return HTTPResponse(
            request=request,
            headers=HTTPHeaders(data['headers']),
            code=data['code'],
            buffer=buffer,
        )
| {
"content_hash": "3ab53c0abb9def8998d20b019938bef9",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 79,
"avg_line_length": 34.61832061068702,
"alnum_prop": 0.6227122381477398,
"repo_name": "globocom/tornado-stale-client",
"id": "8e44a87c983edabaad54f127bfb75680a9c9f850",
"size": "4800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado_stale_client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "454"
},
{
"name": "Python",
"bytes": "15794"
}
],
"symlink_target": ""
} |
"""
This file contains classes and functions for representing,
solving, and simulating agents who must allocate their resources
among consumption, saving in a risk-free asset (with a low return),
and saving in a risky asset (with higher average return).
This file also demonstrates a "frame" model architecture.
"""
import numpy as np
from scipy.optimize import minimize_scalar
from copy import deepcopy
from HARK.frame import Frame, FrameAgentType, FrameModel
from HARK.ConsumptionSaving.ConsIndShockModel import LognormPermIncShk
from HARK.ConsumptionSaving.ConsPortfolioModel import (
init_portfolio,
PortfolioConsumerType,
)
from HARK.distribution import combine_indep_dstns, add_discrete_outcome_constant_mean
from HARK.distribution import (
IndexDistribution,
Lognormal,
MeanOneLogNormal,
Bernoulli # Random draws for simulating agents
)
from HARK.utilities import (
CRRAutility,
)
class PortfolioConsumerFrameType(FrameAgentType, PortfolioConsumerType):
    """
    A consumer type with a portfolio choice, using Frame architecture.
    A subclass of PortfolioConsumerType for now.
    This is mainly to keep the _solver_ logic intact.
    """

    def __init__(self, **kwds):
        # Merge caller overrides on top of the default portfolio calibration.
        params = init_portfolio.copy()
        params.update(kwds)
        kwds = params

        # Initialize a basic consumer type
        PortfolioConsumerType.__init__(
            self, **kwds
        )
        # Initialize the frame-based agent machinery with this class's model.
        FrameAgentType.__init__(
            self, self.model, **kwds
        )

        # Per-period simulation containers.
        self.shocks = {}
        self.controls = {}
        self.state_now = {}

    def solve(self):
        # Some contortions are needed here to make decision rule shaped objects
        # out of the HARK solution objects
        # NOTE(review): passing self explicitly to the bound super().solve
        # looks unusual -- confirm the parent's signature expects it.
        super().solve(self)

        ## TODO: make this a property of FrameAgentTypes or FrameModels?
        self.decision_rules = {}

        def decision_rule_Share_from_solution(solution_t):
            # Wrap one period's solution as a Share decision rule:
            # adjusters use ShareFuncAdj; non-adjusters use ShareFuncFxd.
            def decision_rule_Share(Adjust, mNrm, Share):
                Share = np.zeros(len(Adjust)) + np.nan
                Share[Adjust] = solution_t.ShareFuncAdj(mNrm[Adjust])
                Share[~Adjust] = solution_t.ShareFuncFxd(mNrm[~Adjust], Share[~Adjust])
                return Share
            return decision_rule_Share

        def decision_rule_cNrm_from_solution(solution_t):
            # Consumption rule: cFuncAdj for adjusters, cFuncFxd otherwise.
            def decision_rule_cNrm(Adjust, mNrm, Share):
                cNrm = np.zeros(len(Adjust)) + np.nan
                cNrm[Adjust] = solution_t.cFuncAdj(mNrm[Adjust])
                cNrm[~Adjust] = solution_t.cFuncFxd(
                    mNrm[~Adjust], Share[~Adjust]
                )
                return cNrm
            return decision_rule_cNrm

        # One decision rule per solved period.
        self.decision_rules[('Share',)] = [decision_rule_Share_from_solution(sol) for sol in self.solution]
        self.decision_rules[('cNrm',)] = [decision_rule_cNrm_from_solution(sol) for sol in self.solution]

    # TODO: streamline this so it can draw the parameters from context
    def birth_aNrmNow(self, N):
        """
        Birth value for aNrmNow
        """
        return Lognormal(
            mu=self.aNrmInitMean,
            sigma=self.aNrmInitStd,
            seed=self.RNG.randint(0, 2 ** 31 - 1),
        ).draw(N)

    # TODO: streamline this so it can draw the parameters from context
    def birth_pLvlNow(self, N):
        """
        Birth value for pLvlNow
        """
        pLvlInitMeanNow = self.pLvlInitMean + np.log(
            self.state_now["PlvlAgg"]
        )  # Account for newer cohorts having higher permanent income
        return Lognormal(
            pLvlInitMeanNow,
            self.pLvlInitStd,
            seed=self.RNG.randint(0, 2 ** 31 - 1)
        ).draw(N)

    # The frame model: each Frame lists (targets, scopes) and how the targets
    # are produced each period, in order.
    # maybe replace reference to init_portfolio to self.parameters?
    model = FrameModel([
        # todo : make an aggegrate value
        Frame(('PermShkAgg',), ('PermGroFacAgg',),
            transition = lambda PermGroFacAgg : (PermGroFacAgg,),
            aggregate = True
        ),
        Frame(
            ('PermShk'), None,
            default = {'PermShk' : 1.0},  # maybe this is unnecessary because the shock gets sampled at t = 0
            # this is discretized before it's sampled
            transition = IndexDistribution(
                Lognormal.from_mean_std,
                {
                    'mean' : init_portfolio['PermGroFac'],
                    'std' : init_portfolio['PermShkStd']
                }
            ).approx(
                init_portfolio['PermShkCount'], tail_N=0
            ),
        ),
        Frame(
            ('TranShk'), None,
            default = {'TranShk' : 1.0},  # maybe this is unnecessary because the shock gets sampled at t = 0
            # Transitory shock with a discrete unemployment outcome mixed in.
            transition = add_discrete_outcome_constant_mean(
                IndexDistribution(
                    MeanOneLogNormal,
                    {
                        'sigma' : init_portfolio['TranShkStd']
                    }).approx(
                        init_portfolio['TranShkCount'], tail_N=0
                    ),
                p = init_portfolio['UnempPrb'], x = init_portfolio['IncUnemp']
            )
        ),
        Frame( ## TODO: Handle Risky as an Aggregate value
            ('Risky'), None,
            transition = IndexDistribution(
                Lognormal.from_mean_std,
                {
                    'mean' : init_portfolio['RiskyAvg'],
                    'std' : init_portfolio['RiskyStd']
                }
                # seed=self.RNG.randint(0, 2 ** 31 - 1) : TODO: Seed logic
            ).approx(
                init_portfolio['RiskyCount']
            ),
            aggregate = True
        ),
        Frame(
            ('Adjust'), None,
            default = {'Adjust' : False},
            transition = IndexDistribution(
                Bernoulli,
                {'p' : init_portfolio['AdjustPrb']},
                # seed=self.RNG.randint(0, 2 ** 31 - 1) : TODO: Seed logic
            )  # self.t_cycle input implied
        ),
        Frame(
            # Portfolio return: convex combination of risky and risk-free.
            ('Rport'), ('Share', 'Risky', 'Rfree'),
            transition = lambda Share, Risky, Rfree : (Share * Risky + (1.0 - Share) * Rfree,)
        ),
        Frame(
            ('PlvlAgg'), ('PlvlAgg', 'PermShkAgg'),
            default = {'PlvlAgg' : 1.0},
            transition = lambda PlvlAgg, PermShkAgg : PlvlAgg * PermShkAgg,
            aggregate = True
        ),
        Frame(
            ('pLvl',),
            ('pLvl', 'PermShk'),
            default = {'pLvl' : birth_pLvlNow},
            transition = lambda pLvl, PermShk : (pLvl * PermShk,)
        ),
        Frame(
            # Bank balances normalized by permanent income.
            ('bNrm',),
            ('aNrm', 'Rport', 'PermShk'),
            transition = lambda aNrm, Rport, PermShk: (Rport / PermShk) * aNrm
        ),
        Frame(
            ('mNrm',),
            ('bNrm', 'TranShk'),
            transition = lambda bNrm, TranShk : (bNrm + TranShk,)
        ),
        Frame(
            ('Share'), ('Adjust', 'mNrm', 'Share'),
            default = {'Share' : 0},
            control = True
        ),
        Frame(
            ('cNrm'), ('Adjust','mNrm','Share'),
            control = True
        ),
        Frame(
            ('U'), ('cNrm','CRRA'), ## Note CRRA here is a parameter not a state var
            transition = lambda cNrm, CRRA : (CRRAutility(cNrm, CRRA),),
            reward = True
        ),
        Frame(
            ('aNrm'), ('mNrm', 'cNrm'),
            default = {'aNrm' : birth_aNrmNow},
            transition = lambda mNrm, cNrm : (mNrm - cNrm,)
        ),
        Frame(
            ('aLvl'), ('aNrm', 'pLvl'),
            transition = lambda aNrm, pLvl : (aNrm * pLvl,)
        )
    ],
    init_portfolio)
| {
"content_hash": "63f43f6a6c56d09a6d3ae70620636367",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 108,
"avg_line_length": 34.01310043668122,
"alnum_prop": 0.5331878289896007,
"repo_name": "econ-ark/HARK",
"id": "c17b23b7bf32e263df3a3924993364772368494a",
"size": "7789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "HARK/ConsumptionSaving/ConsPortfolioFrameModel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "111"
},
{
"name": "Python",
"bytes": "1397750"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
} |
import warnings
import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
    # Maps type objects to Django Field types.
    data_types_reverse = {
        cx_Oracle.BLOB: 'BinaryField',
        cx_Oracle.CLOB: 'TextField',
        cx_Oracle.DATETIME: 'DateField',
        cx_Oracle.FIXED_CHAR: 'CharField',
        cx_Oracle.NCLOB: 'TextField',
        cx_Oracle.NUMBER: 'DecimalField',
        cx_Oracle.STRING: 'CharField',
        cx_Oracle.TIMESTAMP: 'DateTimeField',
    }

    # These type constants are not present in every cx_Oracle release,
    # so register them only when available.
    try:
        data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
    except AttributeError:
        pass
    try:
        data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
    except AttributeError:
        pass

    # Incremented for each table-description query so the generated SQL text
    # differs and Oracle does not serve a cached statement.
    cache_bust_counter = 1
def get_field_type(self, data_type, description):
    """Map a cx_Oracle column type (plus description metadata) to a Django
    field name.

    Oracle stores all numerics as NUMBER; precision and scale from the
    description decide the concrete Django field.
    """
    if data_type == cx_Oracle.NUMBER:
        precision, scale = description[4:6]
        if scale == -127:
            # Oracle reports binary floats with scale -127.
            return 'FloatField'
        if scale == 0:
            if precision > 11:
                return 'BigIntegerField'
            if precision == 1:
                return 'BooleanField'
            return 'IntegerField'
    return super(DatabaseIntrospection, self).get_field_type(data_type, description)
def get_table_list(self, cursor):
    """
    Returns a list of table and view names in the current database.
    """
    cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
                   "SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
    tables = []
    for name, table_type in cursor.fetchall():
        tables.append(TableInfo(name.lower(), table_type))
    return tables
def get_table_description(self, cursor, table_name):
    "Returns a description of the table, with the DB-API cursor.description interface."
    # user_tab_columns gives data default for columns
    cursor.execute("""
        SELECT column_name, data_default
        FROM user_tab_cols
        WHERE table_name = UPPER(%s)""", [table_name])
    # Oracle reports the literal string 'NULL' for no default; map it to None.
    columns_default = {column: default if default != 'NULL' else None for column, default in cursor.fetchall()}
    # Bump the counter so the appended always-true predicate changes each
    # call, defeating Oracle's statement cache; ROWNUM < 2 keeps the query
    # cheap since only cursor.description is needed.
    self.cache_bust_counter += 1
    cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
        self.connection.ops.quote_name(table_name),
        self.cache_bust_counter))
    description = []
    for desc in cursor.description:
        name = force_text(desc[0])  # cx_Oracle always returns a 'str' on both Python 2 and 3
        default = columns_default[name]
        name = name % {}  # cx_Oracle, for some reason, doubles percent signs.
        description.append(FieldInfo(*(name.lower(),) + desc[1:] + (default,)))
    return description
def table_name_converter(self, name):
    """Normalize a table name; Oracle compares names case-insensitively."""
    return name.lower()
def _name_to_index(self, cursor, table_name):
    """
    Returns a dictionary of {field_name: field_index} for the given table.
    Indexes are 0-based.
    """
    description = self.get_table_description(cursor, table_name)
    return {field[0]: position for position, field in enumerate(description)}
def get_relations(self, cursor, table_name):
    """
    Returns a dictionary of {field_name: (field_name_other_table, other_table)}
    representing all relationships to the given table.
    """
    table_name = table_name.upper()
    cursor.execute("""
    SELECT ta.column_name, tb.table_name, tb.column_name
    FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
    user_tab_cols ta, user_tab_cols tb
    WHERE user_constraints.table_name = %s AND
    ta.table_name = user_constraints.table_name AND
    ta.column_name = ca.column_name AND
    ca.table_name = ta.table_name AND
    user_constraints.constraint_name = ca.constraint_name AND
    user_constraints.r_constraint_name = cb.constraint_name AND
    cb.table_name = tb.table_name AND
    cb.column_name = tb.column_name AND
    ca.position = cb.position""", [table_name])
    return {
        column.lower(): (other_column.lower(), other_table.lower())
        for column, other_table, other_column in cursor.fetchall()
    }
def get_key_columns(self, cursor, table_name):
    """Return [(column, referenced_table, referenced_column), ...] for every
    foreign-key constraint on *table_name*, all lower-cased."""
    cursor.execute("""
    SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
    FROM user_constraints c
    JOIN user_cons_columns ccol
    ON ccol.constraint_name = c.constraint_name
    JOIN user_cons_columns rcol
    ON rcol.constraint_name = c.r_constraint_name
    WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
    key_columns = []
    for row in cursor.fetchall():
        key_columns.append(tuple(cell.lower() for cell in row))
    return key_columns
def get_indexes(self, cursor, table_name):
    """Deprecated: return {column: {'primary_key': bool, 'unique': bool}}
    for single-column indexes on *table_name*."""
    warnings.warn(
        "get_indexes() is deprecated in favor of get_constraints().",
        RemovedInDjango21Warning, stacklevel=2
    )
    # Only single-column indexes are considered (the NOT EXISTS clause
    # excludes indexes with a second column).
    sql = """
    SELECT LOWER(uic1.column_name) AS column_name,
    CASE user_constraints.constraint_type
    WHEN 'P' THEN 1 ELSE 0
    END AS is_primary_key,
    CASE user_indexes.uniqueness
    WHEN 'UNIQUE' THEN 1 ELSE 0
    END AS is_unique
    FROM user_constraints, user_indexes, user_ind_columns uic1
    WHERE user_constraints.constraint_type (+) = 'P'
    AND user_constraints.index_name (+) = uic1.index_name
    AND user_indexes.uniqueness (+) = 'UNIQUE'
    AND user_indexes.index_name (+) = uic1.index_name
    AND uic1.table_name = UPPER(%s)
    AND uic1.column_position = 1
    AND NOT EXISTS (
    SELECT 1
    FROM user_ind_columns uic2
    WHERE uic2.index_name = uic1.index_name
    AND uic2.column_position = 2
    )
    """
    cursor.execute(sql, [table_name])
    return {
        column: {'primary_key': bool(pk), 'unique': bool(unique)}
        for column, pk, unique in cursor.fetchall()
    }
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs, uniques, and checks
cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE
WHEN EXISTS (
SELECT 1
FROM user_indexes
WHERE user_indexes.index_name = user_constraints.index_name
AND user_indexes.uniqueness = 'UNIQUE'
)
THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint,
CASE
WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1
ELSE 0
END AS has_index
FROM
user_constraints
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
user_constraints.constraint_type = ANY('P', 'U', 'C')
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, pk, unique, check, index in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": index, # All P and U come with index
}
# Record the details
constraints[constraint]['columns'].append(column)
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcons.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, other_table, other_column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
cols.index_name, LOWER(cols.column_name), cols.descend,
LOWER(ind.index_type)
FROM
user_ind_columns cols, user_indexes ind
WHERE
cols.table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
) AND cols.index_name = ind.index_name
ORDER BY cols.column_position
""", [table_name])
for constraint, column, order, type_ in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"orders": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": False,
"index": True,
"type": 'btree' if type_ == 'normal' else type_,
}
# Record the details
constraints[constraint]['columns'].append(column)
constraints[constraint]['orders'].append(order)
return constraints
| {
"content_hash": "3b2392db5b5169b6e4b306a7628eaf5c",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 115,
"avg_line_length": 40.74911660777385,
"alnum_prop": 0.5445716267776621,
"repo_name": "yewang15215/django",
"id": "2d91cc049fae797e73b4ef08e47576554d7fb806",
"size": "11532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/backends/oracle/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52508"
},
{
"name": "HTML",
"bytes": "173554"
},
{
"name": "JavaScript",
"bytes": "452093"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12105199"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import json
import logging
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from vio.pub.msapi import extsys
from vio.pub.vim.vimapi.nova import OperateServers
from vio.swagger import nova_utils
from vio.pub.exceptions import VimDriverVioException
logger = logging.getLogger(__name__)
class ListServersView(APIView):
    """Create (POST) or enumerate (GET) nova servers for one tenant."""
    @staticmethod
    def _auth_info(vim_info):
        # Connection/credential bundle expected by every OperateServers call.
        return {'vimid': vim_info['vimId'],
                'vimName': vim_info['name'],
                'username': vim_info['userName'],
                'password': vim_info['password'],
                'url': vim_info['url']}
    def post(self, request, vimid, tenantid):
        """Create a server, or report the existing one (idempotent create).

        Responds 200 with ``returnCode`` 0 when a server with the requested
        id/name already exists, 202 with ``returnCode`` 1 after starting a
        new creation, and an ``{'error': ...}`` payload otherwise.
        """
        try:
            create_req = json.loads(request.body)
        except (TypeError, ValueError):
            # Narrowed from a bare ``except Exception``: only a missing or
            # malformed JSON body should map onto this message.
            return Response(data={'error': 'Fail to decode request body.'},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        try:
            vim_info = extsys.get_vim_by_id(vimid)
        except VimDriverVioException as e:
            return Response(data={'error': str(e)}, status=e.status_code)
        data = self._auth_info(vim_info)
        rsp = {'vimId': vim_info['vimId'],
               'vimName': vim_info['name'],
               'tenantId': tenantid}
        servers_op = OperateServers.OperateServers()
        server_name = create_req.get('name', None)
        server_id = create_req.get('id', None)
        exist = False
        try:
            target = server_id or server_name
            server = servers_op.find_server(data, tenantid, target)
            # find_server only returns id and name; fetch all attributes.
            if server:
                server = servers_op.get_server(data, tenantid, server.id)
                rsp['returnCode'] = 0
                exist = True
            else:
                rsp['returnCode'] = 1
                server = servers_op.create_server(data, tenantid, create_req)
        except Exception as e:
            # Backend exceptions may carry their own HTTP status.
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        rsp.update(nova_utils.server_formatter(server))
        if exist:
            return Response(data=rsp, status=status.HTTP_200_OK)
        return Response(data=rsp, status=status.HTTP_202_ACCEPTED)
    def get(self, request, vimid, tenantid):
        """List every server in the tenant, including its interfaces."""
        try:
            vim_info = extsys.get_vim_by_id(vimid)
        except VimDriverVioException as e:
            return Response(data={'error': str(e)}, status=e.status_code)
        data = self._auth_info(vim_info)
        query = dict(request.query_params)
        servers_op = OperateServers.OperateServers()
        try:
            servers = servers_op.list_servers(data, tenantid, **query)
        except Exception as e:
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        servers_resp = []
        for server in servers:
            intfs = servers_op.list_server_interfaces(data, tenantid, server)
            servers_resp.append(
                nova_utils.server_formatter(server, interfaces=intfs))
        rsp = {'vimId': vim_info['vimId'],
               'vimName': vim_info['name'],
               'servers': servers_resp}
        return Response(data=rsp, status=status.HTTP_200_OK)
class GetServerView(APIView):
    """Fetch (GET) or delete (DELETE) a single nova server."""
    def get(self, request, vimid, tenantid, serverid):
        """Return the formatted server plus vim/tenant identifiers."""
        try:
            vim_info = extsys.get_vim_by_id(vimid)
        except VimDriverVioException as e:
            return Response(data={'error': str(e)}, status=e.status_code)
        auth = {'vimid': vim_info['vimId'],
                'vimName': vim_info['name'],
                'username': vim_info['userName'],
                'password': vim_info['password'],
                'url': vim_info['url']}
        server_op = OperateServers.OperateServers()
        try:
            target = server_op.get_server(auth, tenantid, serverid)
            nics = server_op.list_server_interfaces(auth, tenantid, target)
            formatted = nova_utils.server_formatter(target, interfaces=nics)
        except Exception as e:
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        result = {'vimId': vim_info['vimId'],
                  'vimName': vim_info['name'],
                  'tenantId': tenantid}
        result.update(formatted)
        return Response(data=result, status=status.HTTP_200_OK)
    def delete(self, request, vimid, tenantid, serverid):
        """Delete the server; respond 204 with no body on success."""
        server_op = OperateServers.OperateServers()
        try:
            vim_info = extsys.get_vim_by_id(vimid)
        except VimDriverVioException as e:
            return Response(data={'error': str(e)}, status=e.status_code)
        auth = {'vimid': vim_info['vimId'],
                'vimName': vim_info['name'],
                'username': vim_info['userName'],
                'password': vim_info['password'],
                'url': vim_info['url']}
        try:
            server_op.delete_server(auth, tenantid, serverid)
        except Exception as e:
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(status=status.HTTP_204_NO_CONTENT)
| {
"content_hash": "10afd6ae8f772e05199c10528c8d32f9",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 39.63636363636363,
"alnum_prop": 0.5576671035386632,
"repo_name": "johnsonlau/multivimdriver-vmware-vio",
"id": "966d5aacdbaab3ffeb22392d9b48dff4dfc954aa",
"size": "6584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vio/vio/swagger/views/server/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "145731"
},
{
"name": "Shell",
"bytes": "1658"
}
],
"symlink_target": ""
} |
from ..summary import Reader, Summarizer, NextKeyComposer, KeyValueComposer
from ..collector import ToTupleListWithDatasetColumn
from ..collector import WriteListToFile
from ..loop import Collector
##__________________________________________________________________||
def build_counter_collector_pair(tblcfg):
    """Build a (reader, collector) pair from the table config dict *tblcfg*.

    The reader summarizes events per key; the collector combines the
    per-dataset results into a tuple list and optionally writes it to
    ``tblcfg['outFilePath']``.
    """
    key_val = KeyValueComposer(
        keyAttrNames=tblcfg['keyAttrNames'],
        binnings=tblcfg['binnings'],
        keyIndices=tblcfg['keyIndices'],
        valAttrNames=tblcfg['valAttrNames'],
        valIndices=tblcfg['valIndices']
    )
    binnings = tblcfg['binnings']
    next_key = None if binnings is None else NextKeyComposer(binnings)
    reader = Reader(
        keyValComposer=key_val,
        summarizer=Summarizer(Summary=tblcfg['summaryClass']),
        nextKeyComposer=next_key,
        weightCalculator=tblcfg['weight'],
        nevents=tblcfg['nevents']
    )
    combine = ToTupleListWithDatasetColumn(
        summaryColumnNames=tblcfg['keyOutColumnNames'] + tblcfg['valOutColumnNames']
    )
    deliver = WriteListToFile(tblcfg['outFilePath']) if tblcfg['outFile'] else None
    return reader, Collector(combine, deliver)
##__________________________________________________________________||
| {
"content_hash": "d1506a1d51b7da46f6757c4987e5d8e1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 101,
"avg_line_length": 41.515151515151516,
"alnum_prop": 0.6481751824817519,
"repo_name": "alphatwirl/alphatwirl",
"id": "880a3ea5db1957f846d73b768963a61ef3216a6a",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alphatwirl/configure/build_counter_collector_pair.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3603"
},
{
"name": "Python",
"bytes": "775977"
},
{
"name": "R",
"bytes": "1222"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
import pyaudio

# NOTE: the bare ``global`` statements that used to precede each constant
# were removed: ``global`` is a no-op outside a function body, and module-
# level assignment already creates module globals.

###################
# COMMAND GLOBALS #
###################
COMMAND_NAME = 'jeeves'  # We have a name!
COMMAND_KEYWORDS = [COMMAND_NAME, 'record', 'talk', 'google']

#####################
# DEBUGGING GLOBALS #
#####################
DEBUG_VERBOSITY = True

##################
# RECORD GLOBALS #
##################
RECORD_TEMP_OUTPUT = 'demo.wav'  # scratch WAV file used by the recorder
RECORD_THRESHOLD = 500  # presumably an amplitude gate -- confirm in recorder
RECORD_CHUNK_SIZE = 1024
RECORD_FORMAT = pyaudio.paInt16  # 16-bit signed samples
RECORD_RATE = 44100  # sample rate in Hz
RECORD_CHANNELS = 1  # mono

#################
# ERROR globals #
#################
ERROR_UNKNOWN_COMMAND = -1
"content_hash": "0e742d39d2ab4a85779be3e01ac6d5c8",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 61,
"avg_line_length": 20.53846153846154,
"alnum_prop": 0.6154806491885143,
"repo_name": "Baveau/jeeves",
"id": "c1ef18827afbb236b82bcc6adf16a55729f2e539",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Brain/Config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6147"
}
],
"symlink_target": ""
} |
import subprocess
from time import *  # NOTE(review): wildcard import; no ``time`` name is used in this file -- confirm before removing
import sqlite3
import os
########################################################################################################################
# Static network configuration written onto the Pi by changeIP()/changeDNS().
DOOR_NUMBER = 101
NETWORK = "Airport"
PASSOWRD = "passpass"  # NOTE(review): misspelling of PASSWORD; referenced by changeIP(), so renaming must be file-wide
IP_ADDRESS = "192.168.0.53"
SUB_MASK = "255.255.255.0"
ROUTER = "192.168.0.12"
DNS = "212.40.0.10"
########################################################################################################################
# Open the SQLite database that lives next to this script; the connection
# is module-level and shared (used by readSqlite()).
current_dir = os.path.dirname(os.path.abspath(__file__))
DBpath = os.path.join(current_dir, 'RFID_Lock.sqlite')
con = sqlite3.connect(DBpath)
########################################################################################################################
def readSqlite():
    """Load network/Wi-Fi settings from the ``seguridad_puerta`` table.

    Publishes the values as module globals (ipNumber, dnsNumber, ...).

    Bug fix: the previous ``global`` statement listed unrelated names
    (personID, personName, ...), so every assignment below created a
    local variable that vanished when the function returned and the
    call had no effect at all.
    """
    global ipNumber, dnsNumber, subnetNumber, routerNumber, \
        wifiNetwork, wifiPassword, webPassword
    with con:
        c = con.cursor()
        c.execute("SELECT * FROM seguridad_puerta")
        row = c.fetchone()
        # Column positions follow the table layout; presumably fixed schema
        # -- confirm against the database definition.
        ipNumber = row[5]
        dnsNumber = row[9]
        subnetNumber = row[10]
        routerNumber = row[11]
        wifiNetwork = row[12]
        wifiPassword = row[13]
        webPassword = row[14]
########################################################################################################################
def changeDNS(DNS):
    # Overwrite /etc/resolv.conf with the given nameserver, echoing the
    # command's output (Python 2 file: bare ``print`` statements).
    # SECURITY NOTE(review): DNS is interpolated into a shell string run
    # with shell=True; only call this with trusted input.
    # NOTE(review): the '>' redirection is performed by the calling shell,
    # not by sudo, so this fails unless the script itself runs as root --
    # confirm on the target device.
    textDNS = "sudo echo 'nameserver %s' > '/etc/resolv.conf'" % (DNS)
    r = subprocess.Popen(textDNS, stdout=subprocess.PIPE, shell=True)
    for line in r.stdout.readlines():
        print line
    r.communicate()
########################################################################################################################
def changeIP():
    # Build a static-IP /etc/network/interfaces file from the module-level
    # NETWORK/PASSOWRD/IP_ADDRESS/SUB_MASK/ROUTER constants, write it next
    # to this script, then move it into /etc/network with sudo.
    textIP = ["auto lo",
              "",
              "iface lo inet loopback",
              "iface etho inet dhcp",
              "",
              "allow-hotplug wlan0",
              "iface wlan0 inet static",
              "wpa-ssid %s" % NETWORK,
              "wpa-psk %s" % PASSOWRD,
              "address %s" % IP_ADDRESS,
              "netmask %s" % SUB_MASK,
              "network 192.168.0.0",
              "gateway %s" % ROUTER,
              "",
              "iface default inet dhcp"]
    joinedString = '\n'.join(textIP)
    print joinedString
    # Write the file locally before moving it into place.
    file = current_dir + "/interfaces"
    textFile = open(file, "w")
    textFile.write(joinedString)
    textFile.close()
    shellCall = "sudo mv %s/interfaces /etc/network" % (current_dir)
    r = subprocess.Popen(shellCall, stdout=subprocess.PIPE, shell=True)
    r.communicate()
    # NOTE(review): ``r`` is a Popen object, so ``r == "okay"`` is always
    # False and the reboot below never executes -- confirm intent before
    # fixing (was this meant to test the command's output/return code?).
    if r == "okay":
        subprocess.Popen("reboot", stdout=subprocess.PIPE, shell=True)
########################################################################################################################
def installLib():
    # Install the Python dependencies via apt-get/easy_install.
    # NOTE(review): only ``testCall`` (pyserial) is actually executed; the
    # full ``shellCalls`` list is built but never run -- looks like a
    # debugging leftover. Confirm which list should drive the loop.
    shellCalls = ["sudo apt-get update",
                  "sudo apt-get install python-setuptools",
                  "sudo apt-get install git-core",
                  "sudo apt-get install python-dev",
                  "sudo apt-get install python-pip",
                  "sudo easy_install -U pyserial",
                  "sudo easy_install -U RPIO",
                  "sudo easy_install -U cherrypy",
                  "sudo easy_install psutil"]
    testCall = ["sudo easy_install -U pyserial"]
    for i in range(len(testCall)):
        r = subprocess.Popen(testCall[i], stdout=subprocess.PIPE, shell=True)
        print testCall[i]
        for line in r.stdout.readlines():
            print line.strip()
        r.communicate()
########################################################################################################################
def installGitCode():
    """Clone the piLock repository into the user's home directory.

    Bug fix: the commands used to run in separate ``Popen`` shells, so the
    ``cd`` steps never affected the ``git clone`` (each shell starts in the
    script's cwd). They are now chained with ``&&`` inside one shell
    invocation, and we wait for the clone to finish before returning.
    """
    textGit = ["cd ~",
               "git clone https://github.com/pbernasconi/piLock.git",
               "cd /piLock"]
    command = " && ".join(textGit)
    r = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    r.communicate()  # block until the clone completes
########################################################################################################################
def createStartScript():
    # Generate an LSB init script that launches the RFID reader, DNS
    # watcher and stats reporter at boot, install it under /etc/init.d
    # and register it with update-rc.d.
    textSS = ["### BEGIN INIT INFO",
              "# Provides: startScript",
              "# Required-Start: $remote_fs $syslog",
              "# Required-Stop: $remote_fs $syslog",
              "# Default-Start: 2 3 4 5",
              "# Default-Stop: 0 1 6",
              "# Short-Description: Simple script to start a program at boot",
              "# Description: A simple script from www.stuffaboutcode.com which will",
              "### END INIT INFO",
              "",
              "case '$1' in",
              " start)",
              " echo 'Starting Script'",
              " python %s/readRFID" % current_dir,
              " bash %s/modDNS.sh" % current_dir,
              " python %s/systemStats.py" % current_dir,
              " ;;",
              " stop)",
              " echo 'stopping Script'",
              " killall python",
              " ;;",
              " *)",
              " echo 'usage ...'",
              " exit 1",
              " ;;",
              "esac",
              "",
              "exit 0"]
    joinedString = '\n'.join(textSS)
    fileName = "startScript.sh"
    # ``file`` shadows the builtin here (Python 2-era code).
    file = current_dir + "/" + fileName
    textFile = open(file, "w")
    textFile.write(joinedString)
    textFile.close()
    commands = ["sudo mv %s/%s /etc/init.d/" % (current_dir, fileName),
                "sudo chmod 755 /etc/init.d/%s" % (fileName),
                "sudo update-rc.d %s defaults" % (fileName)]
    # NOTE(review): these Popen calls are not waited on, so the mv/chmod/
    # update-rc.d steps may race each other -- confirm ordering matters.
    for i in range(len(commands)):
        r = subprocess.Popen(commands[i], stdout=subprocess.PIPE, shell=True)
        print commands[i]
########################################################################################################################
def rebootPi():
    """Trigger an immediate system reboot (fire-and-forget)."""
    subprocess.Popen("sudo reboot", stdout=subprocess.PIPE, shell=True)
########################################################################################################################
if __name__ == '__main__':
    # One-time provisioning entry point; uncomment the steps you need.
    #readSqlite()
    #changeDNS(DNS)
    #changeIP()
    #installLib()
    #installGitCode()
    createStartScript()
| {
"content_hash": "9ceac433ebae232de9e40724a04f7626",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 120,
"avg_line_length": 33.34054054054054,
"alnum_prop": 0.4275291828793774,
"repo_name": "maestromark55/bust-radio",
"id": "f4ff9921c496543865c2e429d9837e31e7a618ac",
"size": "6306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/editDNS.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4823"
},
{
"name": "HTML",
"bytes": "14212"
},
{
"name": "JavaScript",
"bytes": "34948"
},
{
"name": "Python",
"bytes": "38049"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``mesh3d.colorbar.ypad`` number attribute."""

    def __init__(self, plotly_name="ypad", parent_name="mesh3d.colorbar", **kwargs):
        # Supply the attribute's defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
"content_hash": "02e2927eb34d8f4e0a28250ef667c4c6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 84,
"avg_line_length": 36.583333333333336,
"alnum_prop": 0.6059225512528473,
"repo_name": "plotly/plotly.py",
"id": "7287f8f6de4c5ac5a4d4dd4393eac4433d918207",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/mesh3d/colorbar/_ypad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Constants for working with Chinese characters."""
from __future__ import unicode_literals
import sys
#: Character code ranges for pertinent CJK ideograph Unicode blocks.
characters = cjk_ideographs = (
    '\u3007'  # Ideographic number zero, see issue #17
    '\u4E00-\u9FFF'  # CJK Unified Ideographs
    '\u3400-\u4DBF'  # CJK Unified Ideographs Extension A
    '\uF900-\uFAFF'  # CJK Compatibility Ideographs
)
# Supplementary-plane ranges only exist on wide builds / Python 3.3+.
if sys.maxunicode > 0xFFFF:
    characters += (
        '\U00020000-\U0002A6DF'  # CJK Unified Ideographs Extension B
        '\U0002A700-\U0002B73F'  # CJK Unified Ideographs Extension C
        '\U0002B740-\U0002B81F'  # CJK Unified Ideographs Extension D
        '\U0002F800-\U0002FA1F'  # CJK Compatibility Ideographs Supplement
    )
#: Character code ranges for the Kangxi radicals and CJK Radicals Supplement.
radicals = (
    '\u2F00-\u2FD5'  # Kangxi Radicals
    '\u2E80-\u2EF3'  # CJK Radicals Supplement
)
#: A string containing Chinese punctuation marks (non-stops).
non_stops = (
    # Fullwidth ASCII variants
    '\uFF02\uFF03\uFF04\uFF05\uFF06\uFF07\uFF08\uFF09\uFF0A\uFF0B\uFF0C\uFF0D'
    '\uFF0F\uFF1A\uFF1B\uFF1C\uFF1D\uFF1E\uFF20\uFF3B\uFF3C\uFF3D\uFF3E\uFF3F'
    '\uFF40\uFF5B\uFF5C\uFF5D\uFF5E\uFF5F\uFF60'
    # Halfwidth CJK punctuation
    '\uFF62\uFF63\uFF64'
    # CJK symbols and punctuation
    '\u3000\u3001\u3003'
    # CJK angle and corner brackets
    '\u300B\u300C\u300D\u300E\u300F\u3010\u3011'
    # CJK brackets and symbols/punctuation
    '\u3014\u3015\u3016\u3017\u3018\u3019\u301A\u301B\u301C\u301D\u301E\u301F'
    # Other CJK symbols
    '\u3030'
    # Special CJK indicators
    '\u303E\u303F'
    # Dashes
    '\u2013\u2014'
    # Quotation marks and apostrophe
    '\u2018\u2019\u201B\u201C\u201D\u201E\u201F'
    # General punctuation
    '\u2026\u2027'
    # Overscores and underscores
    '\uFE4F'
)
#: A string of Chinese stops.
stops = (
    '\uFF01'  # Fullwidth exclamation mark
    '\uFF1F'  # Fullwidth question mark
    '\uFF61'  # Halfwidth ideographic full stop
    '\u3002'  # Ideographic full stop
)
#: A string containing all Chinese punctuation.
punctuation = non_stops + stops
# A sentence end is defined by a stop followed by zero or more
# container-closing marks (e.g. quotation or brackets).
_sentence_end = '[%(stops)s][」﹂”』’》)]}〕〗〙〛〉】]*' % {'stops': stops}
#: A regular expression pattern for a Chinese sentence. A sentence is defined
#: as a series of characters and non-stop punctuation marks followed by a stop
#: and zero or more container-closing punctuation marks (e.g. apostrophe or
#: brackets).
sent = sentence = (
    '[%(characters)s%(radicals)s%(non_stops)s]*%(sentence_end)s'
) % {'characters': characters, 'radicals': radicals, 'non_stops': non_stops,
     'sentence_end': _sentence_end}
| {
"content_hash": "8c2e287205c26c7f7913941e8b74c480",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 32.35632183908046,
"alnum_prop": 0.6884547069271758,
"repo_name": "JohnnyZhao/zhon",
"id": "22d0436f1a595a1cdccfc569cb2853fcb93a05f6",
"size": "2869",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zhon/hanzi.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import os
from warnings import warn
from collections import Mapping
from .. import yaml_io
import numpy as np
from .. Error import GroupMissingDataError
from . Group import Group, Descriptor
from . Scheme import GroupAdditivityScheme
from . DataDir import get_data_dir
class GroupLibrary(Mapping):
    """Represent library of contributing properties organized by group.
    The set of properties that may be represented in a :class:`GroupLibrary` is
    extensible.  Contributing properties are represented by *property sets*
    organized by group.  See the manual for a list of available property sets.
    .. note::
        Because the *property set* system is extensible, the module within
        which a particular *property set* is defined and registered must be
        imported before loading a group library that contains data for that
        type of *property set*.
    To estimate properties, call :meth:`GroupLibrary.estimate()` with the
    *property set* `name` and set of groups contained in the chemical
    structure of interest.  The properties estimate will be returned as an
    object whose type depends on the particular *property set*.
    To determine which groups are present in a particular chemical structure,
    use :meth:`GroupLibrary.match_groups()`.
    Data in multiple group libraries can be combined so long as the groups
    they contain are defined within compatible schemes.  See
    :meth:`GroupLibrary.update()`.
    """
    _property_set_estimator_types = {}
    _property_set_group_yaml_types = {}
    @classmethod
    def register_property_set_type(cls, name, group_yaml_type, estimator_type):
        """(class method) Register new property set type.
        Parameters
        ----------
        name : str
            Name of new property set type.
        group_yaml_type : str
            Name of property set type in the YAML type namespace.
        estimator_type : class
            The provided class is instantiated when an estimate is to be made
            for a particular set of groups.  The constructor should accept the
            following parameters:
                library : :class:`GroupLibrary`
                    library which is to be used to estimate these properties.
                groups : mapping
                    Map from :class:`Group` to int or float specifying counts
                    of each group in the chemical structure.
        """
        if name in cls._property_set_group_yaml_types:
            raise KeyError('Property set %r already registered.' % name)
        cls._property_set_group_yaml_types[name] = group_yaml_type
        cls._property_set_estimator_types[name] = estimator_type
    def __init__(self, scheme, contents=None, uq_contents=None, path=None):
        """Initialize library of contributing properties organized by group.
        Parameters
        ----------
        scheme : :class:`GroupScheme`
            Specify group-additivity scheme to use.
        contents : mapping or list, optional
            Define initial contents of the library either as mapping or list
            of (`key`, `value`) pairs.  Defaults to empty.
        uq_contents : mapping, optional
            Uncertainty-quantification data.  Defaults to empty.
        Other Parameters
        ----------------
        path : str
            File-system path library was loaded from or should be saved to by
            default.
        """
        self.scheme = scheme
        self.path = path
        # Bug fix: ``contents={}`` / ``uq_contents={}`` were mutable default
        # arguments, shared by every instance constructed without them; use
        # None sentinels instead.
        if contents is None:
            contents = {}
        if isinstance(contents, Mapping):
            contents = list(contents.items())
        self.contents = dict((group, property_sets)
                             for (group, property_sets) in contents)
        self.uq_contents = uq_contents if uq_contents is not None else {}
    def GetDescriptors(self, mol):
        """Determine groups appearing in chemical structure `chem`.
        Parameters
        ----------
        mol : :class:`rdkit.mol`
            Specify chemical structure to match groups for.
        Returns
        -------
        groups : mapping
            Map from :class:`Group` to int or float identifying groups and
            their number of occurence in the structure.
        """
        # NOTE(review): stores the molecule under ``self.name`` -- unusual
        # attribute name; confirm downstream consumers before renaming.
        self.name = mol
        return self.scheme.GetDescriptors(mol)
    def Estimate(self, groups, property_set_name):
        """Estimate set of properties for chemical.
        Parameters
        ----------
        groups : mapping (dictionary)
            Map from :class:`Group` to int or float specifying counts of each
            group in the chemical structure.
        property_set_name : str
            Name of property set to estimate.
        Returns
        -------
        estimated_properties : (varies)
            The estimated properties, an object whose type depends on the
            particular property set.
        Raises
        ------
        KeyError
            If `property_set_name` is not a registered property set.
        GroupMissingDataError
            If any of `groups` lacks data for the requested property set.
        """
        if property_set_name not in self._property_set_estimator_types:
            raise KeyError('Invalid property_set name: %r' % property_set_name)
        # Verify groups present.
        missing_groups = [group for group in groups
                          if property_set_name not in self[group]]
        if missing_groups:
            raise GroupMissingDataError(missing_groups, property_set_name)
        estimator_type = self._property_set_estimator_types[property_set_name]
        return estimator_type(self, groups)
    def __contains__(self, group):
        """Test if this library contains contributing properties for `group`.
        Parameters
        ----------
        group : :class:`Group`
            Group whose membership is being tested.
        Returns
        -------
        result : bool
            True if this library has properties for `group`.
        """
        return group in self.contents
    def __iter__(self):
        """Return iterator over all groups with property data in this library.
        """
        return iter(self.contents)
    def __len__(self):
        """Return number of groups with properties in this library."""
        return len(self.contents)
    def __getitem__(self, group):
        """Return contributing properties sets for `group`.
        If no properties exist for `group`, then return ``{}`` instead of
        raising an exception.
        Parameters
        ----------
        group : :class:`Group`
            Identify group whose property sets are to be retrieved.
        Returns
        -------
        property_sets : dict
            Sets of contributing properties for `group`.
        """
        return self.contents.get(group, {})
    @classmethod
    def Load(cls, path):
        """(class method) Load group-additivity library from file-system
        `path` or builtin.
        Parameters
        ----------
        path : str
            Specify either the path to a file containing the data or a symbolic
            name of a builtin library to load (*e.g.* ``gas_benson`` to load
            gas phase Benson groups.)
        Returns
        -------
        lib : :class:`GroupLibrary`
            Group library containing the loaded data.
        """
        if os.sep not in path and '.' not in path and not os.path.exists(path):
            # [JTF] where's our data directory?
            base_path = os.path.join(get_data_dir(), path)
            # We want to load the library.yaml in that directory:
            path = os.path.join(base_path, 'library.yaml')
        else:
            # The base path is the directory containing whatever file/directory
            # is referenced by path:
            base_path = os.path.dirname(path)
        # Load the scheme.yaml from the selected data directory:
        scheme = GroupAdditivityScheme.Load(os.path.join(base_path,
                                                         'scheme.yaml'))
        # Use that scheme to load the rest of the library:
        return cls._do_load(path, base_path, scheme)
    @classmethod
    def _Load(cls, path, scheme):
        # Like Load(), but reuses an already-loaded scheme (for includes).
        if os.sep not in path and '.' not in path and not os.path.exists(path):
            # [JTF] where's our data directory?
            base_path = os.path.join(get_data_dir(), path)
            # We want to load the library.yaml in that directory:
            path = os.path.join(base_path, 'library.yaml')
        else:
            # The base path is the directory containing whatever file/directory
            # is referenced by path:
            base_path = os.path.dirname(path)
        # Use the scheme passed to us to load the rest of the library:
        return cls._do_load(path, base_path, scheme)
    @classmethod
    def _do_load(cls, path, base_path, scheme):
        # Parse the YAML library file, resolve group/descriptor property
        # sets and UQ data, then merge any included libraries.
        context = {'base_path': base_path}
        with open(path) as f:
            lib_data = yaml_io.load(
                yaml_io.parse(f.read()), context, loader=cls._yaml_loader)
        context['units'] = lib_data.units
        group_properties = lib_data.groups
        other_descriptor_properties = lib_data.other_descriptors
        UQ = lib_data.UQ
        if cls._property_set_group_yaml_types:
            # Prepare property_sets loader.
            property_sets_loader = yaml_io.make_object_loader(yaml_io.parse(
                '\n'.join(('%r:\n    type: %r\n    optional: true'
                           % (str(name),
                              str(cls._property_set_group_yaml_types[name])))
                          for name in cls._property_set_group_yaml_types)))
            # Read all properties.
            lib_contents = {}
            for name in group_properties:
                group = Group.parse(scheme, name)
                if group in lib_contents:
                    raise KeyError('Multiple definitions of group %s' % group)
                property_sets = yaml_io.load(
                    group_properties[name], context,
                    loader=property_sets_loader)
                lib_contents[group] = property_sets
            for name in other_descriptor_properties:
                descriptor = Descriptor(scheme, name)
                if descriptor in lib_contents:
                    raise KeyError('Multiple definitions of descriptor %s' %
                                   descriptor)
                property_sets = yaml_io.load(
                    other_descriptor_properties[name], context,
                    loader=property_sets_loader)
                lib_contents[descriptor] = property_sets
            # Read UQ data
            uq_contents = {}
            if UQ:
                uq_contents['RMSE'] = yaml_io.load(
                    UQ['RMSE'], context,
                    loader=property_sets_loader)
                uq_contents['descriptors'] = UQ['InvCovMat']['groups']
                uq_contents['mat'] = np.array(UQ['InvCovMat']['mat'])
                uq_contents['dof'] = UQ['DOF']
        else:
            # No property sets defined.
            warn('GroupLibrary.load(): No property sets defined.')
            lib_contents = {}
            uq_contents = {}
        new_lib = cls(scheme, lib_contents, uq_contents, path=path)
        # Update with included content.
        for include_path in lib_data.include:
            new_lib.Update(cls._Load(os.path.join(base_path,
                                                  include_path), scheme))
        return new_lib
    def Update(self, lib, overwrite=False):
        """Add complete contents of `lib` into this library.
        Parameters
        ----------
        lib : :class:`GroupLibrary`
            Library to import from.
        overwrite : bool
            If True, then existing data may be overwritten by data from `lib`.
        """
        for (group, other_property_sets) in list(lib.items()):
            if group not in self.contents:
                self.contents[group] = {}
            property_sets = self.contents[group]
            for name in other_property_sets:
                if name not in property_sets:
                    property_sets[name] = other_property_sets[name].copy()
                else:
                    property_sets[name].update(
                        other_property_sets[name], overwrite)
        # UQ stuff can only be loaded once
        if self.uq_contents and lib.uq_contents:
            raise ValueError('More than one uncertainty quantification',
                             'information provided')
        if not self.uq_contents:
            self.uq_contents = lib.uq_contents
    _yaml_loader = yaml_io.make_object_loader(yaml_io.parse("""
units:
    type: mapping
    default: {}
include:
    type: list
    item_type: string
    default: []
groups:
    type: mapping
    default: {}
other_descriptors:
    type: mapping
    default: {}
UQ:
    type: mapping
    default: {}
"""))
| {
"content_hash": "20227b4e36be5b44fe11023a9f5ad8d6",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 79,
"avg_line_length": 37.51020408163265,
"alnum_prop": 0.5789678221669516,
"repo_name": "VlachosGroup/VlachosGroupAdditivity",
"id": "b5611acf134a542f37e86ba3aff1faedf03e5456",
"size": "12866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgradd/GroupAdd/Library.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "288237"
},
{
"name": "Shell",
"bytes": "91"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Promotes 'User_ID' to the primary key of user_details: the implicit
    # auto 'id' column is dropped and the model gains default pk ordering.

    dependencies = [
        ('login', '0003_auto_20151113_1545'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user_details',
            options={'ordering': ['pk'], 'managed': True},
        ),
        # Remove the auto-generated surrogate key before re-keying.
        migrations.RemoveField(
            model_name='user_details',
            name='id',
        ),
        # 8-char business key (e.g. the 'DMS_0000' default) becomes the pk.
        migrations.AlterField(
            model_name='user_details',
            name='User_ID',
            field=models.CharField(default=b'DMS_0000', max_length=8, serialize=False, primary_key=True),
        ),
    ]
| {
"content_hash": "539f7b1dc93088b89239d0840b411e8e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 105,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.5628654970760234,
"repo_name": "mdsafwan/Deal-My-Stuff",
"id": "c0273127b731c6b402920ec57ee96c91e7932dcd",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/migrations/0004_auto_20151114_0029.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "898"
},
{
"name": "C",
"bytes": "521537"
},
{
"name": "C++",
"bytes": "125678"
},
{
"name": "CSS",
"bytes": "127882"
},
{
"name": "HTML",
"bytes": "172987"
},
{
"name": "JavaScript",
"bytes": "256471"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "7186078"
}
],
"symlink_target": ""
} |
import sys
from leapp.exceptions import InvalidTopicDefinitionError
from leapp.utils.meta import get_flattened_subclasses, with_metaclass
class TopicMeta(type):
    """Metaclass that registers every topic class as it is defined.

    Each class created through this metaclass is published as an attribute
    of the module this metaclass lives in, and starts with an empty
    ``messages`` tuple that model classes later extend.
    """

    def __new__(mcs, name, bases, attrs):
        new_cls = super(TopicMeta, mcs).__new__(mcs, name, bases, attrs)
        # Expose the freshly created class on the defining module so topics
        # are importable directly from it.
        setattr(sys.modules[mcs.__module__], name, new_cls)
        new_cls.messages = ()
        return new_cls
class Topic(with_metaclass(TopicMeta)):
    """ Base class for all :ref:`topics <terminology:topic>`"""

    # Subclasses must override this with a unique, non-empty string;
    # get_topics() rejects topics without one.
    name = None
    """ Name of the topic """

    messages = ()
    """
    Tuple of :py:class:`leapp.models.Model` derived classes that are using this topic are automatically added to this
    variable.
    """
class ErrorTopic(Topic):
    """
    A special topic for errors during the execution.
    """

    # Fixed, well-known name so error messages can always be routed here.
    name = 'errors'
def get_topics():
    """
    :return: All registered :py:class:`leapp.topics.Topic` derived classes

    :raises InvalidTopicDefinitionError: If any registered topic subclass
        lacks a non-empty ``name`` attribute.
    """
    topics = get_flattened_subclasses(Topic)
    # Validate every topic up front; a nameless topic is a definition bug.
    # (The original iterated a pointless `(topic for topic in topics)`
    # generator wrapper — removed.)
    for topic in topics:
        if not getattr(topic, 'name', None):
            raise InvalidTopicDefinitionError(
                'Topic {} does not contain a name attribute'.format(topic))
    return topics
| {
"content_hash": "a208a936f71822d0b70af512938baf37",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 117,
"avg_line_length": 27.020833333333332,
"alnum_prop": 0.6468774094063223,
"repo_name": "vinzenz/prototype",
"id": "0331ec25ceef8a6f5cba9fb91d748223873eecea",
"size": "1297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leapp/topics/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1688"
},
{
"name": "HTML",
"bytes": "35793"
},
{
"name": "Makefile",
"bytes": "927"
},
{
"name": "PLpgSQL",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "290041"
},
{
"name": "Ruby",
"bytes": "1363"
},
{
"name": "Shell",
"bytes": "1416"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from transfer.functions import run_transfer
class Command(BaseCommand):
    # Shown by `manage.py help transfer_children`.
    help = 'Transfer children '

    def handle(self, *args, **options):
        # All the work lives in transfer.functions.run_transfer; this
        # command is only the manage.py entry point.
        run_transfer()
| {
"content_hash": "55d7555ffe69f138063b4dce541beb1c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 51,
"avg_line_length": 22.3,
"alnum_prop": 0.7219730941704036,
"repo_name": "mitrofun/kids2",
"id": "4a8cf61567a0ad83f25d2e03cc59b1e3e82771d7",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/transfer/management/commands/transfer_children.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3617"
},
{
"name": "HTML",
"bytes": "36917"
},
{
"name": "JavaScript",
"bytes": "17498"
},
{
"name": "Makefile",
"bytes": "473"
},
{
"name": "Python",
"bytes": "124231"
}
],
"symlink_target": ""
} |
from IPython import get_ipython
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import (HasFocus, HasSelection, ViInsertMode)
from prompt_toolkit.key_binding.vi_state import InputMode

# The running interactive shell instance (None when not inside IPython).
ip = get_ipython()


def switch_mode(event):
    " Map 'kj' to Escape. "
    # Flip prompt_toolkit's vi state back to NAVIGATION (i.e. leave insert
    # mode), emulating the common vim `inoremap kj <Esc>` mapping.
    vi_state = event.cli.vi_state
    vi_state.input_mode = InputMode.NAVIGATION
# Register the shortcut if IPython is using prompt_toolkit
if hasattr(ip, 'pt_app') and ip.pt_app:
    insert_mode = ViInsertMode()
    registry = ip.pt_app.key_bindings
    # Typing 'k' then 'j' in vi insert mode (with no active selection)
    # triggers switch_mode, i.e. acts like pressing Escape.
    registry.add_binding(
        'k', 'j',
        filter=(HasFocus(DEFAULT_BUFFER)
                & ~HasSelection()
                & insert_mode))(switch_mode)

    # Add back standard readline-like mappings, even when using ViInsertMode
    from prompt_toolkit.key_binding.bindings.named_commands import get_by_name
    handle = registry.add
    # Cursor movement (emacs-style) available in all vi modes.
    handle('c-a')(get_by_name('beginning-of-line'))
    handle('c-b')(get_by_name('backward-char'))
    handle('c-e')(get_by_name('end-of-line'))
    handle('c-f')(get_by_name('forward-char'))
    handle('c-left')(get_by_name('backward-word'))
    handle('c-right')(get_by_name('forward-word'))
    handle('escape', 'b')(get_by_name('backward-word'))
    # Word-editing commands restricted to insert mode so they don't clash
    # with vi normal-mode keys.
    handle('escape', 'c', filter=insert_mode)(get_by_name('capitalize-word'))
    handle('escape', 'd', filter=insert_mode)(get_by_name('kill-word'))
    handle('escape', 'f')(get_by_name('forward-word'))
    handle('escape', 'l', filter=insert_mode)(get_by_name('downcase-word'))
    handle('escape', 'u', filter=insert_mode)(get_by_name('uppercase-word'))
    handle('escape', 'y', filter=insert_mode)(get_by_name('yank-pop'))
    handle('escape', 'backspace', filter=insert_mode)(get_by_name('backward-kill-word'))
    handle('escape', '\\', filter=insert_mode)(get_by_name('delete-horizontal-space'))
| {
"content_hash": "848321ba4f4c931f4713386a97000ee3",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 88,
"avg_line_length": 42.86046511627907,
"alnum_prop": 0.6673901247965274,
"repo_name": "achalddave/dotfiles",
"id": "de9cdf36f5b29ba460448af4c32aa6238dc4a831",
"size": "1843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/ipython/mappings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "320"
},
{
"name": "Python",
"bytes": "25585"
},
{
"name": "Shell",
"bytes": "36383"
},
{
"name": "Vim Snippet",
"bytes": "5140"
},
{
"name": "Vim script",
"bytes": "28942"
}
],
"symlink_target": ""
} |
from wtforms import validators
from werkzeug.datastructures import FileStorage
import wtforms
def validate_required_iff(**kwargs):
    """
    Used as a validator within a wtforms.Form

    This implements a conditional DataRequired
    Each of the kwargs is a condition that must be met in the form
    Otherwise, no validation is done

    Returns
    -------
    callable
        A wtforms-compatible ``_validator(form, field)`` function.
    """
    def _validator(form, field):
        # The field is required only when every given form attribute equals
        # its expected value.  (.items() replaces the Python-2-only
        # .iteritems() so this also runs on Python 3.)
        all_conditions_met = all(
            getattr(form, key).data == value
            for key, value in kwargs.items())

        if all_conditions_met:
            # Verify that data exists
            if field.data is None \
                    or (isinstance(field.data, (str, unicode))
                        and not field.data.strip()) \
                    or (isinstance(field.data, FileStorage)
                        and not field.data.filename.strip()):
                raise validators.ValidationError('This field is required.')
        else:
            # This field is not required, ignore other errors
            field.errors[:] = []
            raise validators.StopValidation()

    return _validator
def validate_greater_than(fieldname):
    """
    Compares the value of two fields the value of self is to be greater than the supplied field.

    :param fieldname:
        The name of the other field to compare to.
    """
    def _validator(form, field):
        # Resolve the sibling field; an unknown name is surfaced as a
        # validation error (mirrors wtforms' built-in EqualTo validator).
        try:
            reference = form[fieldname]
        except KeyError:
            raise validators.ValidationError(field.gettext(u"Invalid field name '%s'.") % fieldname)
        # Empty values are ignored; only filled-in fields are compared.
        if field.data != '' and field.data < reference.data:
            message = field.gettext(u'Field must be greater than %s.' % fieldname)
            raise validators.ValidationError(message)

    return _validator
class Tooltip(object):
    """
    An HTML form tooltip.

    Renders a question-mark glyph whose ``title`` attribute carries the
    tooltip text for the named form field.
    """

    def __init__(self, field_id, for_name, text):
        self.field_id = field_id
        self.text = text
        self.for_name = for_name

    def __str__(self):
        return self()

    def __unicode__(self):
        return self()

    def __html__(self):
        return self()

    def __call__(self, text=None, **kwargs):
        # 'for_' is accepted as an alias because 'for' is a Python keyword.
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)

        return wtforms.widgets.HTMLString(
            ('<span name="%s_explanation"'
             ' class="explanation-tooltip glyphicon glyphicon-question-sign"'
             ' data-container="body"'
             ' title="%s"'
             ' ></span>') % (self.for_name, self.text))

    def __repr__(self):
        # BUG FIX: the original format string had only two %r placeholders
        # for three arguments, so calling repr() raised TypeError.
        return 'Tooltip(%r, %r, %r)' % (self.field_id, self.for_name, self.text)
class Explanation(object):
    """
    An HTML form explanation.

    Renders a question-mark link that pops up the rendered contents of an
    HTML template file in a bootbox dialog.
    """

    def __init__(self, field_id, for_name, file):
        self.field_id = field_id
        self.file = file
        self.for_name = for_name

    def __str__(self):
        return self()

    def __unicode__(self):
        return self()

    def __html__(self):
        return self()

    def __call__(self, file=None, **kwargs):
        # 'for_' is accepted as an alias because 'for' is a Python keyword.
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)

        # Imported here (not at module level) to avoid a circular import
        # with digits.webapp at load time.
        import flask
        from digits.webapp import app

        html = ''
        # get the text from the html file
        with app.app_context():
            html = flask.render_template(file if file else self.file)

        if len(html) == 0:
            return ''

        return wtforms.widgets.HTMLString(
            ('<div id="%s_explanation" style="display:none;">\n'
             '%s'
             '</div>\n'
             '<a href=# onClick="bootbox.alert($(\'#%s_explanation\').html()); return false;"><span class="glyphicon glyphicon-question-sign"></span></a>\n'
             ) % (self.for_name, html, self.for_name))

    def __repr__(self):
        # BUG FIX: three values were supplied to a format string with only
        # two %r placeholders, making repr() raise TypeError.
        return 'Explanation(%r, %r, %r)' % (self.field_id, self.for_name, self.file)
class IntegerField(wtforms.IntegerField):
    """wtforms IntegerField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(IntegerField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class FloatField(wtforms.FloatField):
    """wtforms FloatField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(FloatField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class SelectField(wtforms.SelectField):
    """wtforms SelectField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(SelectField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class SelectMultipleField(wtforms.SelectMultipleField):
    """wtforms SelectMultipleField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(SelectMultipleField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class TextField(wtforms.TextField):
    """wtforms TextField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(TextField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class StringField(wtforms.StringField):
    """wtforms StringField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(StringField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class FileInput(object):
    """
    Renders a file input chooser field.

    Produces a Bootstrap-styled "Browse" button next to a read-only text
    box; site JavaScript mirrors the chosen filename into the text box.
    """

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        # Three cooperating elements: a styled button wrapping the real
        # (hidden) file input, and a read-only display text box.
        return wtforms.widgets.HTMLString(
            ('<div class="input-group">' +
             ' <span class="input-group-btn">' +
             ' <span class="btn btn-info btn-file" %s>' +
             ' Browse…' +
             ' <input %s>' +
             ' </span>' +
             ' </span>' +
             ' <input class="form-control" %s readonly>' +
             '</div>') % (wtforms.widgets.html_params(id=field.name + '_btn', name=field.name + '_btn'),
                          wtforms.widgets.html_params(name=field.name, type='file', **kwargs),
                          wtforms.widgets.html_params(id=field.id + '_text', name=field.name + '_text', type='text')))
class FileField(wtforms.FileField):
    """wtforms FileField using the custom FileInput widget, with DIGITS help renderers."""

    # Comment out the following line to use the native file input
    widget = FileInput()

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(FileField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class TextAreaField(wtforms.TextAreaField):
    """wtforms TextAreaField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(TextAreaField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
class BooleanField(wtforms.BooleanField):
    """wtforms BooleanField extended with DIGITS tooltip/explanation help."""

    def __init__(self, label='', validators=None, tooltip='', explanation_file='', **kwargs):
        super(BooleanField, self).__init__(label, validators, **kwargs)
        # Attach the help renderers templates use alongside the widget.
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
| {
"content_hash": "b9421038accb61c87ff72ed7680d7070",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 156,
"avg_line_length": 37.57272727272727,
"alnum_prop": 0.5952092910718606,
"repo_name": "delectable/DIGITS",
"id": "721e92575a541eb173ad19c862a46601836d7353",
"size": "8331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digits/utils/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1267"
},
{
"name": "HTML",
"bytes": "156266"
},
{
"name": "JavaScript",
"bytes": "108357"
},
{
"name": "Python",
"bytes": "471442"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
} |
from optparse import OptionParser
import docker
import logging
import json
from configuration.ganger_conf import GangerConfiguration
import util.utility as utility
from dns.ganger_dns import GangerDns
# NOTE(review): 'global' at module scope is a no-op; kept for byte-compat.
global docker_host
# Lazily constructed docker client singleton (see get_client) and the
# docker daemon URL it connects to, set from CLI options in __main__.
docker_client = None
docker_host = None
logger = logging.getLogger(__name__)
def get_client():
    """Return the lazily created, module-wide ``docker.Client`` singleton.

    The client is constructed on first use from the module-level
    ``docker_host`` URL and cached in ``docker_client``.
    """
    global docker_client
    global docker_host
    # Idiom fix: compare against None with 'is', not '=='.
    if docker_client is None:
        docker_client = docker.Client(base_url=docker_host)
    return docker_client
def start_dockers(docker_spec_list):
    """Ensure every container described in *docker_spec_list* exists and runs.

    For each spec: create and start a container when none with that name
    exists; start it when it exists but is stopped; leave running ones
    alone.  Finally refresh the dnsmasq hostname mapping.

    Parameters
    ----------
    docker_spec_list : list
        Container specs exposing name()/image()/tag()/hostname()/command()/
        memory()/cpu() accessors.
    """
    cli = get_client()
    containers = cli.containers(all=True)
    logger.debug("ps containers. %s" % json.dumps(containers, sort_keys=True, indent=2))
    for spec in docker_spec_list:
        results = utility.filter_contianers(containers, {"Names": "/" + spec.name()})
        logger.debug("filtered containers spec:%s " % results)
        if len(results) == 0:
            # no containers are matched
            # create and start it
            img = spec.image() if (spec.tag() is None) else (spec.image() + ":" + spec.tag())
            hname = spec.hostname()
            logger.debug("docker hostname => %s" % hname)
            # BUG FIX: the local used to be named 'docker', shadowing the
            # imported module and breaking 'docker.client.APIError' below
            # (UnboundLocalError in the else branch).
            container = cli.create_container(image=img,
                                             command=spec.command(), hostname=hname,
                                             detach=True, name=spec.name(), mem_limit=spec.memory(),
                                             cpu_shares=spec.cpu(), tty=True, stdin_open=True)
            logger.info("container is created. Info: %s" % container)
            started = cli.start(container["Id"])
            logger.info("docker is started. Info: %s" % started)
        else:
            if results[0]["Status"].find("Up") >= 0:
                logger.info("Container %s is started." % spec.name())
            else:
                try:
                    cli.start(results[0]["Id"])
                    logger.info("start container %s successfully." % spec.name())
                except docker.client.APIError as e:
                    # BUG FIX: '%' previously bound only spec.name(); both
                    # values must be passed as a tuple or logging raised.
                    logger.error("error when start container %s. %s" % (spec.name(), e))
    configure_docker_hostnames_to_dnsmasq()
def configure_docker_hostnames_to_dnsmasq():
    """Collect hostname -> IP pairs of running containers and push them to dnsmasq."""
    cli = get_client()
    try:
        running = cli.containers()
    except docker.client.APIError as e:
        # Without a container listing there is nothing to publish.
        logger.error("error when doing docker ps.")
        return

    hostname2ip_map = {}
    for entry in running:
        try:
            details = cli.inspect_container(entry)
            pair = utility.parse_hostname_ip(details)
            hostname2ip_map[pair[0]] = pair[1]
        except docker.client.APIError as e:
            # Skip containers we cannot inspect; publish the rest.
            logger.error("error when inspecting container:" + entry["Id"])

    GangerDns.generate_dnsmasq_host(hostname2ip_map)
    GangerDns.restart_dns_masq()
if __name__ == "__main__":
parser = OptionParser(usage = "usage: %prog [options] arg")
parser.add_option("-d", "--dockers", dest="docker_description", action = "store",
help = "the file which describe docker container information",
metavar = "FILE")
parser.add_option("-l", "--loglevel", dest = "log_level",
default = "INFO", action = "store", help = "set log level",
metavar = "[DEBUG | INFO | WARN | ERROR]")
parser.add_option("-u", "--docker-url", dest = "url",
default = "unix:///var/run/docker.sock",
help = "host url of docker server",
metavar = "URI")
parser.add_option("-n", "--dns", dest = "update_dns", action= "store_true",
default = False,
help = "if this argument is set, update dnsmasq configuration")
parser.add_option("-i", "--init-dns", dest = "init_dns", action = "store_true",
default = False, help = "configure dnsmasq")
parser.add_option("-b", "--docker-bridge", dest = "bridge", action = "store",
default = "docker0", help = "the name of docker network bridge")
parser.add_option("-s", "--dns-server", dest = "dserver", action = "store",
help = "upstream dns server name")
#parser.add_option("", "")
(options, args) = parser.parse_args()
numeric_level = getattr(logging, options.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.log_level)
logging.basicConfig(level=numeric_level)
# set docker host
docker_host = options.url
if options.init_dns:
gangerdns = GangerDns(options.dserver, options.bridge)
gangerdns.configure_dnsmasq()
GangerDns.restart_dns_masq()
elif options.docker_description != None:
conf = GangerConfiguration(options.docker_description)
start_dockers(conf.get_docker_specs())
elif options.update_dns:
configure_docker_hostnames_to_dnsmasq()
| {
"content_hash": "b72818d40e23a8cd8f6ba0f409881abf",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 94,
"avg_line_length": 38.785714285714285,
"alnum_prop": 0.6028238182934316,
"repo_name": "GordonWang/ganger",
"id": "610e7d89a2ba51d0e3abc7208dcf2647bb85c7fc",
"size": "4887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ganger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17288"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn import cluster
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.metrics import homogeneity_score, completeness_score, v_measure_score, adjusted_rand_score, \
adjusted_mutual_info_score, silhouette_score
from sklearn.preprocessing import scale
# Load the handwritten-digits dataset and standardise every feature to
# zero mean / unit variance before clustering.
digits = datasets.load_digits()
data = scale(digits.data)

# Hold out 25% of the samples; fixed seed keeps the split reproducible.
# NOTE(review): sklearn.cross_validation is the long-deprecated module
# name (modern sklearn uses sklearn.model_selection) — this script targets
# an old sklearn release.
X_train, X_test, y_train, y_test, images_train, images_test = train_test_split(data, digits.target, digits.images,
                                                                               test_size=0.25, random_state=42)

n_samples, n_features = X_train.shape

print(n_samples)
print(n_features)

n_digits = len(np.unique(y_train))

# One cluster per digit class, k-means++ seeding, fixed random state.
clf = cluster.KMeans(init='k-means++', n_clusters=10, random_state=42)
clf.fit(X_train)

# Visualization code
# fig = plt.figure(figsize=(8, 3))
#
# fig.suptitle('Cluster Center Images', fontsize=14, fontweight='bold')
#
# for i in range(10):
#     # Initialize subplots in a grid of 2X5, at i+1th position
#     ax = fig.add_subplot(2, 5, 1 + i)
#     # Display images
#     ax.imshow(clf.cluster_centers_[i].reshape((8, 8)), cmap=plt.cm.binary)
#     # Don't show the axes
#     plt.axis('off')
#
# plt.show()

y_pred = clf.predict(X_test)

print(y_pred[:100])
print(y_test[:100])

print(clf.cluster_centers_.shape)

# X_iso = Isomap(n_neighbors=10).fit_transform(X_train)
#
# clusters = clf.fit_predict(X_train)
#
# fig, ax = plt.subplots(1, 2, figsize=(8, 4))
#
# fig.suptitle('Predicted Versus Training Labels', fontsize=14, fontweight='bold')
# fig.subplots_adjust(top=0.85)
#
# ax[0].scatter(X_iso[:, 0], X_iso[:, 1], c=clusters)
# ax[0].set_title('Predicted Training Labels')
# ax[1].scatter(X_iso[:, 0], X_iso[:, 1], c=y_train)
# ax[1].set_title('Actual Training Labels')
#
# plt.show()

# NOTE(review): cluster ids are arbitrary labels, so this confusion matrix
# compares cluster assignments to digit labels rather than a classifier's
# accuracy; the label-invariant scores below are the meaningful metrics.
print(metrics.confusion_matrix(y_test, y_pred))

print('% 9s' % 'inertia homo compl v-meas ARI AMI silhouette')
print('%i %.3f %.3f %.3f %.3f %.3f %.3f'
      % (clf.inertia_,
         homogeneity_score(y_test, y_pred),
         completeness_score(y_test, y_pred),
         v_measure_score(y_test, y_pred),
         adjusted_rand_score(y_test, y_pred),
         adjusted_mutual_info_score(y_test, y_pred),
         silhouette_score(X_test, y_pred, metric='euclidean')))
| {
"content_hash": "f04599c9f95125e4a4a45820cf75e2b7",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 114,
"avg_line_length": 31.68918918918919,
"alnum_prop": 0.646908315565032,
"repo_name": "monal94/digits-scikit-learn",
"id": "86084deded9b7dd37b95209a644bb9f777d57291",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "k_means.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8035"
}
],
"symlink_target": ""
} |
"""
Commands that update or process the application data.
"""
from datetime import datetime
import json
from fabric.api import task
from facebook import GraphAPI
from twitter import Twitter, OAuth
import app_config
import copytext
@task(default=True)
def update():
    """
    Stub function for updating app-specific data.
    """
    # Intentionally a no-op; uncomment to refresh featured social content.
    #update_featured_social()
@task
def update_featured_social():
    """
    Update featured tweets

    Reads up to three featured tweet URLs and three Facebook post URLs from
    the COPY spreadsheet, fetches each via the Twitter / Facebook APIs, and
    writes the combined result to data/featured.json.
    """
    COPY = copytext.Copy(app_config.COPY_PATH)
    secrets = app_config.get_secrets()

    # Twitter
    print('Fetching tweets...')

    twitter_api = Twitter(
        auth=OAuth(
            secrets['TWITTER_API_OAUTH_TOKEN'],
            secrets['TWITTER_API_OAUTH_SECRET'],
            secrets['TWITTER_API_CONSUMER_KEY'],
            secrets['TWITTER_API_CONSUMER_SECRET']
        )
    )

    tweets = []

    for i in range(1, 4):
        tweet_url = COPY['share']['featured_tweet%i' % i]

        # Skip unset or errored spreadsheet cells.
        if isinstance(tweet_url, copytext.Error) or str(tweet_url).strip() == '':
            continue

        # The status id is the last path segment of the tweet URL.
        tweet_id = str(tweet_url).split('/')[-1]

        tweet = twitter_api.statuses.show(id=tweet_id)

        creation_date = datetime.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        tweet_url = 'http://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id'])

        photo = None
        html = tweet['text']
        subs = {}

        # Replace each media/url/hashtag entity span with an analytics-
        # tracked anchor tag; the first photo entity is kept separately.
        for media in tweet['entities'].get('media', []):
            original = tweet['text'][media['indices'][0]:media['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (media['url'], app_config.PROJECT_SLUG, tweet_url, media['display_url'])

            subs[original] = replacement

            if media['type'] == 'photo' and not photo:
                photo = {
                    'url': media['media_url']
                }

        for url in tweet['entities'].get('urls', []):
            original = tweet['text'][url['indices'][0]:url['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (url['url'], app_config.PROJECT_SLUG, tweet_url, url['display_url'])

            subs[original] = replacement

        for hashtag in tweet['entities'].get('hashtags', []):
            original = tweet['text'][hashtag['indices'][0]:hashtag['indices'][1]]
            replacement = '<a href="https://twitter.com/hashtag/%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'hashtag\', 0, \'%s\']);">%s</a>' % (hashtag['text'], app_config.PROJECT_SLUG, tweet_url, '#%s' % hashtag['text'])

            subs[original] = replacement

        for original, replacement in subs.items():
            html = html.replace(original, replacement)

        # https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
        tweets.append({
            'id': tweet['id'],
            'url': tweet_url,
            'html': html,
            'favorite_count': tweet['favorite_count'],
            'retweet_count': tweet['retweet_count'],
            'user': {
                'id': tweet['user']['id'],
                'name': tweet['user']['name'],
                'screen_name': tweet['user']['screen_name'],
                'profile_image_url': tweet['user']['profile_image_url'],
                'url': tweet['user']['url'],
            },
            'creation_date': creation_date,
            'photo': photo
        })

    # Facebook
    print('Fetching Facebook posts...')

    fb_api = GraphAPI(secrets['FACEBOOK_API_APP_TOKEN'])

    facebook_posts = []

    for i in range(1, 4):
        fb_url = COPY['share']['featured_facebook%i' % i]

        # Skip unset or errored spreadsheet cells.
        if isinstance(fb_url, copytext.Error) or str(fb_url).strip() == '':
            continue

        fb_id = str(fb_url).split('/')[-1]

        post = fb_api.get_object(fb_id)
        user = fb_api.get_object(post['from']['id'])
        user_picture = fb_api.get_object('%s/picture' % post['from']['id'])
        likes = fb_api.get_object('%s/likes' % fb_id, summary='true')
        comments = fb_api.get_object('%s/comments' % fb_id, summary='true')
        #shares = fb_api.get_object('%s/sharedposts' % fb_id)

        creation_date = datetime.strptime(post['created_time'],'%Y-%m-%dT%H:%M:%S+0000')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        # https://developers.facebook.com/docs/graph-api/reference/v2.0/post
        facebook_posts.append({
            'id': post['id'],
            'message': post['message'],
            'link': {
                'url': post['link'],
                'name': post['name'],
                'caption': (post['caption'] if 'caption' in post else None),
                'description': post['description'],
                'picture': post['picture']
            },
            'from': {
                'name': user['name'],
                'link': user['link'],
                'picture': user_picture['url']
            },
            'likes': likes['summary']['total_count'],
            'comments': comments['summary']['total_count'],
            #'shares': shares['summary']['total_count'],
            'creation_date': creation_date
        })

    # Render to JSON
    output = {
        'tweets': tweets,
        'facebook_posts': facebook_posts
    }

    with open('data/featured.json', 'w') as f:
        json.dump(output, f)
| {
"content_hash": "87d645093b4d7d17e8aec75c788fe337",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 271,
"avg_line_length": 35.36875,
"alnum_prop": 0.5317193850503622,
"repo_name": "PostDispatchInteractive/app-template",
"id": "1f0ced6b29502ae0fa569850cfb50b2f0a74ae87",
"size": "5682",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "fabfile/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "238"
},
{
"name": "HTML",
"bytes": "24530"
},
{
"name": "JavaScript",
"bytes": "196068"
},
{
"name": "Less",
"bytes": "36224"
},
{
"name": "Python",
"bytes": "77763"
},
{
"name": "Shell",
"bytes": "327"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the PasswordReset, Profile and EmailConfirmation tables."""
        # Adding model 'PasswordReset'
        db.create_table('accounts_passwordreset', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(related_name='password_resets', to=orm['auth.User'])),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('last_sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ))
        db.send_create_signal('accounts', ['PasswordReset'])

        # Adding model 'Profile'
        db.create_table('accounts_profile', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
            ('sex', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('birthday', self.gf('django.db.models.fields.DateField')()),
        ))
        db.send_create_signal('accounts', ['Profile'])

        # Adding model 'EmailConfirmation'
        db.create_table('accounts_emailconfirmation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='email_confirmations', to=orm['auth.User'])),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('last_sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ))
        db.send_create_signal('accounts', ['EmailConfirmation'])
def backwards(self, orm):
# Deleting model 'PasswordReset'
db.delete_table('accounts_passwordreset')
# Deleting model 'Profile'
db.delete_table('accounts_profile')
# Deleting model 'EmailConfirmation'
db.delete_table('accounts_emailconfirmation')
models = {
'accounts.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'last_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'email_confirmations'", 'to': "orm['auth.User']"})
},
'accounts.passwordreset': {
'Meta': {'object_name': 'PasswordReset'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'password_resets'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'last_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'accounts.profile': {
'Meta': {'object_name': 'Profile'},
'birthday': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
| {
"content_hash": "aba26568af86f873174a429a22b1d479",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 182,
"avg_line_length": 61.4375,
"alnum_prop": 0.5664874291527394,
"repo_name": "softak/webfaction_demo",
"id": "7c4364429c9ecffb0fa29af74a371cd3ae77f177",
"size": "6899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/accounts/migrations/0005_auto__add_passwordreset__add_profile__add_emailconfirmation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
"""
Write and read logs that can be re-parsed back into mostly intact records,
regardless of the contents of the log message.
"""
import logging
import os
import time
from collections import namedtuple
from conary.lib import util
class StructuredLogFormatter(logging.Formatter):
    """Format records as ``<hex size> <payload>`` so they can be re-parsed.

    The size prefix lets StructuredLogParser recover record boundaries
    even when the message itself contains newlines or binary garbage.
    Note this is Python 2 code (it references the ``unicode`` builtin).
    """
    def __init__(self):
        logging.Formatter.__init__(self,
                '[%(asctime)s] %(levelno)s %(name)s %(message)s',
                None)
    def formatTime(self, record, datefmt=None):
        # ISO 8601 timestamp, UTC only
        timetup = time.gmtime(record.created)
        timestampPart = time.strftime('%FT%T', timetup)
        # record.msecs holds milliseconds within the second; * 1000 yields
        # the six-digit microseconds field.
        return '%s.%06dZ' % (timestampPart, int(round(record.msecs * 1000)))
    def format(self, record):
        # Prefix each record with its size, this way newlines or binary garbage
        # in the message don't prevent the log from being parsed.
        payload = logging.Formatter.format(self, record)
        if isinstance(payload, unicode):
            # Python 2: normalize unicode payloads to UTF-8 bytes so the
            # byte count in the prefix matches what is written to disk.
            payload = payload.encode('utf8')
        size = len(payload) + 1 # add trailing newline (written by the handler)
        return '%x %s' % (size, payload)
_LogLine = namedtuple('_LogLine',
'timestamp level name message startPos endPos raw')
class StructuredLogParser(object):
    """Iterate over records written by StructuredLogFormatter.

    Yields logging.LogRecord objects when ``asRecords`` is True, otherwise
    _LogLine tuples. Iteration stops without consuming input at the first
    incomplete record, so a partially-written file can be re-read later.
    Python 2 module: uses the ``next()`` method name and ``long``.
    """
    def __init__(self, stream, asRecords=True):
        self.stream = stream
        self.asRecords = asRecords
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (would be __next__ on Python 3).
        startPos = self.stream.tell()
        buf = ''
        # Read the lowercase-hex size prefix byte by byte up to the space.
        while True:
            d = self.stream.read(1)
            if not d:
                # EOF in the middle of a prefix: rewind so a later call
                # can retry once more data has been appended.
                self.stream.seek(startPos)
                raise StopIteration
            buf += d
            if d == ' ':
                break
            if not (('0' <= d <= '9') or ('a' <= d <= 'f')):
                raise ValueError("malformed logfile")
        size = int(buf, 16)
        payload = self.stream.read(size)
        if len(payload) < size:
            # Truncated record: rewind and stop, as above.
            self.stream.seek(startPos)
            raise StopIteration
        endPos = startPos + len(buf) + size
        timestamp, level, name, message = payload.split(' ', 3)
        level = int(level)
        message = message[:-1] # remove newline
        logLine = _LogLine(timestamp, level, name, message, startPos, endPos,
                buf + payload)
        if self.asRecords:
            # Trick strptime into parsing UTC timestamps
            # [1970-01-01T00:00:00.000000Z] -> 1970-01-01T00:00:00 UTC
            parseable = timestamp[1:-9] + ' UTC'
            timetup = time.strptime(parseable, '%Y-%m-%dT%H:%M:%S %Z')
            microseconds = int(timestamp[-8:-2])
            # Trick mktime into epoch-izing UTC timestamps
            timetup = timetup[:8] + (0,) # Set DST off
            epoch = time.mktime(timetup) - time.timezone
            epoch += microseconds / 1e6 # Add microseconds
            # Rebuild a LogRecord carrying the original name/level/message;
            # pathname/lineno are unknown after round-tripping through disk.
            record = logging.LogRecord(
                name=name,
                level=int(level),
                pathname=None,
                lineno=-1,
                msg=message,
                args=None,
                exc_info=None,
                )
            record.created = epoch
            record.msecs = (epoch - long(epoch)) * 1000
            record.relativeCreated = 0
            return record
        else:
            return logLine
class BulkHandler(object):
    """Minimal logging-handler lookalike that appends structured records
    to a file, opening it (and creating its directory) on first use.
    """
    formatter = StructuredLogFormatter()
    level = logging.NOTSET

    def __init__(self, path, mode='a'):
        self.path = path
        self.mode = mode
        self.stream = None
        self.lastUsed = 0

    def _open(self):
        # Create the parent directory on demand before opening the file.
        parent = os.path.dirname(self.path)
        if not os.path.isdir(parent):
            os.makedirs(parent)
        return open(self.path, self.mode)

    def emit(self, record):
        self.emitMany([record])
    handle = emit

    def emitMany(self, records):
        """Format and write a batch of records, then flush once."""
        self.lastUsed = time.time()
        if self.stream is None:
            self.stream = self._open()
        write = self.stream.write
        render = self.formatter.format
        for rec in records:
            write(render(rec) + '\n')
        self.stream.flush()

    def close(self):
        # Safe to call repeatedly; a later emit reopens the file.
        if self.stream:
            self.stream.close()
            self.stream = None
class JobLogManager(object):
    """Hand out one BulkHandler per task under a common base directory.

    ``task_uuid=None`` maps to the shared job-level log. Handlers are
    created lazily and cached; prune() closes those idle longer than
    ``timeout`` seconds (they reopen automatically on next use).
    """
    handlerClass = BulkHandler
    timeout = 60

    def __init__(self, basePath):
        self.basePath = basePath
        self.handlers = {}

    def _get(self, task_uuid):
        existing = self.handlers.get(task_uuid)
        if not existing:
            existing = self.handlerClass(self.getPath(task_uuid), 'ab')
            self.handlers[task_uuid] = existing
        return existing

    def getPath(self, task_uuid=None):
        if not task_uuid:
            return os.path.join(self.basePath, 'job.log')
        return os.path.join(self.basePath, 'task-%s.log' % task_uuid)

    def getAllPaths(self):
        return [self.getPath(name) for name in self.handlers]

    def emitMany(self, records, task_uuid=None):
        self._get(task_uuid).emitMany(records)

    def getLogger(self, task_uuid=None, name='dispatcher'):
        """Return a standalone Logger wired to this task's handler."""
        logger = logging.Logger(name, level=logging.DEBUG)
        logger.handlers = [self._get(task_uuid)]
        return logger

    def prune(self):
        # Close (but keep cached) handlers that have been idle too long.
        cutoff = time.time() - self.timeout
        for handler in self.handlers.values():
            if handler.lastUsed < cutoff:
                handler.close()

    def close(self):
        for handler in self.handlers.values():
            handler.close()
        self.handlers = {}
def _softIter(iterable):
try:
return iterable.next()
except StopIteration:
return None
def _splitLog(inFile):
    """
    Split a logfile into a series of subfiles at each boundary where the
    timestamp goes backwards
    """
    firstByte = 0
    lastByte = 0
    lastStamp = None
    regions = []
    for record in StructuredLogParser(inFile, asRecords=False):
        timestamp = record.timestamp
        # NOTE: on the first record lastStamp is None; under Python 2 the
        # str <= None comparison is False, so no empty leading segment is
        # emitted.
        if timestamp <= lastStamp:
            # Timestamp went backwards, start a new segment
            regions.append((firstByte, lastByte))
            firstByte = lastByte
        lastByte = inFile.tell()
        lastStamp = timestamp
    if firstByte != lastByte:
        # Close out the final (or only) segment.
        regions.append((firstByte, lastByte))
    # Wrap each byte range as a seekable view onto the original file.
    return [util.SeekableNestedFile(inFile, end - start, start)
            for (start, end) in regions]
def mergeLogs(inFiles, sort=True):
    """Merge several structured logs into one stream ordered by timestamp.

    Generator yielding logging.LogRecord objects. When *sort* is true,
    each input is first split at timestamp discontinuities so records
    within a single file are also emitted in order.
    """
    if not inFiles:
        return
    if sort:
        # Sort records within an individual log by breaking them apart wherever
        # a discontinuity exists
        splitFiles = []
        for inFile in inFiles:
            splitFiles.extend(_splitLog(inFile))
        inFiles = splitFiles
    # Get the first record from each file
    parsers = [StructuredLogParser(fobj, asRecords=True) for fobj in inFiles]
    nextRecord = [_softIter(x) for x in parsers]
    while True:
        if not any(nextRecord):
            break
        # Find which record from all files that has the lowest timestamp
        n = min((x.created, n) for (n, x) in enumerate(nextRecord) if x)[1]
        yield nextRecord[n]
        # Grab the next one from the same file
        nextRecord[n] = _softIter(parsers[n])
| {
"content_hash": "561ee8e6da3944419beae86e65ce3ade",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 79,
"avg_line_length": 30.682008368200837,
"alnum_prop": 0.5746624846583935,
"repo_name": "sassoftware/rmake3",
"id": "336c181605914aed7dd46727f49ccc2b1ad0a39b",
"size": "7920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rmake/lib/structlog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39733"
},
{
"name": "Makefile",
"bytes": "36996"
},
{
"name": "PLpgSQL",
"bytes": "34497"
},
{
"name": "Python",
"bytes": "1351324"
},
{
"name": "Shell",
"bytes": "6388"
}
],
"symlink_target": ""
} |
from pymba import Vimba
if __name__ == '__main__':
    # Open the Vimba camera API (the context manager handles startup and
    # shutdown) and print the IDs of all interfaces it can see.
    with Vimba() as vimba:
        print(vimba.interface_ids())
| {
"content_hash": "757ce7929ea11ab02e75053a22b90806",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 16.857142857142858,
"alnum_prop": 0.576271186440678,
"repo_name": "morefigs/pymba",
"id": "9a7cbecd27bce8918d0fae406479e098cda7cdcd",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/interface/list_interface_ids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77300"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import math
import hmac
import json
import hashlib
import argparse
from random import shuffle
from pathlib2 import Path
import numpy as np
import tensorflow as tf
from tensorflow.data import Dataset
def info(msg, char="#", width=75):
    """Print *msg* framed in a banner made of *char*, *width* chars wide."""
    rule = char * width
    # The negative field width left-justifies msg inside the banner line;
    # (5 - width) is the same quantity as the original (-1 * width) + 5.
    body = char + " %0*s" % (5 - width, msg) + char
    print("")
    print(rule)
    print(body)
    print(rule)
def check_dir(path):
    """Ensure *path* exists as a directory and return it as a resolved Path.

    Intermediate directories are created as needed. Creation is attempted
    unconditionally and the "already exists" case is tolerated, avoiding
    the check-then-act race of the original exists()/makedirs() pattern.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Re-raise unless the directory is already there (possibly created
        # concurrently by another worker); anything else is a real error.
        if not os.path.isdir(path):
            raise
    return Path(path).resolve(strict=False)
def process_image(path, label, img_size):
    """Load a JPEG from *path*, resize to (img_size, img_size), scale to [0, 1].

    Returns an (image_tensor, label) pair so it can be used directly with
    tf.data Dataset.map.
    """
    img_raw = tf.io.read_file(path)
    # Decode as 3-channel RGB regardless of what the source file contains.
    img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
    # Resize, then normalize pixel values from [0, 255] to [0, 1].
    img_final = tf.image.resize(img_tensor, [img_size, img_size]) / 255
    return img_final, label
def load_dataset(base_path, dset, split=None):
    """Build a shuffled list of (absolute_path, label_index) training pairs.

    Labels are the immediate subdirectory names of *base_path*; *dset* is
    a text file listing one image path per line, where each path's parent
    directory names its class. Only the training share of *split*
    (default 8/1/1) is returned.
    """
    if split is None:
        split = [8, 1, 1]
    fractions = np.array(split) / np.sum(np.array(split))
    # Class names come from the top-level directory names under base_path.
    labels = {}
    for (_, dirs, _) in os.walk(base_path):
        print('found {}'.format(dirs))
        labels = {k: v for (v, k) in enumerate(dirs)}
        print('using {}'.format(labels))
        break
    print('loading dataset from {}'.format(dset))
    with open(dset, 'r') as handle:
        data = [(str(Path(entry.strip()).absolute()),
                 labels[Path(entry.strip()).parent.name])
                for entry in handle.readlines()]
    print('dataset size: {}\nsuffling data...'.format(len(data)))
    shuffle(data)
    print('splitting data...')
    train_idx = int(len(data) * fractions[0])
    return data[:train_idx]
# @print_info
def run(
        dpath,
        img_size=160,
        epochs=10,
        batch_size=32,
        learning_rate=0.0001,
        output='model',
        dset=None):
    """Train a binary MobileNetV2 classifier and save it.

    Loads the training list from *dset* (paths rooted at *dpath*), fine-
    tunes MobileNetV2 with a sigmoid head, saves both saved_model and h5
    formats under *output*, and returns the HMAC signature of the h5 file.
    """
    img_shape = (img_size, img_size, 3)
    info('Loading Data Set')
    # load dataset
    train = load_dataset(dpath, dset)
    # training data
    train_data, train_labels = zip(*train)
    # Zip (path, label, img_size) triples so process_image gets all three.
    train_ds = Dataset.zip((Dataset.from_tensor_slices(list(train_data)),
                            Dataset.from_tensor_slices(list(train_labels)),
                            Dataset.from_tensor_slices([img_size]*len(train_data))))
    print(train_ds)
    train_ds = train_ds.map(map_func=process_image,
                            num_parallel_calls=5)
    # Skip images that fail to decode rather than aborting training.
    train_ds = train_ds.apply(tf.data.experimental.ignore_errors())
    train_ds = train_ds.batch(batch_size)
    train_ds = train_ds.prefetch(buffer_size=5)
    train_ds = train_ds.repeat()
    # model
    info('Creating Model')
    base_model = tf.keras.applications.MobileNetV2(input_shape=img_shape,
                                                   include_top=False,
                                                   weights='imagenet')
    # Fine-tune the whole backbone, not just the new head.
    base_model.trainable = True
    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.summary()
    # training
    info('Training')
    # repeat() above makes the dataset infinite, so bound each epoch.
    steps_per_epoch = math.ceil(len(train) / batch_size)
    model.fit(train_ds, epochs=epochs, steps_per_epoch=steps_per_epoch)
    # save model
    info('Saving Model')
    # check existence of base model folder
    output = check_dir(output)
    print('Serializing into saved_model format')
    tf.saved_model.save(model, str(output))
    print('Done!')
    # add time prefix folder
    file_output = str(Path(output).joinpath('latest.h5'))
    print('Serializing h5 model to:\n{}'.format(file_output))
    model.save(file_output)
    return generate_hash(file_output, 'kf_pipeline')
def generate_hash(dfile, key):
    """Return the hex HMAC-SHA256 digest of the file at *dfile*, keyed by *key*."""
    print('Generating hash for {}'.format(dfile))
    digest = hmac.new(str.encode(key), digestmod=hashlib.sha256)
    chunk_size = 65536
    with open(str(dfile), 'rb') as stream:
        # Stream the file in fixed-size chunks to bound memory use.
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
if __name__ == "__main__":
    # CLI entry point: parse arguments, resolve paths relative to the base
    # path, run training, and write the run parameters + signatures out.
    parser = argparse.ArgumentParser(
        description='transfer learning for binary image task')
    parser.add_argument('-s', '--base_path',
                        help='directory to base data', default='../../data')
    parser.add_argument(
        '-d', '--data', help='directory to training and test data', default='train')  # noqa: E501
    parser.add_argument(
        '-e', '--epochs', help='number of epochs', default=10, type=int)
    parser.add_argument('-b', '--batch', help='batch size',
                        default=32, type=int)
    parser.add_argument('-i', '--image_size',
                        help='image size', default=160, type=int)
    parser.add_argument('-l', '--lr', help='learning rate',
                        default=0.0001, type=float)
    parser.add_argument('-o', '--outputs',
                        help='output directory', default='model')
    parser.add_argument('-f', '--dataset', help='cleaned data listing')
    args = parser.parse_args()
    info('Using TensorFlow v.{}'.format(tf.__version__))
    data_path = Path(args.base_path).joinpath(args.data).resolve(strict=False)
    target_path = Path(args.base_path).resolve(
        strict=False).joinpath(args.outputs)
    dataset = Path(args.base_path).joinpath(args.dataset)
    image_size = args.image_size
    params = Path(args.base_path).joinpath('params.json')
    # NOTE: 'args' is rebound here from the argparse Namespace to the
    # kwargs dict passed to run(); the Namespace is unavailable below.
    args = {
        "dpath": str(data_path),
        "img_size": image_size,
        "epochs": args.epochs,
        "batch_size": args.batch,
        "learning_rate": args.lr,
        "output": str(target_path),
        "dset": str(dataset)
    }
    dataset_signature = generate_hash(dataset, 'kf_pipeline')
    # printing out args for posterity
    for i in args:
        print('{} => {}'.format(i, args[i]))
    model_signature = run(**args)
    # Record provenance signatures alongside the run parameters.
    args['dataset_signature'] = dataset_signature.upper()
    args['model_signature'] = model_signature.upper()
    args['model_type'] = 'tfv2-MobileNetV2'
    print('Writing out params...', end='')
    with open(str(params), 'w') as f:
        json.dump(args, f)
    print(' Saved to {}'.format(str(params)))
# python train.py -d train -e 3 -b 32 -l 0.0001 -o model -f train.txt
| {
"content_hash": "5b95ca1af0bba8e6c6a1eb5ffa9577e7",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 98,
"avg_line_length": 30.848341232227487,
"alnum_prop": 0.5954831771393455,
"repo_name": "kubeflow/examples",
"id": "a67f23f636bcd48dc31aea63dbec7654684a507d",
"size": "6509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipelines/azurepipeline/code/training/train.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13943"
},
{
"name": "Dockerfile",
"bytes": "29907"
},
{
"name": "HTML",
"bytes": "24896"
},
{
"name": "JavaScript",
"bytes": "8954"
},
{
"name": "Jinja",
"bytes": "13921"
},
{
"name": "Jsonnet",
"bytes": "6403554"
},
{
"name": "Jupyter Notebook",
"bytes": "3199129"
},
{
"name": "Makefile",
"bytes": "20439"
},
{
"name": "Python",
"bytes": "505177"
},
{
"name": "Shell",
"bytes": "35152"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time

# Blink pattern for the letter "B" on a single GPIO pin: a short first
# burst, a gap, then a longer second burst. The first/gap/second flags
# select which phases run.
pin = 17
first = 1
gap = 1
second = 1
# B Letter
firstCount = 3
secondCount = 7
firstSleepTimeOn = 0.02
firstSleepTimeOff = 0.04
secondSleepTimeOn = 0.04
secondSleepTimeOff = 0.06
gapSleepTime = 0.1

GPIO.setmode(GPIO.BCM)
GPIO.setup(pin, GPIO.OUT)

if first == 1:
    # range (not the Python-2-only xrange) keeps this script runnable on
    # both Python 2 and 3; the counts here are tiny, so a list is fine.
    for _ in range(firstCount):
        GPIO.output(pin, GPIO.LOW)
        time.sleep(firstSleepTimeOn)
        GPIO.output(pin, GPIO.HIGH)
        time.sleep(firstSleepTimeOff)

# wait for second string
if gap == 1:
    GPIO.output(pin, GPIO.LOW)
    time.sleep(gapSleepTime)

if second == 1:
    for _ in range(secondCount):
        GPIO.output(pin, GPIO.LOW)
        time.sleep(secondSleepTimeOn)
        GPIO.output(pin, GPIO.HIGH)
        time.sleep(secondSleepTimeOff)

# Release the pin so the next script starts from a clean state.
GPIO.cleanup()
| {
"content_hash": "f795e06b43895a0c8ae75f26fc4e1819",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 34,
"avg_line_length": 16.933333333333334,
"alnum_prop": 0.7047244094488189,
"repo_name": "larssima/AmiJukeBoxRemote",
"id": "65d424bbc5957009f693ac1f0a9cb422a62b720e",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AmiJukeBoxRemote/gui/RaspberryPi/b7.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "110"
},
{
"name": "C#",
"bytes": "51819"
},
{
"name": "CSS",
"bytes": "4174"
},
{
"name": "HTML",
"bytes": "41597"
},
{
"name": "JavaScript",
"bytes": "302352"
},
{
"name": "Python",
"bytes": "37987"
}
],
"symlink_target": ""
} |
import numpy
from matplotlib.pyplot import figure, show, rc
from numpy.random import normal
from kapteyn import kmpfit
from scipy.odr import Data, Model, ODR, RealData, odr_stop
def model(p, x):
    """Straight line Y = a + b*x with parameter vector p = (a, b)."""
    intercept, slope = p
    return intercept + slope * x


def residuals(p, data):
    """Weighted residuals for a line fit with errors in both coordinates.

    Uses effective-variance weighting w = 1/sqrt(ey^2 + b^2*ex^2), with
    zero weight wherever that variance is exactly zero.
    """
    _, slope = p
    x, y, ex, ey = data
    variance = ey * ey + slope * slope * ex * ex
    # Zero variance -> zero weight (point ignored instead of dividing by 0).
    weight = numpy.sqrt(numpy.where(variance == 0.0, 0.0, 1.0 / variance))
    return weight * (y - model(p, x))
# Create the data: a noisy straight line with Gaussian errors in both x and y.
N = 40
a0 = 2; b0 = 1
x = numpy.linspace(0.0, 7.0, N)
y = model((a0,b0),x) + normal(0.0, 1.0, N) # Mean 0, sigma 1
errx = normal(0.0, 0.3, N)
erry = normal(0.0, 0.4, N)
beta0 = [0,0]
# Reference fit with SciPy's orthogonal distance regression.
print("\n========== Results SciPy's ODR ============")
linear = Model(model)
mydata = RealData(x, y, sx=errx, sy=erry)
myodr = ODR(mydata, linear, beta0=beta0, maxit=5000)
myoutput = myodr.run()
print("Fitted parameters: ", myoutput.beta)
print("Covariance errors: ", numpy.sqrt(myoutput.cov_beta.diagonal()))
print("Standard errors: ", myoutput.sd_beta)
print("Minimum (reduced)chi^2: ", myoutput.res_var)
beta = myoutput.beta
# Prepare fit routine: kmpfit with the effective-variance residuals above.
fitobj = kmpfit.Fitter(residuals=residuals, data=(x, y, errx, erry))
try:
    fitobj.fit(params0=beta0)
except Exception as mes:
    print("Something wrong with fit: ", mes)
    raise SystemExit
print("\n\n======== Results kmpfit errors in both variables =========")
print("Params: ", fitobj.params)
print("Covariance errors: ", fitobj.xerror)
print("Standard errors ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Status Message: ", fitobj.message)
# Some plotting: overlay both fitted lines on the error-bar scatter.
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure(1)
frame = fig.add_subplot(1,1,1, aspect=1)
frame.errorbar(x, y, xerr=errx, yerr=erry, fmt='bo')
# Plot first fit
frame.plot(x, beta[1]*x+beta[0], '-y', lw=4, label="ODR", alpha=0.6)
frame.plot(x, fitobj.params[1]*x+fitobj.params[0], 'c', ls='--', lw=2, label="kmpfit")
frame.set_xlabel("X")
frame.set_ylabel("Y")
frame.set_title("Weights in both coords ($\chi^2_{min}$ ODR and Kmpfit)")
leg = frame.legend(loc=2)
show() | {
"content_hash": "29a98bbe6bebb9227e6ac4cef02f1afc",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 86,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.6326438848920863,
"repo_name": "kapteyn-astro/kapteyn",
"id": "847d337fa650053436ff346e2f819632a2b8fcdd",
"size": "2547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/EXAMPLES/kmpfit_errorsinXandY.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "414"
},
{
"name": "C",
"bytes": "4364193"
},
{
"name": "C++",
"bytes": "23758"
},
{
"name": "CSS",
"bytes": "26526"
},
{
"name": "FORTRAN",
"bytes": "357291"
},
{
"name": "Groff",
"bytes": "4223"
},
{
"name": "HTML",
"bytes": "3811525"
},
{
"name": "JavaScript",
"bytes": "3140"
},
{
"name": "Lex",
"bytes": "136508"
},
{
"name": "M4",
"bytes": "16619"
},
{
"name": "Makefile",
"bytes": "43728"
},
{
"name": "Pascal",
"bytes": "37285"
},
{
"name": "Python",
"bytes": "1388615"
},
{
"name": "Shell",
"bytes": "30827"
},
{
"name": "TeX",
"bytes": "15968"
}
],
"symlink_target": ""
} |
from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-availability-zone:%s'

# Default policy rules for the availability-zone API. Each entry documents
# the REST route it protects; paths are rooted at the API endpoint.
availability_zone_policies = [
    base.create_rule_default(
        POLICY_ROOT % 'list',
        base.RULE_ADMIN_OR_OWNER,
        "Lists availability zone information without host information",
        [
            {
                'method': 'GET',
                # Leading slash added for consistency with the 'detail'
                # entry below and the rest of the documented API routes.
                'path': '/os-availability-zone'
            }
        ]),
    base.create_rule_default(
        POLICY_ROOT % 'detail',
        base.RULE_ADMIN_API,
        "Lists detailed availability zone information with host information",
        [
            {
                'method': 'GET',
                'path': '/os-availability-zone/detail'
            }
        ])
]
def list_rules():
    """Return the default policy rules for the availability-zone API."""
    return availability_zone_policies
| {
"content_hash": "fb9fe2266bc95eae70a432c784f9f982",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 77,
"avg_line_length": 24.46875,
"alnum_prop": 0.5389527458492975,
"repo_name": "rajalokan/nova",
"id": "281d8c675eea8027be86827b3b1d400e430d8a82",
"size": "1422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/policies/availability_zone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "4503"
},
{
"name": "Python",
"bytes": "19100322"
},
{
"name": "Shell",
"bytes": "26793"
},
{
"name": "Smarty",
"bytes": "299237"
}
],
"symlink_target": ""
} |
"""
pyshtools Global Spectral Analysis Routines.
This submodule of pyshtools defines the following functions:
Real spectral analysis
----------------------
SHPowerL Compute the power of a real function for a single
spherical harmonic degree.
SHPowerDensityL Compute the power spectral density of a real
function for a single spherical harmonic degree.
SHCrossPowerL Compute the cross-power of two functions for a
single spherical harmonic degree.
SHCrossPowerDensityL Compute the cross-power spectral density of two
functions for a single spherical harmonic degree.
SHPowerSpectrum Compute the power spectrum of a function.
SHPowerSpectrumDensity Compute the power spectral density of a function.
SHCrossPowerSpectrum Compute the cross-power spectrum of two
functions.
SHCrossPowerSpectrumDensity Compute the cross-power spectral density of two
functions.
SHAdmitCorr Calculate the admittance and correlation spectra
of two functions.
SHConfidence Compute the probability that two sets of
spherical harmonic coefficients are correlated at
a given degree and for a given correlation
coefficient.
Complex spectral analysis
-------------------------
SHPowerLC Compute the power of a complex function for a
single spherical harmonic degree.
SHPowerDensityLC Compute the power spectral density of a complex
function for a single spherical harmonic degree.
SHCrossPowerLC Compute the cross-power of two complex functions
for a single spherical harmonic degree.
SHCrossPowerDensityLC Compute the cross-power spectral density of two
complex functions for a single spherical harmonic
degree.
SHPowerSpectrumC Compute the power spectrum of a complex function.
SHPowerSpectrumDensityC Compute the power spectral density of a complex
function.
SHCrossPowerSpectrumC Compute the cross-power spectrum of two complex
functions.
SHCrossPowerSpectrumDensityC Compute the cross-power spectral density of two
complex functions.
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
from ._SHTOOLS import SHPowerL
from ._SHTOOLS import SHPowerDensityL
from ._SHTOOLS import SHCrossPowerL
from ._SHTOOLS import SHCrossPowerDensityL
from ._SHTOOLS import SHPowerSpectrum
from ._SHTOOLS import SHPowerSpectrumDensity
from ._SHTOOLS import SHCrossPowerSpectrum
from ._SHTOOLS import SHCrossPowerSpectrumDensity
from ._SHTOOLS import SHAdmitCorr
from ._SHTOOLS import SHConfidence
from ._SHTOOLS import SHPowerLC
from ._SHTOOLS import SHPowerDensityLC
from ._SHTOOLS import SHCrossPowerLC
from ._SHTOOLS import SHCrossPowerDensityLC
from ._SHTOOLS import SHPowerSpectrumC
from ._SHTOOLS import SHPowerSpectrumDensityC
from ._SHTOOLS import SHCrossPowerSpectrumC
from ._SHTOOLS import SHCrossPowerSpectrumDensityC
| {
"content_hash": "62949a32c5290856fe2a470a9709a82f",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 50.785714285714285,
"alnum_prop": 0.6545710267229254,
"repo_name": "heroxbd/SHTOOLS",
"id": "6b60cb375467e8374c1707cf01eb174f99e6ebfc",
"size": "3555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyshtools/spectralanalysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "FORTRAN",
"bytes": "1041524"
},
{
"name": "HTML",
"bytes": "5762"
},
{
"name": "Makefile",
"bytes": "23618"
},
{
"name": "Python",
"bytes": "188629"
}
],
"symlink_target": ""
} |
import functools
from .errors import ClaripyOperationError, ClaripyTypeError, ClaripyZeroDivisionError
from .backend_object import BackendObject
def compare_bits(f):
    """Decorator: reject zero-width or differently-sized operand pairs.

    Wraps a binary bitvector operation so it raises ClaripyTypeError
    before running if either operand has zero width, or if the widths
    differ.
    """
    @functools.wraps(f)
    def guarded(self, o):
        if 0 in (self.bits, o.bits):
            raise ClaripyTypeError("The operation is not allowed on zero-length bitvectors.")
        if self.bits != o.bits:
            raise ClaripyTypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
        return f(self, o)
    return guarded
def compare_bits_0_length(f):
    """Decorator: require equal widths but allow zero-width operands.

    Used by comparisons such as __eq__/__ne__, which remain meaningful
    for zero-width bitvectors as long as both sides match.
    """
    @functools.wraps(f)
    def guarded(self, o):
        if self.bits != o.bits:
            raise ClaripyTypeError("bitvectors are differently-sized (%d and %d)" % (self.bits, o.bits))
        return f(self, o)
    return guarded
def normalize_types(f):
    """Decorator: promote plain int/long operands to BVVs before calling.

    Returns NotImplemented when either operand still is not a BVV after
    coercion, letting Python fall back to the reflected operation.
    (Python 2 module: checks both int and long.)
    """
    @functools.wraps(f)
    def normalize_helper(self, o):
        if hasattr(o, '__module__') and o.__module__ == 'z3':
            raise ValueError("this should no longer happen")
        if type(o) in (int, long):
            o = BVV(o, self.bits)
        if type(self) in (int, long):
            # NOTE(review): a plain-int self would have no .bits attribute,
            # so this branch looks unreachable in practice — confirm.
            self = BVV(self, self.bits)
        if not isinstance(self, BVV) or not isinstance(o, BVV):
            return NotImplemented
        return f(self, o)
    return normalize_helper
class BVV(BackendObject):
__slots__ = [ 'bits', '_value', 'mod', 'value' ]
def __init__(self, value, bits):
if bits < 0 or type(bits) not in (int, long) or type(value) not in (int, long):
raise ClaripyOperationError("BVV needs a non-negative length and an int/long value")
if bits == 0 and value not in (0, "", None):
raise ClaripyOperationError("Zero-length BVVs cannot have a meaningful value.")
self.bits = bits
self._value = 0
self.mod = 2**bits
self.value = value
def __hash__(self):
return hash((str(self.value), self.bits))
def __getstate__(self):
return (self.bits, self.value)
def __setstate__(self, s):
self.bits = s[0]
self.mod = 2**self.bits
self.value = s[1]
@property
def value(self):
return self._value
@value.setter
def value(self, v):
self._value = v & (self.mod - 1)
@property
def signed(self):
return self._value if self._value < self.mod/2 else self._value % (self.mod/2) - (self.mod/2)
@signed.setter
def signed(self, v):
self._value = v % -self.mod
#
# Arithmetic stuff
#
@normalize_types
@compare_bits
def __add__(self, o):
return BVV(self.value + o.value, self.bits)
@normalize_types
@compare_bits
def __sub__(self, o):
return BVV(self.value - o.value, self.bits)
@normalize_types
@compare_bits
def __mul__(self, o):
return BVV(self.value * o.value, self.bits)
@normalize_types
@compare_bits
def __mod__(self, o):
try:
return BVV(self.value % o.value, self.bits)
except ZeroDivisionError:
raise ClaripyZeroDivisionError()
@normalize_types
@compare_bits
def __div__(self, o):
try:
return BVV(self.value / o.value, self.bits)
except ZeroDivisionError:
raise ClaripyZeroDivisionError()
#
# Reverse arithmetic stuff
#
@normalize_types
@compare_bits
def __radd__(self, o):
return BVV(self.value + o.value, self.bits)
@normalize_types
@compare_bits
def __rsub__(self, o):
return BVV(o.value - self.value, self.bits)
@normalize_types
@compare_bits
def __rmul__(self, o):
return BVV(self.value * o.value, self.bits)
@normalize_types
@compare_bits
def __rmod__(self, o):
return BVV(o.value % self.value, self.bits)
@normalize_types
@compare_bits
def __rdiv__(self, o):
return BVV(o.value / self.value, self.bits)
#
# Bit operations
#
@normalize_types
@compare_bits
def __and__(self, o):
return BVV(self.value & o.value, self.bits)
@normalize_types
@compare_bits
def __or__(self, o):
return BVV(self.value | o.value, self.bits)
@normalize_types
@compare_bits
def __xor__(self, o):
return BVV(self.value ^ o.value, self.bits)
@normalize_types
@compare_bits
def __lshift__(self, o):
if o.signed < self.bits:
return BVV(self.value << o.signed, self.bits)
else:
return BVV(0, self.bits)
@normalize_types
@compare_bits
def __rshift__(self, o):
# arithmetic shift uses the signed version
if o.signed < self.bits:
return BVV(self.signed >> o.signed, self.bits)
else:
return BVV(0, self.bits)
def __invert__(self):
return BVV(self.value ^ self.mod-1, self.bits)
def __neg__(self):
return BVV((-self.value) % self.mod, self.bits)
#
# Reverse bit operations
#
@normalize_types
@compare_bits
def __rand__(self, o):
return BVV(self.value & o.value, self.bits)
@normalize_types
@compare_bits
def __ror__(self, o):
return BVV(self.value | o.value, self.bits)
@normalize_types
@compare_bits
def __rxor__(self, o):
return BVV(self.value ^ o.value, self.bits)
    @normalize_types
    @compare_bits
    def __rlshift__(self, o):
        # Reflected shift: o << self. Unlike __lshift__ there is no
        # short-circuit for amounts >= bits — presumably BVV masks the
        # result so the outcome is the same, just computed on a larger
        # intermediate integer; confirm against the constructor.
        return BVV(o.value << self.signed, self.bits)
    @normalize_types
    @compare_bits
    def __rrshift__(self, o):
        # Reflected arithmetic shift: o >> self, using the signed views of
        # both operands (mirrors __rshift__, but without its >= bits guard).
        return BVV(o.signed >> self.signed, self.bits)
#
# Boolean stuff
#
@normalize_types
@compare_bits_0_length
def __eq__(self, o):
return self.value == o.value
@normalize_types
@compare_bits_0_length
def __ne__(self, o):
return self.value != o.value
@normalize_types
@compare_bits
def __lt__(self, o):
return self.value < o.value
@normalize_types
@compare_bits
def __gt__(self, o):
return self.value > o.value
@normalize_types
@compare_bits
def __le__(self, o):
return self.value <= o.value
@normalize_types
@compare_bits
def __ge__(self, o):
return self.value >= o.value
#
# Conversions
#
    def size(self):
        # Width of this bit-vector, in bits.
        return self.bits
def __repr__(self):
return 'BVV(0x%x, %d)' % (self.value, self.bits)
#
# External stuff
#
def BitVecVal(value, bits):
    """Z3-style alias for constructing a concrete bit-vector value."""
    return BVV(value, bits)
def ZeroExt(num, o):
    """Widen `o` by `num` high zero bits; the raw value is unchanged."""
    widened = o.bits + num
    return BVV(o.value, widened)
def SignExt(num, o):
    """Widen `o` by `num` bits, replicating the sign bit (via o.signed)."""
    widened = o.bits + num
    return BVV(o.signed, widened)
def Extract(f, t, o):
    """Extract bit positions f down to t (inclusive) of `o` as an
    (f - t + 1)-bit value.

    NOTE(review): the mask is 2**(f+1)-1, wider than the result width when
    t > 0 — presumably BVV masks the value down again; confirm.
    """
    shifted = o.value >> t
    return BVV(shifted & (2 ** (f + 1) - 1), f - t + 1)
def Concat(*args):
    """Concatenate bit-vectors; the first argument becomes the high bits."""
    total_value = 0
    for piece in args:
        total_value = (total_value << piece.bits) | piece.value
    total_bits = sum(piece.bits for piece in args)
    return BVV(total_value, total_bits)
def RotateRight(self, bits):
    """Rotate `self` right by `bits` positions (taken modulo its width)."""
    amount = bits % self.size()
    return LShR(self, amount) | (self << (self.size() - amount))
def RotateLeft(self, bits):
    """Rotate `self` left by `bits` positions (taken modulo its width)."""
    amount = bits % self.size()
    return (self << amount) | LShR(self, self.size() - amount)
def Reverse(a):
    """Reverse the byte order of `a` (endianness swap).

    Single-byte vectors are returned unchanged; widths that are not a
    whole number of bytes are rejected with ClaripyOperationError.
    """
    size = a.size()
    if size == 8:
        return a
    elif size % 8 != 0:
        raise ClaripyOperationError("can't reverse non-byte sized bitvectors")
    else:
        value = a.value
        out = 0
        # Fast paths for the common widths.
        if size == 64:
            out = _reverse_64(value)
        elif size == 32:
            out = _reverse_32(value)
        elif size == 16:
            out = _reverse_16(value)
        else:
            # Generic path: move the byte at offset i to its mirror slot.
            # `range` (not the Python-2-only `xrange`) keeps this portable;
            # behavior is identical on Python 2.
            for i in range(0, size, 8):
                out |= ((value & (0xff << i)) >> i) << (size - 8 - i)
        return BVV(out, size)
    # the RIGHT way to do it:
    #return BVV(int(("%x" % a.value).rjust(size/4, '0').decode('hex')[::-1].encode('hex'), 16), size)
def _reverse_16(v):
    """Byte-swap a 16-bit integer."""
    low = (v & 0x00ff) << 8
    high = (v & 0xff00) >> 8
    return low | high
def _reverse_32(v):
    """Byte-swap a 32-bit integer."""
    b0 = (v & 0x000000ff) << 24
    b1 = (v & 0x0000ff00) << 8
    b2 = (v & 0x00ff0000) >> 8
    b3 = (v & 0xff000000) >> 24
    return b0 | b1 | b2 | b3
def _reverse_64(v):
    """Byte-swap a 64-bit integer: swap the 32-bit halves, then each half."""
    low = v & 0xffffffff
    high = (v >> 32) & 0xffffffff
    return (_reverse_32(low) << 32) | _reverse_32(high)
@normalize_types
@compare_bits
def ULT(self, o):
    """Unsigned less-than."""
    lhs, rhs = self.value, o.value
    return lhs < rhs
@normalize_types
@compare_bits
def UGT(self, o):
    """Unsigned greater-than."""
    lhs, rhs = self.value, o.value
    return lhs > rhs
@normalize_types
@compare_bits
def ULE(self, o):
    """Unsigned less-or-equal."""
    lhs, rhs = self.value, o.value
    return lhs <= rhs
@normalize_types
@compare_bits
def UGE(self, o):
    """Unsigned greater-or-equal."""
    lhs, rhs = self.value, o.value
    return lhs >= rhs
@normalize_types
@compare_bits
def SLT(self, o):
    """Signed less-than (two's-complement view)."""
    lhs, rhs = self.signed, o.signed
    return lhs < rhs
@normalize_types
@compare_bits
def SGT(self, o):
    """Signed greater-than (two's-complement view)."""
    lhs, rhs = self.signed, o.signed
    return lhs > rhs
@normalize_types
@compare_bits
def SLE(self, o):
    """Signed less-or-equal (two's-complement view)."""
    lhs, rhs = self.signed, o.signed
    return lhs <= rhs
@normalize_types
@compare_bits
def SGE(self, o):
    """Signed greater-or-equal (two's-complement view)."""
    lhs, rhs = self.signed, o.signed
    return lhs >= rhs
@normalize_types
@compare_bits
def SMod(self, o):
    # compute the remainder like the % operator in C
    # (truncating division, so the remainder takes the sign of the
    # dividend — unlike Python's floor-division-based %).
    a = self.signed
    b = o.signed
    # a//b floors; when the signs differ (a*b <= 0) the expression adjusts
    # the quotient so division rounds toward zero instead.
    division_result = a//b if a*b>0 else (a+(-a%b))//b
    val = a - division_result*b
    return BVV(val, self.bits)
@normalize_types
@compare_bits
def SDiv(self, o):
    # compute the round towards 0 division
    # a//b floors; when the signs differ (a*b <= 0) the expression adjusts
    # the quotient so division rounds toward zero, matching C semantics.
    a = self.signed
    b = o.signed
    val = a//b if a*b>0 else (a+(-a%b))//b
    return BVV(val, self.bits)
#
# Pure boolean stuff
#
def BoolV(b):
    """Identity wrapper: a concrete boolean represents itself."""
    return b
def And(*args):
    """Logical conjunction of all arguments; True for zero arguments."""
    for arg in args:
        if not arg:
            return False
    return True
def Or(*args):
    """Logical disjunction of all arguments; False for zero arguments."""
    for arg in args:
        if arg:
            return True
    return False
def Not(b):
    """Logical negation of a truthy/falsy value."""
    return False if b else True
@normalize_types
def normalizer(*args):
    # Runs its arguments through the normalize_types decorator and hands
    # them back unchanged; used by If() to reconcile its operand types.
    return args
def If(c, t, f):
    """Concrete ternary: return `t` when `c` is truthy, otherwise `f`."""
    t, f = normalizer(t, f)  # pylint:disable=unbalanced-tuple-unpacking
    return t if c else f
@normalize_types
@compare_bits
def LShR(a, b):
    """Logical (zero-filling) right shift of `a` by `b` positions."""
    shifted = a.value >> b.signed
    return BVV(shifted, a.bits)
| {
"content_hash": "81a48b4d8e317563e4a0b4b99622215c",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 105,
"avg_line_length": 23.68075117370892,
"alnum_prop": 0.5688937351308485,
"repo_name": "Ruide/angr-dev",
"id": "8be5f4208ab21f8cb67df7d3f1a19a9e4c151be2",
"size": "10088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "claripy/claripy/bv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2962"
},
{
"name": "Batchfile",
"bytes": "4542"
},
{
"name": "C",
"bytes": "18511978"
},
{
"name": "C++",
"bytes": "295194"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "12558"
},
{
"name": "OpenEdge ABL",
"bytes": "2415"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "5611416"
},
{
"name": "Shell",
"bytes": "41791"
}
],
"symlink_target": ""
} |
import hashlib
import sys
import time
import warnings
from django.conf import settings
from django.db.utils import load_backend
from django.utils.deprecation import RemovedInDjango18Warning
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from django.utils.six.moves import input
from django.utils.six import StringIO
from django.core.management.commands.dumpdata import sort_dependencies
from django.db import router
from django.apps import apps
from django.core import serializers
from .utils import truncate_name
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
NO_DB_ALIAS = '__no_db__'
class BaseDatabaseCreation(object):
    """
    This class encapsulates all backend-specific differences that pertain to
    database *creation*, such as the column types to use for particular Django
    Fields, the SQL used to create and destroy tables, and the creation and
    destruction of test databases.
    """
    # Backend-specific mappings, filled in by each database backend:
    # field internal type -> column type string, per-type column suffixes,
    # and CHECK-constraint templates.
    data_types = {}
    data_types_suffix = {}
    data_type_check_constraints = {}

    def __init__(self, connection):
        # The DatabaseWrapper this creation object operates on.
        self.connection = connection

    @cached_property
    def _nodb_connection(self):
        """
        Alternative connection to be used when there is no need to access
        the main database, specifically for test db creation/deletion.
        This also prevents the production database from being exposed to
        potential child threads while (or after) the test database is destroyed.
        Refs #10868, #17786, #16969.
        """
        settings_dict = self.connection.settings_dict.copy()
        settings_dict['NAME'] = None
        backend = load_backend(settings_dict['ENGINE'])
        nodb_connection = backend.DatabaseWrapper(
            settings_dict,
            alias=NO_DB_ALIAS,
            allow_thread_sharing=False)
        return nodb_connection

    @classmethod
    def _digest(cls, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # MD5 is used only for name shortening here, not for security.
        h = hashlib.md5()
        for arg in args:
            h.update(force_bytes(arg))
        return h.hexdigest()[:8]

    def sql_create_model(self, model, style, known_models=set()):
        """
        Returns the SQL required to create a single model, as a tuple of:
            (list_of_sql, pending_references_dict)
        """
        opts = model._meta
        # Unmanaged, proxy and swapped-out models get no DDL of their own.
        if not opts.managed or opts.proxy or opts.swapped:
            return [], {}
        final_output = []
        table_output = []
        pending_references = {}
        qn = self.connection.ops.quote_name
        for f in opts.local_fields:
            db_params = f.db_parameters(connection=self.connection)
            col_type = db_params['type']
            if db_params['check']:
                col_type = '%s CHECK (%s)' % (col_type, db_params['check'])
            col_type_suffix = f.db_type_suffix(connection=self.connection)
            tablespace = f.db_tablespace or opts.db_tablespace
            if col_type is None:
                # Skip ManyToManyFields, because they're not represented as
                # database columns in this table.
                continue
            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
            field_output = [style.SQL_FIELD(qn(f.column)),
                style.SQL_COLTYPE(col_type)]
            # Oracle treats the empty string ('') as null, so coerce the null
            # option whenever '' is a possible value.
            null = f.null
            if (f.empty_strings_allowed and not f.primary_key and
                    self.connection.features.interprets_empty_strings_as_nulls):
                null = True
            if not null:
                field_output.append(style.SQL_KEYWORD('NOT NULL'))
            if f.primary_key:
                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
            elif f.unique:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            if tablespace and f.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                tablespace_sql = self.connection.ops.tablespace_sql(
                    tablespace, inline=True)
                if tablespace_sql:
                    field_output.append(tablespace_sql)
            if f.rel and f.db_constraint:
                ref_output, pending = self.sql_for_inline_foreign_key_references(
                    model, f, known_models, style)
                if pending:
                    # Referenced model not created yet; emit the FK later as
                    # an ALTER TABLE (see sql_for_pending_references).
                    pending_references.setdefault(f.rel.to, []).append(
                        (model, f))
                else:
                    field_output.extend(ref_output)
            if col_type_suffix:
                field_output.append(style.SQL_KEYWORD(col_type_suffix))
            table_output.append(' '.join(field_output))
        # Table-level UNIQUE constraints from Meta.unique_together.
        for field_constraints in opts.unique_together:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
                ", ".join(
                    [style.SQL_FIELD(qn(opts.get_field(f).column))
                     for f in field_constraints]))
        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
                          style.SQL_TABLE(qn(opts.db_table)) + ' (']
        for i, line in enumerate(table_output):  # Combine and add commas.
            full_statement.append(
                '    %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
        full_statement.append(')')
        if opts.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(
                opts.db_tablespace)
            if tablespace_sql:
                full_statement.append(tablespace_sql)
        full_statement.append(';')
        final_output.append('\n'.join(full_statement))
        if opts.has_auto_field:
            # Add any extra SQL needed to support auto-incrementing primary
            # keys.
            auto_column = opts.auto_field.db_column or opts.auto_field.name
            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
                                                          auto_column)
            if autoinc_sql:
                for stmt in autoinc_sql:
                    final_output.append(stmt)
        return final_output, pending_references

    def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
        """
        Return the SQL snippet defining the foreign key reference for a field.
        """
        qn = self.connection.ops.quote_name
        rel_to = field.rel.to
        if rel_to in known_models or rel_to == model:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
                style.SQL_TABLE(qn(rel_to._meta.db_table)) + ' (' +
                style.SQL_FIELD(qn(rel_to._meta.get_field(
                    field.rel.field_name).column)) + ')' +
                self.connection.ops.deferrable_sql()
            ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True
        return output, pending

    def sql_for_pending_references(self, model, style, pending_references):
        """
        Returns any ALTER TABLE statements to add constraints after the fact.
        """
        opts = model._meta
        if not opts.managed or opts.swapped:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (
                    r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
                    ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table), qn(truncate_name(
                        r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # Each pending batch is emitted exactly once.
            del pending_references[model]
        return final_output

    def sql_indexes_for_model(self, model, style):
        """
        Returns the CREATE INDEX SQL statements for a single model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        for f in model._meta.local_fields:
            output.extend(self.sql_indexes_for_field(model, f, style))
        for fs in model._meta.index_together:
            fields = [model._meta.get_field_by_name(f)[0] for f in fs]
            output.extend(self.sql_indexes_for_fields(model, fields, style))
        return output

    def sql_indexes_for_field(self, model, f, style):
        """
        Return the CREATE INDEX SQL statements for a single model field.
        """
        # Unique fields already get an implicit index from their constraint.
        if f.db_index and not f.unique:
            return self.sql_indexes_for_fields(model, [f], style)
        else:
            return []

    def sql_indexes_for_fields(self, model, fields, style):
        # Return a one-element list with the CREATE INDEX statement covering
        # `fields` (single- or multi-column).
        if len(fields) == 1 and fields[0].db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
        elif model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
        else:
            tablespace_sql = ""
        if tablespace_sql:
            tablespace_sql = " " + tablespace_sql
        field_names = []
        qn = self.connection.ops.quote_name
        for f in fields:
            field_names.append(style.SQL_FIELD(qn(f.column)))
        # NOTE(review): _digest is handed a single list argument here, so
        # the digest is of the list's string form — confirm this matches
        # the naming used elsewhere.
        index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
        return [
            style.SQL_KEYWORD("CREATE INDEX") + " " +
            style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
            style.SQL_KEYWORD("ON") + " " +
            style.SQL_TABLE(qn(model._meta.db_table)) + " " +
            "(%s)" % style.SQL_FIELD(", ".join(field_names)) +
            "%s;" % tablespace_sql,
        ]

    def sql_destroy_model(self, model, references_to_delete, style):
        """
        Return the DROP TABLE and restraint dropping statements for a single
        model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        # Drop the table now
        qn = self.connection.ops.quote_name
        output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
                              style.SQL_TABLE(qn(model._meta.db_table)))]
        if model in references_to_delete:
            output.extend(self.sql_remove_table_constraints(
                model, references_to_delete, style))
        if model._meta.has_auto_field:
            # Some backends pair an explicit sequence with auto fields.
            ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
            if ds:
                output.append(ds)
        return output

    def sql_remove_table_constraints(self, model, references_to_delete, style):
        """
        Return ALTER TABLE statements dropping the foreign keys that point
        at `model`, consuming its entry in `references_to_delete`.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        qn = self.connection.ops.quote_name
        for rel_class, f in references_to_delete[model]:
            table = rel_class._meta.db_table
            col = f.column
            r_table = model._meta.db_table
            r_col = model._meta.get_field(f.rel.field_name).column
            # Mirrors the constraint name generated in
            # sql_for_pending_references.
            r_name = '%s_refs_%s_%s' % (
                col, r_col, self._digest(table, r_table))
            output.append('%s %s %s %s;' % (
                style.SQL_KEYWORD('ALTER TABLE'),
                style.SQL_TABLE(qn(table)),
                style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
                style.SQL_FIELD(qn(truncate_name(
                    r_name, self.connection.ops.max_name_length())))
            ))
        del references_to_delete[model]
        return output

    def sql_destroy_indexes_for_model(self, model, style):
        """
        Returns the DROP INDEX SQL statements for a single model.
        """
        if not model._meta.managed or model._meta.proxy or model._meta.swapped:
            return []
        output = []
        for f in model._meta.local_fields:
            output.extend(self.sql_destroy_indexes_for_field(model, f, style))
        for fs in model._meta.index_together:
            fields = [model._meta.get_field_by_name(f)[0] for f in fs]
            output.extend(self.sql_destroy_indexes_for_fields(model, fields, style))
        return output

    def sql_destroy_indexes_for_field(self, model, f, style):
        """
        Return the DROP INDEX SQL statements for a single model field.
        """
        if f.db_index and not f.unique:
            return self.sql_destroy_indexes_for_fields(model, [f], style)
        else:
            return []

    def sql_destroy_indexes_for_fields(self, model, fields, style):
        # Return a one-element list with the DROP INDEX statement for the
        # index created by sql_indexes_for_fields (same name derivation).
        # NOTE(review): tablespace_sql and field_names are computed but never
        # used in the returned statement — apparently dead code; confirm.
        if len(fields) == 1 and fields[0].db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
        elif model._meta.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
        else:
            tablespace_sql = ""
        if tablespace_sql:
            tablespace_sql = " " + tablespace_sql
        field_names = []
        qn = self.connection.ops.quote_name
        for f in fields:
            field_names.append(style.SQL_FIELD(qn(f.column)))
        index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
        return [
            style.SQL_KEYWORD("DROP INDEX") + " " +
            style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
            ";",
        ]

    def create_test_db(self, verbosity=1, autoclobber=False, serialize=True):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command
        test_database_name = self._get_test_db_name()
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print("Creating test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr))
        self._create_test_db(verbosity, autoclobber)
        self.connection.close()
        # Point both the global settings and this connection at the new DB.
        settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
        self.connection.settings_dict["NAME"] = test_database_name
        # We report migrate messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded).
        call_command(
            'migrate',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            test_database=True,
            test_flush=True,
        )
        # We then serialize the current state of the database into a string
        # and store it on the connection. This slightly horrific process is so people
        # who are testing on databases without transactions or who are using
        # a TransactionTestCase still get a clean database on every test run.
        if serialize:
            self.connection._test_serialized_contents = self.serialize_db_to_string()
        call_command('createcachetable', database=self.connection.alias)
        # Ensure a connection for the side effect of initializing the test database.
        self.connection.ensure_connection()
        return test_database_name

    def serialize_db_to_string(self):
        """
        Serializes all data in the database into a JSON string.
        Designed only for test runner usage; will not handle large
        amounts of data.
        """
        # Build list of all apps to serialize
        from django.db.migrations.loader import MigrationLoader
        loader = MigrationLoader(self.connection)
        app_list = []
        for app_config in apps.get_app_configs():
            if (
                app_config.models_module is not None and
                app_config.label in loader.migrated_apps and
                app_config.name not in settings.TEST_NON_SERIALIZED_APPS
            ):
                app_list.append((app_config, None))

        # Make a function to iteratively return every object
        def get_objects():
            for model in sort_dependencies(app_list):
                if not model._meta.proxy and model._meta.managed and router.allow_migrate(self.connection.alias, model):
                    queryset = model._default_manager.using(self.connection.alias).order_by(model._meta.pk.name)
                    for obj in queryset.iterator():
                        yield obj
        # Serialise to a string
        out = StringIO()
        serializers.serialize("json", get_objects(), indent=None, stream=out)
        return out.getvalue()

    def deserialize_db_from_string(self, data):
        """
        Reloads the database with data from a string generated by
        the serialize_db_to_string method.
        """
        data = StringIO(data)
        for obj in serializers.deserialize("json", data, using=self.connection.alias):
            obj.save()

    def _get_test_db_name(self):
        """
        Internal implementation - returns the name of the test DB that will be
        created. Only useful when called from create_test_db() and
        _create_test_db() and when no external munging is done with the 'NAME'
        or 'TEST_NAME' settings.
        """
        if self.connection.settings_dict['TEST']['NAME']:
            return self.connection.settings_dict['TEST']['NAME']
        return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']

    def _create_test_db(self, verbosity, autoclobber):
        """
        Internal implementation - creates the test db tables.
        """
        suffix = self.sql_table_creation_suffix()
        test_database_name = self._get_test_db_name()
        qn = self.connection.ops.quote_name
        # Create the test database and connect to it.
        with self._nodb_connection.cursor() as cursor:
            try:
                cursor.execute(
                    "CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
            except Exception as e:
                # Creation failed, most likely because the DB already exists;
                # offer to drop and recreate it.
                sys.stderr.write(
                    "Got an error creating the test database: %s\n" % e)
                if not autoclobber:
                    confirm = input(
                        "Type 'yes' if you would like to try deleting the test "
                        "database '%s', or 'no' to cancel: " % test_database_name)
                if autoclobber or confirm == 'yes':
                    try:
                        if verbosity >= 1:
                            print("Destroying old test database '%s'..."
                                  % self.connection.alias)
                        cursor.execute(
                            "DROP DATABASE %s" % qn(test_database_name))
                        cursor.execute(
                            "CREATE DATABASE %s %s" % (qn(test_database_name),
                                                       suffix))
                    except Exception as e:
                        sys.stderr.write(
                            "Got an error recreating the test database: %s\n" % e)
                        sys.exit(2)
                else:
                    print("Tests cancelled.")
                    sys.exit(1)
        return test_database_name

    def destroy_test_db(self, old_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists.
        """
        self.connection.close()
        test_database_name = self.connection.settings_dict['NAME']
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print("Destroying test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr))
        self._destroy_test_db(test_database_name, verbosity)

    def _destroy_test_db(self, test_database_name, verbosity):
        """
        Internal implementation - remove the test db tables.
        """
        # Remove the test database to clean up after
        # ourselves. Connect to the previous database (not the test database)
        # to do so, because it's not allowed to delete a database while being
        # connected to it.
        with self._nodb_connection.cursor() as cursor:
            # Wait to avoid "database is being accessed by other users" errors.
            time.sleep(1)
            cursor.execute("DROP DATABASE %s"
                           % self.connection.ops.quote_name(test_database_name))

    def set_autocommit(self):
        """
        Make sure a connection is in autocommit mode. - Deprecated, not used
        anymore by Django code. Kept for compatibility with user code that
        might use it.
        """
        warnings.warn(
            "set_autocommit was moved from BaseDatabaseCreation to "
            "BaseDatabaseWrapper.", RemovedInDjango18Warning, stacklevel=2)
        return self.connection.set_autocommit(True)

    def sql_table_creation_suffix(self):
        """
        SQL to append to the end of the test table creation statements.
        """
        return ''

    def test_db_signature(self):
        """
        Returns a tuple with elements of self.connection.settings_dict (a
        DATABASES setting value) that uniquely identify a database
        accordingly to the RDBMS particularities.
        """
        settings_dict = self.connection.settings_dict
        return (
            settings_dict['HOST'],
            settings_dict['PORT'],
            settings_dict['ENGINE'],
            settings_dict['NAME']
        )
| {
"content_hash": "fd59ca6b0b24efe94d3e68ebb4859e74",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 120,
"avg_line_length": 41.86055045871559,
"alnum_prop": 0.5718418514946962,
"repo_name": "wfxiang08/django178",
"id": "25e3aa16e033f8ad5234e6e7adb2d9e63f8defcf",
"size": "22836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/backends/creation.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42829"
},
{
"name": "HTML",
"bytes": "169506"
},
{
"name": "JavaScript",
"bytes": "75783"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "9164014"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
from . import observer
OPEN = 1
""" The open status value, meant to be used in situations
where the status of the entity is open (opposite of closed) """
CLOSED = 2
""" Closed status value to be used in entities which have
no pending structured opened and operations are limited """
PENDING = 3
""" The pending status used for transient states (eg: created)
connections under this state must be used carefully """
class Stream(observer.Observable):
    """
    Base class for a "virtual" connection multiplexed on top of a real
    one — for instance an HTTP/2 stream living inside a single TCP
    connection, which enables large performance improvements.

    Its interface deliberately mirrors the one of a connection so that
    owners can treat streams and connections uniformly.
    """

    def __init__(self, owner = None):
        observer.Observable.__init__(self)
        # Streams start in the transient PENDING state and must be
        # explicitly opened before general use.
        self.status = PENDING
        self.owner = owner
        # NOTE(review): assumes `owner` is provided and exposes `.owner`
        # (the underlying connection) — passing None would fail here.
        self.connection = owner.owner

    def reset(self):
        pass

    def open(self):
        # Opening twice is a no-op; otherwise notify the connection owner
        # of the stream creation.
        if self.is_open(): return
        self.status = OPEN
        self.connection.owner.on_stream_c(self)

    def close(self):
        # Closing twice is a no-op; otherwise notify the connection owner
        # of the stream deletion.
        if self.is_closed(): return
        self.status = CLOSED
        self.connection.owner.on_stream_d(self)

    def info_dict(self, full = False):
        return dict(status = self.status)

    def is_open(self):
        return self.status == OPEN

    def is_closed(self):
        return self.status == CLOSED

    def is_pending(self):
        return self.status == PENDING
| {
"content_hash": "ae56f7330800576d3621e28c0e4474f1",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 76,
"avg_line_length": 31.20408163265306,
"alnum_prop": 0.6602354480052322,
"repo_name": "hivesolutions/netius",
"id": "48ae3e847d175d33428cdf7a046df87a8ae8da0f",
"size": "3104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/netius/base/stream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1400497"
}
],
"symlink_target": ""
} |
"""
Python-based HLA:peptide binding prediction cache and IEDB-tool wrapper
========================================================
"""
from .cache import hlaPredCache, RandCache
from .helpers import *
from . import predict
from .iedb_src import predict_binding as iedb_predict
from .new_iedb_predict import *
# Public API re-exported by `from HLAPredCache import *`; keep in sync with
# the symbols imported from the submodules above.
__all__ = ['predict',
           'hlaPredCache',
           'iedb_predict',
           'convertHLAAsterisk',
           'isvalidmer',
           'isvalidHLA',
           'rankEpitopes',
           'rankKmers',
           'rankMers',
           'getIC50',
           'getMers',
           'getMerInds',
           'grabKmer',
           'grabKmerInds',
           'findpeptide',
           'grabOverlappingKmer',
           'overlappingMers',
           'checkHLAs',
           'iedbPepPredict',
           'generateMersFromNT']
| {
"content_hash": "0efb0a19273082be95ed0ff553665ce3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 26.8125,
"alnum_prop": 0.5,
"repo_name": "agartland/HLAPredCache",
"id": "fda6e044aa0a00d4c1b9ba00e1b230be1c643f70",
"size": "859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169031"
}
],
"symlink_target": ""
} |
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: tightens Concept.title and
    # Lesson.title to max_length=200 and refreshes the Lesson.kind choices.
    # Do not hand-edit the operations; generate a follow-up migration instead.

    dependencies = [
        ('ct', '0015_migrate_fsm'),
    ]

    operations = [
        migrations.AlterField(
            model_name='concept',
            name='title',
            field=models.CharField(max_length=200),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='lesson',
            name='kind',
            field=models.CharField(default='base', max_length=50, choices=[('base', 'brief definition and explanation'), ('explanation', 'long explanation'), ('orct', 'Open Response Concept Test question'), ('mcct', 'Concept Inventory Test question'), ('exercise', 'exercise'), ('project', 'project'), ('practice', 'practice exam question'), ('answer', 'answer'), ('errmod', 'error model'), ('data', 'data'), ('case', 'Case Study'), ('e-pedia', 'Encyclopedia'), ('faq', 'frequently asked question'), ('forum', 'forum')]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='lesson',
            name='title',
            field=models.CharField(max_length=200),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "46739f53c86ae59db45ad5f3d95ded4f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 521,
"avg_line_length": 41.58620689655172,
"alnum_prop": 0.5754560530679934,
"repo_name": "cjlee112/socraticqs2",
"id": "5eb100dbb0d696d5a60712b084844f646f003f84",
"size": "1206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/ct/migrations/0016_auto_20150626_0301.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138226"
},
{
"name": "Dockerfile",
"bytes": "3865"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "467395"
},
{
"name": "JavaScript",
"bytes": "234788"
},
{
"name": "Makefile",
"bytes": "4696"
},
{
"name": "Python",
"bytes": "1785754"
},
{
"name": "Shell",
"bytes": "2889"
}
],
"symlink_target": ""
} |
"""
This script discovers all the listening ports and then enumerates the method
calls it sees on each port.
Make sure you have the library installed:
```
$ pip install thrift-tools
```
And then run it as root:
```
$ sudo examples/methods_per_port.py
On port 3030, method rewrite was called
On port 3031, method search was called
...
```
"""
from __future__ import print_function
import argparse
import os
import sys
from thrift_tools.message_sniffer import MessageSnifferOptions, MessageSniffer
# Kernel-exported table of IPv4 TCP sockets (Linux only).
PROC_TCP = '/proc/net/tcp'
def listening_ports():
""" Reads listening ports from /proc/net/tcp """
ports = []
if not os.path.exists(PROC_TCP):
return ports
with open(PROC_TCP) as fh:
for line in fh:
if '00000000:0000' not in line:
continue
parts = line.lstrip(' ').split(' ')
if parts[2] != '00000000:0000':
continue
local_port = parts[1].split(':')[1]
local_port = int('0x' + local_port, base=16)
ports.append(local_port)
return ports
def discover_on_port(port, iface, handler):
    """Start a MessageSniffer for `port` on `iface` that feeds `handler`."""
    config = dict(
        iface=iface, port=port, ip=None, pcap_file=None, protocol=None,
        finagle_thrift=False, read_values=False,
        max_queued=20000, max_message_size=2000, debug=False)
    return MessageSniffer(MessageSnifferOptions(**config), handler)
class MsgHandler(object):
    """Per-port sniffer callback that reports each distinct method once."""

    def __init__(self, port):
        self._port = port
        self._methods = set()

    def __call__(self, timestamp, src, dst, msg):
        method = msg.method
        if method not in self._methods:
            self._methods.add(method)
            print('On port %d, method %s was called' % (self._port, method))
        return True  # must return true, or sniffer will exit
def discover_methods(iface):
    """Sniff every listening port on `iface`, printing each method name.

    Runs until all sniffer threads have finished, or until the user
    interrupts with Ctrl-C.
    """
    sniffers = []
    for port in listening_ports():
        sniff = discover_on_port(port, iface, MsgHandler(port))
        sniffers.append(sniff)
    # done when all sniffers are done (or the user gets tired)
    try:
        while True:
            # Thread.isAlive() was removed in Python 3.9; is_alive() exists
            # on Python 2.6+ and all Python 3 versions.
            if all(not sniff.is_alive() for sniff in sniffers):
                break
    except KeyboardInterrupt:
        pass
def get_flags(argv=None):
    """Parse command line flags.

    :param argv: optional argument list; defaults to ``sys.argv[1:]`` (the
                 standard argparse behavior), so existing callers are
                 unaffected while tests can inject arguments.
    """
    p = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('--iface', type=str, default='eth0', metavar='<iface>',
                   help='The interface to sniff from')
    return p.parse_args(argv)
if __name__ == '__main__':
    # Packet capture needs raw-socket privileges, hence the root check.
    if os.getuid() != 0:
        print('Must be root (or have CAP_NET_ADMIN)')
        sys.exit(1)
    flags = get_flags()
    discover_methods(flags.iface)
| {
"content_hash": "1406f68636e94534bcafecd104f783ae",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 23.084745762711865,
"alnum_prop": 0.6035242290748899,
"repo_name": "shrijeet/thrift-tools",
"id": "77e0b803e0005972c47ce86b3c30cbb1fad9e5f5",
"size": "2747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/methods_per_port.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71706"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``ids`` data array of ``candlestick`` traces."""

    def __init__(self, plotly_name="ids", parent_name="candlestick", **kwargs):
        # Default edit type is "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
| {
"content_hash": "0120785e8b5dcbcdbc7eaf2f240961eb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 35.63636363636363,
"alnum_prop": 0.6173469387755102,
"repo_name": "plotly/plotly.py",
"id": "6f32ec6b9b65375516b8571883590f37a3f89adc",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/candlestick/_ids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import MixinABC, _convert_request, _format_url_section
# Generic return type for the per-operation ``cls`` response hooks.
T = TypeVar("T")
# Signature of the optional callback that post-processes a pipeline response.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Generated SDK clients skip client-side validation of parameters.
_SERIALIZER.client_side_validation = False
def build_get_request(
    vault_name: str,
    resource_group_name: str,
    fabric_name: str,
    container_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for fetching a registered protection container."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # A per-call api-version (kwarg or query param) overrides the default.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Construct URL from the ARM resource-id template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, "str"),
        containerName=_SERIALIZER.url("container_name", container_name, "str"),
    )

    # Construct query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_register_request(
    vault_name: str,
    resource_group_name: str,
    fabric_name: str,
    container_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request for registering a protection container."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # A per-call api-version (kwarg or query param) overrides the default.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # Construct URL from the ARM resource-id template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, "str"),
        containerName=_SERIALIZER.url("container_name", container_name, "str"),
    )

    # Construct query parameters and headers; Content-Type only when a body is sent.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_unregister_request(
    vault_name: str,
    resource_group_name: str,
    fabric_name: str,
    container_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for unregistering a protection container."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # A per-call api-version (kwarg or query param) overrides the default.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Construct URL from the ARM resource-id template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, "str"),
        containerName=_SERIALIZER.url("container_name", container_name, "str"),
    )

    # Construct query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
def build_inquire_request(
    vault_name: str,
    resource_group_name: str,
    fabric_name: str,
    container_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that triggers an inquiry on a container."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # A per-call api-version (kwarg or query param) overrides the default.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Construct URL from the ARM resource-id template.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, "str"),
        containerName=_SERIALIZER.url("container_name", container_name, "str"),
    )

    # Construct query parameters ($filter is optional) and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if filter is not None:
        params["$filter"] = _SERIALIZER.query("filter", filter, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_refresh_request(
    vault_name: str,
    resource_group_name: str,
    fabric_name: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that refreshes (re-discovers) containers on a fabric."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # A per-call api-version (kwarg or query param) overrides the default.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Construct URL from the ARM resource-id template (no container segment here).
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url_template,
        vaultName=_SERIALIZER.url("vault_name", vault_name, "str"),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, "str"),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str"),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, "str"),
    )

    # Construct query parameters ($filter is optional) and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if filter is not None:
        params["$filter"] = _SERIALIZER.query("filter", filter, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
# NOTE(review): this class is code-generator output (autorest); hand edits
# here will be overwritten on regeneration.
class ProtectionContainersOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
        :attr:`protection_containers` attribute.
    """

    # Exposed so callers can reach the models module from the operation group.
    models = _models

    def __init__(self, *args, **kwargs):
        # Generated wiring: the client injects (client, config, serializer,
        # deserializer), positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def get(
        self, vault_name: str, resource_group_name: str, fabric_name: str, container_name: str, **kwargs: Any
    ) -> _models.ProtectionContainerResource:
        """Gets details of the specific container registered to your Recovery Services Vault.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs. Required.
        :type fabric_name: str
        :param container_name: Name of the container whose details need to be fetched. Required.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map status codes to azure-core exceptions; callers may extend this
        # via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        # Per-call api_version overrides the client-level configuration.
        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ProtectionContainerResource]

        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            fabric_name=fabric_name,
            container_name=container_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("ProtectionContainerResource", pipeline_response)

        # ``cls`` lets callers intercept the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}"}  # type: ignore

    @overload
    def register(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        parameters: _models.ProtectionContainerResource,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> Optional[_models.ProtectionContainerResource]:
        """Registers the container with Recovery Services vault.
        This is an asynchronous operation. To track the operation status, use location header to call
        get latest status of
        the operation.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the container. Required.
        :type fabric_name: str
        :param container_name: Name of the container to be registered. Required.
        :type container_name: str
        :param parameters: Request body for operation. Required.
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource or None or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or
         None
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def register(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> Optional[_models.ProtectionContainerResource]:
        """Registers the container with Recovery Services vault.
        This is an asynchronous operation. To track the operation status, use location header to call
        get latest status of
        the operation.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the container. Required.
        :type fabric_name: str
        :param container_name: Name of the container to be registered. Required.
        :type container_name: str
        :param parameters: Request body for operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource or None or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or
         None
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def register(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        parameters: Union[_models.ProtectionContainerResource, IO],
        **kwargs: Any
    ) -> Optional[_models.ProtectionContainerResource]:
        """Registers the container with Recovery Services vault.
        This is an asynchronous operation. To track the operation status, use location header to call
        get latest status of
        the operation.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the container. Required.
        :type fabric_name: str
        :param container_name: Name of the container to be registered. Required.
        :type container_name: str
        :param parameters: Request body for operation. Is either a model type or a IO type. Required.
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource or None or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or
         None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map status codes to azure-core exceptions; callers may extend this
        # via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.ProtectionContainerResource]]

        content_type = content_type or "application/json"

        # Raw IO/bytes bodies are sent as-is; model bodies are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "ProtectionContainerResource")

        request = build_register_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            fabric_name=fabric_name,
            container_name=container_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.register.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 202 means the registration was accepted but has no body yet; only a
        # 200 response carries the resource.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize("ProtectionContainerResource", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    register.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}"}  # type: ignore

    @distributed_trace
    def unregister(  # pylint: disable=inconsistent-return-statements
        self, vault_name: str, resource_group_name: str, fabric_name: str, container_name: str, **kwargs: Any
    ) -> None:
        """Unregisters the given container from your Recovery Services Vault. This is an asynchronous
        operation. To determine
        whether the backend service has finished processing the request, call Get Container Operation
        Result API.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs. Required.
        :type fabric_name: str
        :param container_name: Name of the container which needs to be unregistered from the Recovery
         Services Vault. Required.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map status codes to azure-core exceptions; callers may extend this
        # via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_unregister_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            fabric_name=fabric_name,
            container_name=container_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.unregister.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    unregister.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}"}  # type: ignore

    @distributed_trace
    def inquire(  # pylint: disable=inconsistent-return-statements
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Inquires all the protectable items under the given container.

        This is an async operation and the results should be tracked using location header or
        Azure-async-url.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Fabric Name associated with the container. Required.
        :type fabric_name: str
        :param container_name: Name of the container in which inquiry needs to be triggered. Required.
        :type container_name: str
        :param filter: OData filter options. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map status codes to azure-core exceptions; callers may extend this
        # via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_inquire_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            fabric_name=fabric_name,
            container_name=container_name,
            subscription_id=self._config.subscription_id,
            filter=filter,
            api_version=api_version,
            template_url=self.inquire.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Inquiry is accepted asynchronously; only 202 is a success here.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    inquire.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire"}  # type: ignore

    @distributed_trace
    def refresh(  # pylint: disable=inconsistent-return-statements
        self, vault_name: str, resource_group_name: str, fabric_name: str, filter: Optional[str] = None, **kwargs: Any
    ) -> None:
        """Discovers all the containers in the subscription that can be backed up to Recovery Services
        Vault. This is an
        asynchronous operation. To know the status of the operation, call GetRefreshOperationResult
        API.

        :param vault_name: The name of the recovery services vault. Required.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present. Required.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated the container. Required.
        :type fabric_name: str
        :param filter: OData filter options. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map status codes to azure-core exceptions; callers may extend this
        # via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_refresh_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            fabric_name=fabric_name,
            subscription_id=self._config.subscription_id,
            filter=filter,
            api_version=api_version,
            template_url=self.refresh.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )

        response = pipeline_response.http_response

        # Refresh is accepted asynchronously; only 202 is a success here.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    refresh.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers"}  # type: ignore
| {
"content_hash": "ad4fbf616b8a7e3314f015fb0faa44b4",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 244,
"avg_line_length": 44.038904899135446,
"alnum_prop": 0.6570362857049373,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d367044f22364dc4e7358e8c73f3c098d326bcaa",
"size": "31063",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/activestamp/operations/_protection_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
toradbapi
=========
Wrapper for twisted.enterprise.adbapi.ConnectionPool to use with tornado.
Copyright (c) 2014, Timofey Trukhanov.
MIT, see LICENSE for more details.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from twisted.enterprise.adbapi import ConnectionPool as TxConnectionPool
from tornado.concurrent import TracebackFuture
class ConnectionPool(object):
    """
    Wrapper for twisted.enterprise.adbapi.ConnectionPool to use with tornado.
    """

    def __init__(self, *args, **kwargs):
        # All constructor arguments are forwarded verbatim to twisted's pool.
        self._pool = TxConnectionPool(*args, **kwargs)

    def run_query(self, *args, **kwargs):
        """Run a SELECT-style query; the future resolves with the fetched rows."""
        deferred = self._pool.runQuery(*args, **kwargs)
        return self._defer_to_future(deferred)

    def run_operation(self, *args, **kwargs):
        """Run a statement that produces no result set (INSERT/UPDATE/DDL)."""
        deferred = self._pool.runOperation(*args, **kwargs)
        return self._defer_to_future(deferred)

    def run_interaction(self, *args, **kwargs):
        """Run a callable inside a transaction with a cursor as first argument."""
        deferred = self._pool.runInteraction(*args, **kwargs)
        return self._defer_to_future(deferred)

    def close(self):
        """Shut down the underlying twisted connection pool."""
        self._pool.close()

    @staticmethod
    def _defer_to_future(defer):
        """Bridge a twisted Deferred into a tornado Future."""
        future = TracebackFuture()

        def on_failure(failure):
            # Preserve the original traceback so tornado re-raises it intact.
            future.set_exc_info((failure.type, failure.value, failure.tb))

        defer.addCallbacks(future.set_result, on_failure)
        return future
| {
"content_hash": "f69bb4a7a3033099b1df90439fbcfd3b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 80,
"avg_line_length": 29.6,
"alnum_prop": 0.6539039039039038,
"repo_name": "geerk/toradbapi",
"id": "42a4e154968ca87f5460b1ab5cd27ec22727aea5",
"size": "1332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toradbapi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10737"
}
],
"symlink_target": ""
} |
from django import template
from finial.util import user_has_override
# Registry for this module's template tags/filters (loaded via {% load %}).
register = template.Library()
@register.filter
def has_finial_flag(user, flag):
    """Template filter: True if *user* has the finial override *flag* set."""
    return user_has_override(user, flag)
| {
"content_hash": "185abcd9859bc52fd6e10b2650a78029",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 41,
"avg_line_length": 24.125,
"alnum_prop": 0.772020725388601,
"repo_name": "urbanairship/django-finial",
"id": "a294633dc7fdeb80ec753920725678b18be31d94",
"size": "193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finial/templatetags/finial_flags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "41915"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
from cargo import *
import json
import pickle
from psycopg2.extensions import *
from psycopg2.extras import *
from cargo.fields.binary import cargobytes
from vital.debug import Compare, RandData
from vital.cache import high_pickle
# Micro-benchmark comparing cargobytes against the builtin bytes type over
# 1e6 iterations. NOTE(review): ``high_pickle.dumps('foo')`` is evaluated
# once here and its result passed to ``time`` — presumably Compare.time
# handles the repeated timing itself; confirm against vital.debug.Compare.
c = Compare(cargobytes, bytes)
c.time(1E6, high_pickle.dumps('foo'))
| {
"content_hash": "45b5f619ff88a65d8ac06932280e7149",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.7947019867549668,
"repo_name": "jaredlunde/cargo-orm",
"id": "1bc335cd27362ce6157a5d0a7be87da4de7c3a85",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "unit_bench/dbval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1155740"
},
{
"name": "Shell",
"bytes": "288"
}
],
"symlink_target": ""
} |
MAIN_TEMPLATE="""# PROD BUILDING STEPS
options:
machineType: 'E2_HIGHCPU_32'
env:
- DOCKER_CLI_EXPERIMENTAL=enabled
steps:
- name: 'docker/binfmt:a7996909642ee92942dcd6cff44b9b95f08dad64'
- name: 'gcr.io/cloud-builders/docker'
id: multi_arch_step1
args:
- 'buildx'
- 'create'
- '--name'
- 'mybuilder'
- name: 'gcr.io/cloud-builders/docker'
id: multi_arch_step2
args:
- 'buildx'
- 'use'
- 'mybuilder'
waitFor: ['multi_arch_step1']
- name: 'gcr.io/cloud-builders/docker'
id: multi_arch_step3
args:
- 'buildx'
- 'inspect'
- '--bootstrap'
waitFor: ['multi_arch_step2']
{BUILDSTEPS}
# END OF PROD BUILDING STEPS
{MULTIARCH_BUILDSTEPS}
- name: 'gcr.io/cloud-builders/docker'
id: dockersecret
entrypoint: 'bash'
args: ['-c', 'docker login --username=$_USERNAME --password=$$PASSWORD']
secretEnv: ['PASSWORD']
{DOCKER_PUSHSTEPS}
images:
{GCR_IO_TAGS_SORTED}
secrets:
- kmsKeyName: projects/google.com:cloudsdktool/locations/global/keyRings/docker/cryptoKeys/dockerhub-password
secretEnv:
PASSWORD: |
CiQA9btlfpg/kWmwXQvrNXtkVpu2tDdD2VOi1FYd3mmjCUGaK4YSNwC8sn1MepjracHAg8VAQEWm
s26BTGccqD1NTS83DGFdY9moRGhSPm4WJKCg2tTQKYeTfdqUjjM=
timeout: 7200s"""
# GCR project and registry prefixes the images are published under.
GCRIO_PROJECT='google.com/cloudsdktool'
GCR_PREFIXES = ['gcr.io', 'eu.gcr.io', 'asia.gcr.io', 'us.gcr.io']
# Docker Hub namespace and the two image names (legacy and rebranded).
DOCKERHUB_PREFIX='google'
OLD_NAME='cloud-sdk'
REBRAND_NAME='google-cloud-cli'
# All image variants; the subset in MULTI_ARCH is built with docker buildx.
IMAGES=['alpine', 'debian_slim', 'default', 'debian_component_based', 'emulators']
MULTI_ARCH=['debian_slim', 'debian_component_based', 'alpine', 'emulators']
# Tag label used for each variant ('' means the bare/latest tag).
LABEL_FOR_IMAGE={
  'alpine': 'alpine',
  'debian_slim': 'slim',
  'default': '',
  'debian_component_based': 'debian_component_based',
  'emulators': 'emulators'
}
def MakeGcrTags(label_without_tag,
                label_with_tag,
                maybe_hypen,
                include_old_name=True,
                include_rebrand_name=True):
  """Builds the GCR tag list for one image variant.

  For every registry prefix this emits, per enabled image name, a floating
  tag (no build version) followed by a versioned `$TAG_NAME` tag.

  Args:
    label_without_tag: label for the floating tag (e.g. 'slim' or 'latest').
    label_with_tag: label appended after `$TAG_NAME` on versioned tags.
    maybe_hypen: '-' separator between `$TAG_NAME` and the label, or ''.
    include_old_name: emit tags under the legacy image name.
    include_rebrand_name: emit tags under the rebranded image name.

  Returns:
    A list of single-quoted image references.
  """
  tags = []
  for registry in GCR_PREFIXES:
    if include_old_name:
      tags.append('\'{gcrprefix}/{gcrio_project}/{old_name}:{label}\''.format(
          gcrprefix=registry,
          gcrio_project=GCRIO_PROJECT,
          old_name=OLD_NAME,
          label=label_without_tag))
      tags.append(
          '\'{gcr_prefix}/{gcrio_project}/{old_name}:$TAG_NAME{maybe_hypen}{label}\''
          .format(gcr_prefix=registry,
                  gcrio_project=GCRIO_PROJECT,
                  old_name=OLD_NAME,
                  maybe_hypen=maybe_hypen,
                  label=label_with_tag))
    if include_rebrand_name:
      tags.append('\'{gcrprefix}/{gcrio_project}/{rebrand_name}:{label}\''.format(
          gcrprefix=registry,
          gcrio_project=GCRIO_PROJECT,
          rebrand_name=REBRAND_NAME,
          label=label_without_tag))
      tags.append(
          '\'{gcr_prefix}/{gcrio_project}/{rebrand_name}:$TAG_NAME{maybe_hypen}{label}\''
          .format(gcr_prefix=registry,
                  gcrio_project=GCRIO_PROJECT,
                  rebrand_name=REBRAND_NAME,
                  maybe_hypen=maybe_hypen,
                  label=label_with_tag))
  return tags
# Make all the tags and save them.
# tags[i] holds tags produced by the classic single-arch `docker build`;
# multi_arch_tags[i] holds tags that `docker buildx` builds and pushes itself.
tags={}
multi_arch_tags={}
for i in IMAGES:
  tags[i]=[]
  if i in MULTI_ARCH:
    multi_arch_tags[i]=[]
  label_name = LABEL_FOR_IMAGE[i]
  label_without_tag = label_name
  label_with_tag = label_name
  maybe_hypen = '-'
  # The default image has no label: its floating tag is 'latest' and its
  # versioned tag is just $TAG_NAME with no separator.
  if i == 'default':
    label_without_tag = 'latest'
    maybe_hypen = ''
  # Make dockerhub tags for i
  tags[i].append('\'{dockerhub_prefix}/{old_name}:{label}\''
                 .format(dockerhub_prefix=DOCKERHUB_PREFIX,
                         old_name=OLD_NAME,
                         label=label_without_tag))
  tags[i].append('\'{dockerhub_prefix}/{old_name}:$TAG_NAME{maybe_hypen}{label}\''
                 .format(dockerhub_prefix=DOCKERHUB_PREFIX,
                         old_name=OLD_NAME,
                         maybe_hypen=maybe_hypen,
                         label=label_with_tag))
  # Make gcr tags for i
  if i not in MULTI_ARCH:
    tags[i].extend(MakeGcrTags(label_without_tag, label_with_tag, maybe_hypen))
  else:
    # old gcr tags go into tags
    tags[i].extend(MakeGcrTags(label_without_tag,
                               label_with_tag,
                               maybe_hypen,
                               include_rebrand_name=False))
    # new gcr tags go into multiarch tags
    multi_arch_tags[i].extend(MakeGcrTags(label_without_tag,
                                          label_with_tag,
                                          maybe_hypen,
                                          include_old_name=False))
# Classic single-arch build steps, one per image, each tagging all of tags[i].
build_steps=''
for i in IMAGES:
  image_directory = '{}/'.format(i)
  if i == 'default':
    image_directory = '.'
  build_step = """- name: 'gcr.io/cloud-builders/docker'
  id: {image_name}
  args: ['build', {tags}, '{image_directory}']
  waitFor: ['-']"""
  output_build_step = build_step.format(
      image_name=i,
      tags=', '.join(['\'-t\', {}'.format(t) for t in tags[i]]),
      image_directory=image_directory)
  if len(build_steps) > 0:
    build_steps+='\n'
  build_steps+=output_build_step
# buildx steps for the multi-arch images; these push directly ('--push') and
# wait for the buildx builder bootstrap step.
multi_arch_build_steps=''
for i in MULTI_ARCH:
  image_directory = '{}/'.format(i)
  if i == 'default':
    image_directory = '.'
  multi_arch_build_step = """- name: 'gcr.io/cloud-builders/docker'
  id: multi_arch_{image_name}
  args: ['buildx', 'build', '--platform', 'linux/arm64,linux/amd64', {tags}, '{image_directory}', '--push']
  waitFor: ['multi_arch_step3']"""
  output_build_step = multi_arch_build_step.format(
      image_name=i,
      tags=', '.join(['\'-t\', {}'.format(t) for t in multi_arch_tags[i]]),
      image_directory=image_directory)
  if len(multi_arch_build_steps) > 0:
    multi_arch_build_steps+='\n'
  multi_arch_build_steps+=output_build_step
# Explicit push steps are only needed for Docker Hub tags (GCR tags are
# pushed by Cloud Build via the 'images:' section instead).
docker_push_steps=''
for i in IMAGES:
  push_step = """- name: 'gcr.io/cloud-builders/docker'
  args: ['push', {tag}]
  waitFor: ['dockersecret', '{build_step}']"""
  for tag in tags[i]:
    if tag.startswith('\'google/cloud-sdk'):
      if len(docker_push_steps) > 0:
        docker_push_steps+='\n'
      docker_push_steps+=push_step.format(tag=tag, build_step=i)
# Everything that is not a Docker Hub tag goes, sorted, into 'images:'.
all_gcr_io_tags_for_images=''
all_images_tags=[]
for i in IMAGES:
  all_images_tags.extend([t for t in tags[i] if not t.startswith('\'google/cloud-sdk')])
for tag in sorted(all_images_tags):
  if len(all_gcr_io_tags_for_images) > 0:
    all_gcr_io_tags_for_images+='\n'
  all_gcr_io_tags_for_images+='- {}'.format(tag)
# Emit the final cloudbuild config on stdout.
print(MAIN_TEMPLATE.format(
    BUILDSTEPS=build_steps,
    MULTIARCH_BUILDSTEPS=multi_arch_build_steps,
    DOCKER_PUSHSTEPS=docker_push_steps,
    GCR_IO_TAGS_SORTED=all_gcr_io_tags_for_images
))
| {
"content_hash": "3543c8d3e2be3bb3a2168868428f61c2",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 109,
"avg_line_length": 34.995024875621894,
"alnum_prop": 0.575774808075064,
"repo_name": "GoogleCloudPlatform/cloud-sdk-docker",
"id": "02fedbdfaebbb9b65a24344d8a2fdc5f620239c3",
"size": "7034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generate_cloudbuild.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "9856"
},
{
"name": "Python",
"bytes": "7034"
}
],
"symlink_target": ""
} |
# Example: a scatter plot rendered through an interactive power-scale
# transform; scrolling the mouse wheel changes the scale's exponent.
import numpy as np
from glumpy import app
from glumpy.graphics.collections import PointCollection
from glumpy.transforms import PowerScale, Position, Viewport
# 1024x1024 window with a white background.
window = app.Window(1024,1024, color=(1,1,1,1))
@window.event
def on_draw(dt):
    # Redraw the whole point collection every frame.
    window.clear()
    points.draw()
@window.event
def on_mouse_scroll(x,y,dx,dy):
    # Scrolling down grows the exponent (capped at 10.0); scrolling up
    # shrinks it (floored at 0.1).
    if dy < 0:
        transform["exponent"] = np.minimum(10.0, 1.1*transform["exponent"])
    else:
        transform["exponent"] = np.maximum(0.1, transform["exponent"]/1.1)
# Power-scale position transform: exponent 2 over the domain [-10, +10].
transform = Position(PowerScale())
transform["exponent"] = 2
transform["domain"] = -10,+10
points = PointCollection("agg", transform = transform)
# 10000 random points; sqrt + copysign keeps each coordinate's sign while
# compressing the uniform [-100, 100] samples.
P = np.random.uniform(-100,100,(10000,3))
P = np.copysign(np.sqrt(abs(P)),P)
points.append(P)
# Attach the collection's transform and viewport so window events update them.
window.attach(points["transform"])
window.attach(points["viewport"])
app.run()
| {
"content_hash": "444d2c9d62e961f889f994be7de09c71",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 25.71875,
"alnum_prop": 0.6998784933171325,
"repo_name": "glumpy/glumpy",
"id": "392c8237d8a3d9d3c0d802f7eaf312b140d801cf",
"size": "1093",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/transform-power-scale.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.views.decorators.csrf import csrf_exempt
from bootcamp.traceroute import views
# URL routes for the traceroute app.  The API-style endpoints are wrapped in
# csrf_exempt so POSTs without a CSRF token are accepted.
urlpatterns = [
    url(r'^$', views.traceroute, name='traceroute'),
    url(r'^inttraceroute$', views.inttraceroute, name='inttraceroute'),
    ## traceroute with ansible
    url(r'^gettrace/$', views.gettraceroute, name='gettrace'),
    url(r'^getinterfacetrace/$', views.getinterfacetraceroute, name='getinterfacetrace'),
    url(r'^runtraceroute/$', csrf_exempt(views.runtraceroute), name='runtraceroute'),
    url(r'^runinterfacetraceroute/$', csrf_exempt(views.runinterfacetraceroute), name='runinterfacetraceroute'),
    ## new traceroute using custom library
    url(r'^runtrace/$', views.runtrace, name='runtrace'),
    url(r'^runtraceapi/$', csrf_exempt(views.runtraceapi), name='runtraceapi'),
    url(r'^runinterfacetrace/$', views.runinterfacetrace, name='runinterfacetrace'),
    url(r'^runinterfacetraceapi/$', csrf_exempt(views.runinterfacetraceapi), name='runinterfacetraceapi'),
    # url(r'^preview/$', views.preview, name='preview'),
    # url(r'^drafts/$', views.drafts, name='drafts'),
    # url(r'^comment/$', views.comment, name='comment'),
    # url(r'^tag/(?P<tag_name>.+)/$', views.tag, name='tag'),
    # url(r'^edit/(?P<id>\d+)/$', views.edit, name='edit_article'),
    # url(r'^(?P<slug>[-\w]+)/$', views.article, name='article'),
]
"content_hash": "174d434ea28421fef168db9bafebd7bb",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 112,
"avg_line_length": 53.84615384615385,
"alnum_prop": 0.6914285714285714,
"repo_name": "davismathew/netbot-django",
"id": "6453786b8752f10274cd418f81613a7282db4a0a",
"size": "1417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootcamp/traceroute/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15529"
},
{
"name": "HTML",
"bytes": "192483"
},
{
"name": "JavaScript",
"bytes": "105116"
},
{
"name": "Python",
"bytes": "231279"
}
],
"symlink_target": ""
} |
"""
A script removing animations from SVG graphics.
"""
import sys, os, re
# etree fails utterly at producing nice-looking XML
from xml.dom import minidom
def process(inpt, outp):
    """Strip SVG animations from *inpt* and write the result to *outp*.

    Removes every ``animate``/``animateTransform`` element, plus any
    ``style`` or ``script`` element whose ``key`` attribute is
    ``"animation"``, then tidies the whitespace the removals leave behind.

    Args:
        inpt: file-like object containing the SVG document.
        outp: writable file-like object receiving the cleaned document.
    """
    def traverse(node):
        # Iterate over a snapshot of the children: in minidom, childNodes is
        # a plain list, so calling removeChild() while looping over the live
        # list would skip the sibling immediately after each removed node
        # (e.g. the second of two consecutive <animate> elements).
        for child in list(node.childNodes):
            if child.nodeType != minidom.Node.ELEMENT_NODE:
                continue
            elif child.tagName in ('animate', 'animateTransform'):
                node.removeChild(child)
            elif child.tagName in ('style', 'script'):
                if child.getAttribute('key') == 'animation':
                    node.removeChild(child)
            else:
                traverse(child)
        node.normalize()
        if len(node.childNodes) == 0: return
        # Collapse the extra blank lines that removed elements leave in the
        # leading/trailing whitespace-only text nodes.
        for child in (node.childNodes[0], node.childNodes[-1]):
            if child.nodeType != minidom.Node.TEXT_NODE:
                continue
            if not child.data.isspace() or child.data.count('\n') <= 1:
                continue
            if len(node.childNodes) == 1:
                # Only whitespace left: drop the node entirely.
                node.removeChild(child)
                return
            child.data = re.sub(r'\n.*\n', r'\n', child.data)
    document = minidom.parse(inpt)
    traverse(document.documentElement)
    outp.write('<?xml version="1.0" encoding="utf-8"?>\n')
    document.documentElement.writexml(outp)
    outp.write('\n')
def main():
    """Command-line entry point: deanimate argv[1] into argv[2]."""
    args = sys.argv
    if len(args) == 3:
        with open(args[1]) as source, open(args[2], 'w') as target:
            process(source, target)
        return
    # Wrong argument count: print usage and bail out.
    sys.stderr.write('USAGE: %s input output\n' % args[0])
    sys.stderr.flush()
    sys.exit(0)
if __name__ == '__main__': main()
| {
"content_hash": "5202dab3ecedc4343c58e0ce42d1b2a9",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 71,
"avg_line_length": 34.255319148936174,
"alnum_prop": 0.5627329192546584,
"repo_name": "CylonicRaider/Instant",
"id": "be44a976b641ca294be115a5fcc626a3cd29e12c",
"size": "1658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/deanimate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48065"
},
{
"name": "HTML",
"bytes": "7487"
},
{
"name": "Java",
"bytes": "583676"
},
{
"name": "JavaScript",
"bytes": "407493"
},
{
"name": "Makefile",
"bytes": "4652"
},
{
"name": "Python",
"bytes": "327455"
},
{
"name": "Shell",
"bytes": "4152"
}
],
"symlink_target": ""
} |
"""
Provides FileStorage implementation for local filesystem.
This is useful for storing files inside a local path.
"""
import os
import uuid
import shutil
import json
from datetime import datetime
from .interfaces import FileStorage, StoredFile
from . import utils
from .._compat import unicode_text
class LocalStoredFile(StoredFile):
    """:class:`StoredFile` implementation backed by the local filesystem.

    Reads the file metadata from the ``metadata.json`` entry inside the
    file's directory; the payload itself is opened lazily on first access.
    """
    def __init__(self, file_id, local_path):
        _check_file_id(file_id)
        self._metadata_path = _metadata_path(local_path)
        self._file_path = _file_path(local_path)
        self._file = None  # payload handle, opened lazily by read()/close()

        try:
            metadata = open(self._metadata_path, 'r')
        except IOError:
            # Narrowed from a bare ``except``: only a missing/unreadable
            # metadata file means the stored file does not exist.
            raise IOError('File %s not existing' % file_id)

        # Defaults for entries an older metadata file might omit.
        metadata_info = {'filename': 'unnamed',
                         'content_type': 'application/octet-stream',
                         'last_modified': None}
        with metadata:
            try:
                metadata_content = metadata.read()
                metadata_info.update(json.loads(metadata_content))
                last_modified = metadata_info['last_modified']
                if last_modified:
                    metadata_info['last_modified'] = datetime.strptime(last_modified,
                                                                      '%Y-%m-%d %H:%M:%S')
            except Exception:
                # Bad JSON or a malformed date both mean corrupt metadata.
                raise ValueError('Invalid file metadata for %s' % file_id)
        super(LocalStoredFile, self).__init__(file_id=file_id, **metadata_info)
    def read(self, n=-1):
        """Read up to ``n`` bytes of the payload (everything when -1)."""
        if self._file is None:
            self._file = open(self._file_path, 'rb')
        return self._file.read(n)
    def close(self):
        """Close the payload; safe even if it was never read."""
        if self._file is None:
            # This is to guarantee that closing a file
            # before even reading it behaves correctly
            self._file = open(self._file_path, 'rb')
        self._file.close()
    @property
    def closed(self):
        # A file that was never opened has not been closed.
        if self._file is None:
            return False
        return self._file.closed
class LocalFileStorage(FileStorage):
    """:class:`depot.io.interfaces.FileStorage` implementation that stores files locally.

    All the files are stored inside a directory specified by the ``storage_path``
    parameter.  Each file gets its own subdirectory (named after its UUID)
    containing the payload (``file``) and its metadata (``metadata.json``).
    """
    def __init__(self, storage_path):
        self.storage_path = storage_path
    def __local_path(self, fileid):
        # Directory holding both the payload and the metadata for ``fileid``.
        return os.path.join(self.storage_path, fileid)
    def get(self, file_or_id):
        """Return a :class:`LocalStoredFile` for the given file or id.

        Raises ``IOError`` (from ``LocalStoredFile``) when the file is missing.
        """
        fileid = self.fileid(file_or_id)
        local_file_path = self.__local_path(fileid)
        return LocalStoredFile(fileid, local_file_path)
    def __save_file(self, file_id, content, filename, content_type=None):
        # Persist payload + metadata; the target directory must not exist yet.
        local_file_path = self.__local_path(file_id)
        os.makedirs(local_file_path)
        saved_file_path = _file_path(local_file_path)
        if hasattr(content, 'read'):
            # File-like object: stream to disk without loading it in memory.
            with open(saved_file_path, 'wb') as fileobj:
                shutil.copyfileobj(content, fileobj)
        else:
            if isinstance(content, unicode_text):
                raise TypeError('Only bytes can be stored, not unicode')
            with open(saved_file_path, 'wb') as fileobj:
                fileobj.write(content)
                fileobj.flush()
        metadata = {'filename': filename,
                    'content_type': content_type,
                    'content_length': os.path.getsize(saved_file_path),
                    'last_modified': utils.timestamp()}
        with open(_metadata_path(local_file_path), 'w') as metadatafile:
            metadatafile.write(json.dumps(metadata))
    def create(self, content, filename=None, content_type=None):
        """Store ``content`` as a new file and return its generated id."""
        new_file_id = str(uuid.uuid1())
        content, filename, content_type = self.fileinfo(content, filename, content_type)
        self.__save_file(new_file_id, content, filename, content_type)
        return new_file_id
    def replace(self, file_or_id, content, filename=None, content_type=None):
        """Replace an existing file's content and metadata, keeping its id."""
        fileid = self.fileid(file_or_id)
        _check_file_id(fileid)
        # First check file existed and we are not using replace
        # as a way to force a specific file id on creation.
        if not self.exists(fileid):
            raise IOError('File %s not existing' % file_or_id)
        content, filename, content_type = self.fileinfo(content, filename, content_type,
                                                        lambda: self.get(fileid))
        self.delete(fileid)
        self.__save_file(fileid, content, filename, content_type)
        return fileid
    def delete(self, file_or_id):
        """Delete the file; deleting a missing file is a silent no-op."""
        fileid = self.fileid(file_or_id)
        _check_file_id(fileid)
        local_file_path = self.__local_path(fileid)
        try:
            shutil.rmtree(local_file_path)
        except OSError:
            # Narrowed from a bare ``except``: only filesystem errors (e.g.
            # the directory already being gone) are intentionally ignored.
            pass
    def exists(self, file_or_id):
        """Return whether a file with the given id is currently stored."""
        fileid = self.fileid(file_or_id)
        _check_file_id(fileid)
        local_file_path = self.__local_path(fileid)
        return os.path.exists(local_file_path)
    def list(self):
        """Return the ids of all stored files."""
        return [os.path.basename(fileid) for fileid in os.listdir(self.storage_path)]
def _check_file_id(file_id):
    """Ensure *file_id* is a well formed UUID string.

    This also prevents unsafe values (e.g. relative paths such as ``../x``)
    from escaping the storage directory.

    Raises:
        ValueError: if ``file_id`` does not parse as a UUID.
    """
    try:
        uuid.UUID('{%s}' % file_id)
    except (ValueError, TypeError):
        # Narrowed from a bare ``except``: uuid.UUID raises ValueError for
        # malformed input (TypeError kept as a defensive extra).
        raise ValueError('Invalid file id %s' % file_id)
def _metadata_path(local_path):
    """Return the path of the metadata file inside *local_path*."""
    filename = 'metadata.json'
    return os.path.join(local_path, filename)
def _file_path(local_path):
    """Return the path of the payload file inside *local_path*."""
    filename = 'file'
    return os.path.join(local_path, filename)
| {
"content_hash": "4696bfd7506b7b598f0515f0d14b82d3",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 92,
"avg_line_length": 33.157575757575756,
"alnum_prop": 0.5907512337781027,
"repo_name": "amol-/depot",
"id": "a5574da259470fe2dd5f29276514d3555e843bd3",
"size": "5471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "depot/io/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "161671"
}
],
"symlink_target": ""
} |
import httplib # Used only for handling httplib.HTTPException (case #26701)
import urllib2
import traceback
import json
from kunai.log import logger
from kunai.collector import Collector
class RabbitMQ(Collector):
    """Collector polling the RabbitMQ status/management HTTP API.

    NOTE: this module is Python 2 code (``except X, e`` syntax, ``urllib2``).
    Returns False when the collector is unconfigured or any request fails,
    otherwise the parsed status dict (with 'connections' and 'queues' keys).
    """
    def launch(self):
        logger.debug('getRabbitMQStatus: start')
        # Bail out when credentials/URL are absent or still the sample value.
        if 'rabbitMQStatusUrl' not in self.config or \
           'rabbitMQUser' not in self.config or \
           'rabbitMQPass' not in self.config or \
           self.config['rabbitMQStatusUrl'] == 'http://www.example.com:55672/json':
            logger.debug('getRabbitMQStatus: config not set')
            return False
        logger.debug('getRabbitMQStatus: config set')
        try:
            logger.debug('getRabbitMQStatus: attempting authentication setup')
            manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            manager.add_password(None, self.config['rabbitMQStatusUrl'], self.config['rabbitMQUser'], self.config['rabbitMQPass'])
            handler = urllib2.HTTPBasicAuthHandler(manager)
            opener = urllib2.build_opener(handler)
            urllib2.install_opener(opener)
            logger.debug('getRabbitMQStatus: attempting urlopen')
            req = urllib2.Request(self.config['rabbitMQStatusUrl'], None, {})
            # Do the request, log any errors
            request = urllib2.urlopen(req)
            response = request.read()
        except urllib2.HTTPError, e:
            logger.error('Unable to get RabbitMQ status - HTTPError = %s', e)
            return False
        except urllib2.URLError, e:
            logger.error('Unable to get RabbitMQ status - URLError = %s', e)
            return False
        except httplib.HTTPException, e:
            logger.error('Unable to get RabbitMQ status - HTTPException = %s', e)
            return False
        except Exception:
            logger.error('Unable to get RabbitMQ status - Exception = %s', traceback.format_exc())
            return False
        try:
            status = json.loads(response)
            logger.debug(status)
            if 'connections' not in status:
                # We are probably using the newer RabbitMQ 2.x status plugin, so try to parse that instead.
                status = {}
                logger.debug('getRabbitMQStatus: using 2.x management plugin data')
                import urlparse
                split_url = urlparse.urlsplit(self.config['rabbitMQStatusUrl'])
                # Connections
                url = split_url[0] + '://' + split_url[1] + '/api/connections'
                logger.debug('getRabbitMQStatus: attempting urlopen on %s', url)
                manager.add_password(None, url, self.config['rabbitMQUser'], self.config['rabbitMQPass'])
                req = urllib2.Request(url, None, {})
                # Do the request, log any errors
                request = urllib2.urlopen(req)
                response = request.read()
                connections = json.loads(response)
                # The 2.x API returns the connection list; we only keep the count.
                status['connections'] = len(connections)
                logger.debug('getRabbitMQStatus: connections = %s', status['connections'])
                # Queues
                url = split_url[0] + '://' + split_url[1] + '/api/queues'
                logger.debug('getRabbitMQStatus: attempting urlopen on %s', url)
                manager.add_password(None, url, self.config['rabbitMQUser'], self.config['rabbitMQPass'])
                req = urllib2.Request(url, None, {})
                # Do the request, log any errors
                request = urllib2.urlopen(req)
                response = request.read()
                queues = json.loads(response)
                status['queues'] = queues
                logger.debug(status['queues'])
        except Exception:
            logger.error('Unable to load RabbitMQ status JSON - Exception = %s', traceback.format_exc())
            return False
        logger.debug('getRabbitMQStatus: completed, returning')
        # Fix for queues with the same name (case 32788):
        # prefix non-default-vhost queue names with their vhost.
        for queue in status.get('queues', []):
            vhost = queue.get('vhost', '/')
            if vhost == '/':
                continue
            queue['name'] = '%s/%s' % (vhost, queue['name'])
        return status
| {
"content_hash": "7be6e406365fd06ca33fc8641c507e4a",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 130,
"avg_line_length": 38.25892857142857,
"alnum_prop": 0.577362893815636,
"repo_name": "pombredanne/kunai-1",
"id": "af3f106cfe5eaab3b1eb893eb9eaa4da4b4e3feb",
"size": "4285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etc/packs/rabbitmq/collectors/collector_rabbitmq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8345"
},
{
"name": "HTML",
"bytes": "2986"
},
{
"name": "JavaScript",
"bytes": "26239"
},
{
"name": "Python",
"bytes": "643940"
},
{
"name": "Shell",
"bytes": "5371"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
from flask import make_response
from flask import request
from .constants import OK, NO_CONTENT
from .resources import Resource
class FlaskResource(Resource):
    """
    A Flask-specific ``Resource`` subclass.
    Doesn't require any special configuration, but helps when working in a
    Flask environment.
    """
    @classmethod
    def as_list(cls, *init_args, **init_kwargs):
        # Flask exposes a global ``request`` rather than passing it to each
        # view, so bind it onto a freshly built instance per call.
        def _wrapper(*args, **kwargs):
            # A new instance for every request ensures no state leaks
            # between calls.
            instance = cls(*init_args, **init_kwargs)
            instance.request = request
            return instance.handle('list', *args, **kwargs)
        return _wrapper
    @classmethod
    def as_detail(cls, *init_args, **init_kwargs):
        # Same approach as ``as_list``, but dispatching to 'detail'.
        def _wrapper(*args, **kwargs):
            # A new instance for every request ensures no state leaks
            # between calls.
            instance = cls(*init_args, **init_kwargs)
            instance.request = request
            return instance.handle('detail', *args, **kwargs)
        return _wrapper
    def request_body(self):
        """Return the raw body of the current request."""
        return self.request.data
    def is_debug(self):
        """Return whether the current Flask app runs in debug mode."""
        from flask import current_app
        return current_app.debug
    def build_response(self, data, status=OK):
        """Wrap the serialized ``data`` in a Flask response object."""
        if status == NO_CONTENT:
            # Avoid crashing the client when it tries to parse nonexisting JSON.
            content_type = 'text/plain'
        else:
            content_type = 'application/json'
        headers = {'Content-Type': content_type}
        return make_response(data, status, headers)
    @classmethod
    def build_endpoint_name(cls, name, endpoint_prefix=None):
        """
        Given a ``name`` & an optional ``endpoint_prefix``, this generates a name
        for a URL.
        :param name: The name for the URL (ex. 'detail')
        :type name: string
        :param endpoint_prefix: (Optional) A prefix for the URL's name (for
            resolving). The default is ``None``, which will autocreate a prefix
            based on the class name. Ex: ``BlogPostResource`` ->
            ``api_blogpost_list``
        :type endpoint_prefix: string
        :returns: The final name
        :rtype: string
        """
        if endpoint_prefix is None:
            resource_name = cls.__name__.replace('Resource', '').lower()
            endpoint_prefix = 'api_{}'.format(resource_name)
        return '_'.join([endpoint_prefix.rstrip('_'), name])
    @classmethod
    def add_url_rules(cls, app, rule_prefix, endpoint_prefix=None):
        """
        A convenience method for hooking up the URLs.
        This automatically adds a list & a detail endpoint to your routes.
        :param app: The ``Flask`` object for your app.
        :type app: ``flask.Flask``
        :param rule_prefix: The start of the URL to handle.
        :type rule_prefix: string
        :param endpoint_prefix: (Optional) A prefix for the URL's name (for
            endpoints). The default is ``None``, which will autocreate a prefix
            based on the class name. Ex: ``BlogPostResource`` ->
            ``api_blog_post_list``
        :type endpoint_prefix: string
        :returns: Nothing
        """
        allowed_methods = ['GET', 'POST', 'PUT', 'DELETE']
        app.add_url_rule(
            rule_prefix,
            endpoint=cls.build_endpoint_name('list', endpoint_prefix),
            view_func=cls.as_list(),
            methods=allowed_methods
        )
        app.add_url_rule(
            rule_prefix + '<pk>/',
            endpoint=cls.build_endpoint_name('detail', endpoint_prefix),
            view_func=cls.as_detail(),
            methods=allowed_methods
        )
| {
"content_hash": "1d6182b4f892da01c356cb6c6a1f0d6a",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 81,
"avg_line_length": 33.41525423728814,
"alnum_prop": 0.5830585848338828,
"repo_name": "toastdriven/restless",
"id": "a41a7d63ab520d2cb387bd166f3f160c491de519",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restless/fl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "107128"
}
],
"symlink_target": ""
} |
"""Utilities for testing the mapreduce backend."""
import collections
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.core.backends.mapreduce import forms
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.computation import computation_base
from tensorflow_federated.python.core.impl.federated_context import federated_computation
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.tensorflow_context import tensorflow_computation
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import iterative_process
# Bundles a `forms.MapReduceForm` (`mrf`) with the computation that produces
# a compatible initial server state (`initialize`).
MapReduceFormExample = collections.namedtuple('MapReduceFormExample',
                                              ['mrf', 'initialize'])
def generate_unnamed_type_signature(update, work):
  """Generates a type signature for the MapReduceForm based on components.

  Args:
    update: the server `update` computation; its first parameter supplies the
      server state type and its second result supplies the metrics type.
    work: the client `work` computation; its first parameter supplies the
      client data type.

  Returns:
    A `computation_types.FunctionType` from (server state @SERVER,
    client data @CLIENTS) to (server state @SERVER, metrics @SERVER),
    with all struct elements unnamed.
  """
  server_state_type = update.type_signature.parameter[0]
  client_data_type = work.type_signature.parameter[0]
  metrics_type = update.type_signature.result[1]
  at_server = lambda t: computation_types.FederatedType(t, placements.SERVER)
  at_clients = lambda t: computation_types.FederatedType(t, placements.CLIENTS)
  parameter = computation_types.StructType([
      (None, at_server(server_state_type)),
      (None, at_clients(client_data_type)),
  ])
  result = computation_types.StructType([
      (None, at_server(server_state_type)),
      (None, at_server(metrics_type)),
  ])
  return computation_types.FunctionType(parameter, result)
def get_temperature_sensor_example():
  """Constructs `forms.MapReduceForm` for temperature sensors example.
  The temperature sensor example computes the fraction of sensors that report
  temperatures over the threshold.
  Returns:
    A tuple of: (1) an instance of `forms.MapReduceForm` and (2) an associated
    `computation_base.Computation` that generates an initial state compatible
    with the server state expected by the `forms.MapReduceForm`.
  """
  @federated_computation.federated_computation()
  def initialize():
    @tensorflow_computation.tf_computation
    def initialize_tf():
      return collections.OrderedDict(num_rounds=tf.constant(0))
    return intrinsics.federated_value(initialize_tf(), placements.SERVER)
  # The state of the server is a singleton tuple containing just the integer
  # counter `num_rounds`.
  server_state_type = collections.OrderedDict(num_rounds=tf.int32)
  # The broadcast threshold rises by one degree each round (32.0 + num_rounds).
  @tensorflow_computation.tf_computation(server_state_type)
  def prepare(state):
    return collections.OrderedDict(max_temperature=32.0 +
                                   tf.cast(state['num_rounds'], tf.float32))
  # The initial state of the client is a singleton tuple containing a single
  # float `max_temperature`, which is the threshold received from the server.
  client_state_type = collections.OrderedDict(max_temperature=tf.float32)
  # The client data is a sequence of floats.
  client_data_type = computation_types.SequenceType(tf.float32)
  @tensorflow_computation.tf_computation(client_data_type, client_state_type)
  def work(data, state):
    """See the `forms.MapReduceForm` definition of `work`."""
    # Reduce over the local data to find the client's maximum temperature.
    def fn(s, x):
      return {
          'num': s['num'] + 1,
          'max': tf.maximum(s['max'], x),
      }
    reduce_result = data.reduce({
        'num': np.int32(0),
        'max': np.float32(-459.67)
    }, fn)
    client_updates = collections.OrderedDict(
        is_over=reduce_result['max'] > state['max_temperature'])
    # The three empty lists are the (unused) secure-aggregation channels.
    return client_updates, [], [], []
  # The client update is a singleton tuple with a Boolean-typed `is_over`.
  client_update_type = collections.OrderedDict(is_over=tf.bool)
  # The accumulator for client updates is a pair of counters, one for the
  # number of clients over threshold, and the other for the total number of
  # client updates processed so far.
  accumulator_type = collections.OrderedDict(
      num_total=tf.int32, num_over=tf.int32)
  @tensorflow_computation.tf_computation
  def zero():
    return collections.OrderedDict(
        num_total=tf.constant(0), num_over=tf.constant(0))
  @tensorflow_computation.tf_computation(accumulator_type, client_update_type)
  def accumulate(accumulator, update):
    return collections.OrderedDict(
        num_total=accumulator['num_total'] + 1,
        num_over=accumulator['num_over'] + tf.cast(update['is_over'], tf.int32))
  @tensorflow_computation.tf_computation(accumulator_type, accumulator_type)
  def merge(accumulator1, accumulator2):
    return collections.OrderedDict(
        num_total=accumulator1['num_total'] + accumulator2['num_total'],
        num_over=accumulator1['num_over'] + accumulator2['num_over'])
  @tensorflow_computation.tf_computation(merge.type_signature.result)
  def report(accumulator):
    return collections.OrderedDict(
        ratio_over_threshold=(tf.cast(accumulator['num_over'], tf.float32) /
                              tf.cast(accumulator['num_total'], tf.float32)))
  # This example performs no secure aggregation, so one computation returning
  # an empty tuple stands in for all three secure-sum parameters.
  unit_comp = tensorflow_computation.tf_computation(lambda: [])
  bitwidth = unit_comp
  max_input = unit_comp
  modulus = unit_comp
  # The server receives the aggregate report plus three empty secure-sum
  # results, mirroring the 4-tuple produced by `work`.
  update_type = (collections.OrderedDict(ratio_over_threshold=tf.float32), (),
                 (), ())
  @tensorflow_computation.tf_computation(server_state_type, update_type)
  def update(state, update):
    # Bump the round counter and surface the aggregate report as metrics.
    return (collections.OrderedDict(num_rounds=state['num_rounds'] + 1),
            update[0])
  type_signature = generate_unnamed_type_signature(update, work)
  return MapReduceFormExample(
      mrf=forms.MapReduceForm(type_signature, prepare, work, zero, accumulate,
                              merge, report, bitwidth, max_input, modulus,
                              update),
      initialize=initialize)
def get_federated_sum_example(
    *,
    secure_sum: bool = False
) -> tuple[forms.MapReduceForm, computation_base.Computation]:
  """Constructs `forms.MapReduceForm` which performs a sum aggregation.
  Args:
    secure_sum: Whether to use `federated_secure_sum_bitwidth`. Defaults to
      `federated_sum`.
  Returns:
    A `MapReduceFormExample` holding the `forms.MapReduceForm` and the
    associated initialization computation.
  """
  # The server keeps no state for this example: initialize yields unit.
  @tensorflow_computation.tf_computation
  def initialize_tf():
    return ()
  @federated_computation.federated_computation()
  def initialize():
    return intrinsics.federated_value(initialize_tf(), placements.SERVER)
  server_state_type = initialize_tf.type_signature.result
  @tensorflow_computation.tf_computation(server_state_type)
  def prepare(state):
    return state
  @tensorflow_computation.tf_computation(
      computation_types.SequenceType(tf.int32), prepare.type_signature.result)
  def work(data, _):
    # Sum the client's local int32 sequence, then route the sum into either
    # the secure-sum slot or the federated-aggregate slot of the 4-tuple.
    client_sum = data.reduce(initial_state=0, reduce_func=tf.add)
    if secure_sum:
      return [], client_sum, [], []
    else:
      return client_sum, [], [], []
  # With secure_sum the federated-aggregate channel carries only unit values,
  # so zero/accumulate/merge degenerate to the empty tuple.
  @tensorflow_computation.tf_computation
  def zero():
    if secure_sum:
      return ()
    else:
      return 0
  client_update_type = work.type_signature.result[0]
  accumulator_type = zero.type_signature.result
  @tensorflow_computation.tf_computation(accumulator_type, client_update_type)
  def accumulate(accumulator, update):
    if secure_sum:
      return ()
    else:
      return accumulator + update
  @tensorflow_computation.tf_computation(accumulator_type, accumulator_type)
  def merge(accumulator1, accumulator2):
    if secure_sum:
      return ()
    else:
      return accumulator1 + accumulator2
  @tensorflow_computation.tf_computation(merge.type_signature.result)
  def report(accumulator):
    return accumulator
  # Secure-aggregation parameters: 32-bit secure sum; max_input/modulus unused.
  bitwidth = tensorflow_computation.tf_computation(lambda: 32)
  max_input = tensorflow_computation.tf_computation(lambda: 0)
  modulus = tensorflow_computation.tf_computation(lambda: 0)
  update_type = (
      merge.type_signature.result,
      work.type_signature.result[1],
      work.type_signature.result[2],
      work.type_signature.result[3],
  )
  @tensorflow_computation.tf_computation(server_state_type, update_type)
  def update(state, update):
    # Surface the total from whichever aggregation channel was used.
    if secure_sum:
      return state, update[1]
    else:
      return state, update[0]
  type_signature = generate_unnamed_type_signature(update, work)
  return MapReduceFormExample(
      mrf=forms.MapReduceForm(type_signature, prepare, work, zero, accumulate,
                              merge, report, bitwidth, max_input, modulus,
                              update),
      initialize=initialize)
def get_mnist_training_example():
"""Constructs `forms.MapReduceForm` for mnist training.
Returns:
An instance of `forms.MapReduceForm`.
"""
model_nt = collections.namedtuple('Model', 'weights bias')
server_state_nt = (collections.namedtuple('ServerState', 'model num_rounds'))
# Start with a model filled with zeros, and the round counter set to zero.
@federated_computation.federated_computation()
def initialize():
@tensorflow_computation.tf_computation
def initialize_tf():
return server_state_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_rounds=tf.constant(0))
return intrinsics.federated_value(initialize_tf(), placements.SERVER)
server_state_tff_type = server_state_nt(
model=model_nt(weights=(tf.float32, [784, 10]), bias=(tf.float32, [10])),
num_rounds=tf.int32)
client_state_nt = (
collections.namedtuple('ClientState', 'model learning_rate'))
# Pass the model to the client, along with a dynamically adjusted learning
# rate that starts at 0.1 and decays exponentially by a factor of 0.9.
@tensorflow_computation.tf_computation(server_state_tff_type)
def prepare(state):
learning_rate = 0.1 * tf.pow(0.9, tf.cast(state.num_rounds, tf.float32))
return client_state_nt(model=state.model, learning_rate=learning_rate)
batch_nt = collections.namedtuple('Batch', 'x y')
batch_tff_type = batch_nt(x=(tf.float32, [None, 784]), y=(tf.int32, [None]))
dataset_tff_type = computation_types.SequenceType(batch_tff_type)
model_tff_type = model_nt(
weights=(tf.float32, [784, 10]), bias=(tf.float32, [10]))
client_state_tff_type = client_state_nt(
model=model_tff_type, learning_rate=tf.float32)
loop_state_nt = collections.namedtuple('LoopState', 'num_examples total_loss')
update_nt = collections.namedtuple('Update', 'model num_examples loss')
  # Train the model locally, emit the locally-trained model and the number of
# examples as an update, and the average loss and the number of examples as
# local client stats.
@tensorflow_computation.tf_computation(dataset_tff_type,
client_state_tff_type)
def work(data, state): # pylint: disable=missing-docstring
model_vars = model_nt(
weights=tf.Variable(initial_value=state.model.weights, name='weights'),
bias=tf.Variable(initial_value=state.model.bias, name='bias'))
init_model = tf.compat.v1.global_variables_initializer()
optimizer = tf.keras.optimizers.SGD(state.learning_rate)
@tf.function
def reduce_fn(loop_state, batch):
"""Compute a single gradient step on an given batch of examples."""
with tf.GradientTape() as tape:
pred_y = tf.nn.softmax(
tf.matmul(batch.x, model_vars.weights) + model_vars.bias)
loss = -tf.reduce_mean(
tf.reduce_sum(
tf.one_hot(batch.y, 10) * tf.math.log(pred_y), axis=[1]))
grads = tape.gradient(loss, model_vars)
optimizer.apply_gradients(
zip(tf.nest.flatten(grads), tf.nest.flatten(model_vars)))
return loop_state_nt(
num_examples=loop_state.num_examples + 1,
total_loss=loop_state.total_loss + loss)
with tf.control_dependencies([init_model]):
loop_state = data.reduce(
loop_state_nt(num_examples=0, total_loss=np.float32(0.0)), reduce_fn)
num_examples = loop_state.num_examples
total_loss = loop_state.total_loss
with tf.control_dependencies([num_examples, total_loss]):
loss = total_loss / tf.cast(num_examples, tf.float32)
return update_nt(
model=model_vars, num_examples=num_examples, loss=loss), [], [], []
accumulator_nt = update_nt
# Initialize accumulators for aggregation with zero model and zero examples.
@tensorflow_computation.tf_computation
def zero():
return accumulator_nt(
model=model_nt(weights=tf.zeros([784, 10]), bias=tf.zeros([10])),
num_examples=tf.constant(0),
loss=tf.constant(0.0, dtype=tf.float32))
update_tff_type = update_nt(
model=model_tff_type, num_examples=tf.int32, loss=tf.float32)
accumulator_tff_type = update_tff_type
# We add an update to an accumulator with the update's model multipled by the
# number of examples, so we can compute a weighted average in the end.
@tensorflow_computation.tf_computation(accumulator_tff_type, update_tff_type)
def accumulate(accumulator, update):
scaling_factor = tf.cast(update.num_examples, tf.float32)
scaled_model = tf.nest.map_structure(lambda x: x * scaling_factor,
update.model)
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator.model, scaled_model),
num_examples=accumulator.num_examples + update.num_examples,
loss=accumulator.loss + update.loss * scaling_factor)
# Merging accumulators does not involve scaling.
@tensorflow_computation.tf_computation(accumulator_tff_type,
accumulator_tff_type)
def merge(accumulator1, accumulator2):
return accumulator_nt(
model=tf.nest.map_structure(tf.add, accumulator1.model,
accumulator2.model),
num_examples=accumulator1.num_examples + accumulator2.num_examples,
loss=accumulator1.loss + accumulator2.loss)
report_nt = accumulator_nt
# The result of aggregation is produced by dividing the accumulated model by
# the total number of examples. Same for loss.
@tensorflow_computation.tf_computation(accumulator_tff_type)
def report(accumulator):
scaling_factor = 1.0 / tf.cast(accumulator.num_examples, tf.float32)
scaled_model = model_nt(
weights=accumulator.model.weights * scaling_factor,
bias=accumulator.model.bias * scaling_factor)
return report_nt(
model=scaled_model,
num_examples=accumulator.num_examples,
loss=accumulator.loss * scaling_factor)
unit_computation = tensorflow_computation.tf_computation(lambda: [])
secure_sum_bitwidth = unit_computation
secure_sum_max_input = unit_computation
secure_sum_modulus = unit_computation
update_type = (accumulator_tff_type, (), (), ())
metrics_nt = collections.namedtuple('Metrics', 'num_rounds num_examples loss')
# Pass the newly averaged model along with an incremented round counter over
# to the next round, and output the counters and loss as server metrics.
@tensorflow_computation.tf_computation(server_state_tff_type, update_type)
def update(state, update):
report = update[0]
num_rounds = state.num_rounds + 1
return (server_state_nt(model=report.model, num_rounds=num_rounds),
metrics_nt(
num_rounds=num_rounds,
num_examples=report.num_examples,
loss=report.loss))
type_signature = generate_unnamed_type_signature(update, work)
return MapReduceFormExample(
mrf=forms.MapReduceForm(type_signature, prepare, work, zero, accumulate,
merge, report, secure_sum_bitwidth,
secure_sum_max_input, secure_sum_modulus, update),
initialize=initialize)
def get_iterative_process_for_example_with_unused_lambda_arg():
  """Returns an iterative process having a Lambda not referencing its arg."""
  server_state_type = collections.OrderedDict(num_clients=tf.int32)

  def _discard_and_emit(discarded_value, discarded_type, output_value):
    # Wraps `output_value` in a federated Lambda whose parameter (of
    # CLIENTS-placed `discarded_type`) is never referenced in the body.
    param_type = computation_types.FederatedType(discarded_type,
                                                 placements.CLIENTS)
    emitter = federated_computation.federated_computation(
        lambda _: output_value, param_type)
    return emitter(discarded_value)

  def _num_clients(client_data):
    # Sum a constant 1 per client; the client data itself is only threaded
    # through the unused-argument Lambda above.
    ones_at_clients = intrinsics.federated_value(1, placements.CLIENTS)
    ones_at_clients = _discard_and_emit(
        client_data, computation_types.SequenceType(tf.string),
        ones_at_clients)
    return intrinsics.federated_sum(ones_at_clients)

  @federated_computation.federated_computation
  def init_fn():
    initial_state = collections.OrderedDict(num_clients=0)
    return intrinsics.federated_value(initial_state, placements.SERVER)

  @federated_computation.federated_computation([
      computation_types.FederatedType(server_state_type, placements.SERVER),
      computation_types.FederatedType(
          computation_types.SequenceType(tf.string), placements.CLIENTS)
  ])
  def next_fn(server_state, client_val):
    """`next` function for `tff.templates.IterativeProcess`."""
    server_update = intrinsics.federated_zip(
        collections.OrderedDict(num_clients=_num_clients(client_val)))
    empty_output = intrinsics.federated_value((), placements.SERVER)
    server_output = _discard_and_emit(
        intrinsics.federated_broadcast(server_state), server_state_type,
        empty_output)
    return server_update, server_output

  return iterative_process.IterativeProcess(init_fn, next_fn)
def get_iterative_process_for_example_with_unused_tf_computation_arg():
  """Returns an iterative process with a @tf.function with an unused arg."""
  server_state_type = collections.OrderedDict(num_clients=tf.int32)

  def _map_ignoring_input(federated_arg, tf_func):
    # Builds a tf_computation whose parameter is ignored by the wrapped
    # tf.function, then maps it over `federated_arg`.
    ignoring_fn = tf.function(lambda _: tf_func())
    member_type = federated_arg.type_signature.member
    mapped_comp = tensorflow_computation.tf_computation(ignoring_fn,
                                                        member_type)
    return intrinsics.federated_map(mapped_comp, federated_arg)

  def _num_clients(client_data):

    @tf.function
    def _scalar_one():
      return tf.ones(shape=[], dtype=tf.int32)

    ones_at_clients = _map_ignoring_input(client_data, _scalar_one)
    return intrinsics.federated_sum(ones_at_clients)

  @federated_computation.federated_computation
  def init_fn():
    zero_state = collections.OrderedDict(num_clients=0)
    return intrinsics.federated_value(zero_state, placements.SERVER)

  @federated_computation.federated_computation([
      computation_types.FederatedType(server_state_type, placements.SERVER),
      computation_types.FederatedType(
          computation_types.SequenceType(tf.string), placements.CLIENTS)
  ])
  def next_fn(server_state, client_val):
    """`next` function for `tff.templates.IterativeProcess`."""
    server_update = intrinsics.federated_zip(
        collections.OrderedDict(num_clients=_num_clients(client_val)))
    server_output = intrinsics.federated_value((), placements.SERVER)
    server_output = intrinsics.federated_sum(
        _map_ignoring_input(
            intrinsics.federated_broadcast(server_state), tf.timestamp))
    return server_update, server_output

  return iterative_process.IterativeProcess(init_fn, next_fn)
def get_iterative_process_for_example_with_lambda_returning_aggregation():
  """Gets iterative process with indirection to the called intrinsic."""
  server_state_type = collections.OrderedDict(num_clients=tf.int32)
  client_val_type = computation_types.FederatedType(server_state_type,
                                                    placements.CLIENTS)

  @federated_computation.federated_computation
  def computation_returning_lambda():

    @federated_computation.federated_computation(tf.int32)
    def computation_returning_sum(x):
      # Place the `federated_sum` intrinsic inside a tuple next to `x` and
      # select it back out, so the called intrinsic is reached only through
      # an indirection.
      indirection_tuple = [
          building_blocks.Intrinsic(
              'federated_sum',
              computation_types.FunctionType(
                  client_val_type,
                  computation_types.FederatedType(client_val_type.member,
                                                  placements.SERVER))), x
      ]
      return indirection_tuple[0]

    return computation_returning_sum

  @federated_computation.federated_computation
  def init_fn():
    return intrinsics.federated_value(
        collections.OrderedDict(num_clients=0), placements.SERVER)

  @federated_computation.federated_computation([
      computation_types.FederatedType(server_state_type, placements.SERVER),
      client_val_type,
  ])
  def next_fn(server_state, client_val):
    """`next` function for `tff.templates.IterativeProcess`."""
    server_update = intrinsics.federated_sum(client_val)
    server_output = intrinsics.federated_value((), placements.SERVER)
    state_at_clients = intrinsics.federated_broadcast(server_state)
    lambda_returning_sum = computation_returning_lambda()
    sum_fn = lambda_returning_sum(1)
    server_output = sum_fn(state_at_clients)
    return server_update, server_output

  return iterative_process.IterativeProcess(init_fn, next_fn)
| {
"content_hash": "50299a71053eab3e67e0070ff766370c",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 91,
"avg_line_length": 39.644444444444446,
"alnum_prop": 0.6916106128550075,
"repo_name": "tensorflow/federated",
"id": "bd8cc63886e531498a3c2acacdf52e428c1d8bc4",
"size": "22007",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/core/backends/mapreduce/mapreduce_test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
} |
import operator
import numpy as np
import math
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, inexact, nan, zeros, sinc)
from . import _ufuncs as ufuncs
from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
psi, hankel1, hankel2, yv, kv, ndtri,
poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
# Explicit public API of this module; governs `from <module> import *`.
__all__ = [
    'ai_zeros',
    'assoc_laguerre',
    'bei_zeros',
    'beip_zeros',
    'ber_zeros',
    'bernoulli',
    'berp_zeros',
    'bi_zeros',
    'clpmn',
    'comb',
    'digamma',
    'diric',
    'erf_zeros',
    'euler',
    'factorial',
    'factorial2',
    'factorialk',
    'fresnel_zeros',
    'fresnelc_zeros',
    'fresnels_zeros',
    'gamma',
    'h1vp',
    'h2vp',
    'hankel1',
    'hankel2',
    'hyp0f1',
    'iv',
    'ivp',
    'jn_zeros',
    'jnjnp_zeros',
    'jnp_zeros',
    'jnyn_zeros',
    'jv',
    'jvp',
    'kei_zeros',
    'keip_zeros',
    'kelvin_zeros',
    'ker_zeros',
    'kerp_zeros',
    'kv',
    'kvp',
    'lmbda',
    'lpmn',
    'lpn',
    'lqmn',
    'lqn',
    'mathieu_a',
    'mathieu_b',
    'mathieu_even_coef',
    'mathieu_odd_coef',
    'ndtri',
    'obl_cv_seq',
    'pbdn_seq',
    'pbdv_seq',
    'pbvv_seq',
    'perm',
    'polygamma',
    'pro_cv_seq',
    'psi',
    'riccati_jn',
    'riccati_yn',
    'sinc',
    'y0_zeros',
    'y1_zeros',
    'y1p_zeros',
    'yn_zeros',
    'ynp_zeros',
    'yv',
    'yvp',
    'zeta'
]
def _nonneg_int_or_fail(n, var_name, strict=True):
try:
if strict:
# Raises an exception if float
n = operator.index(n)
elif n == floor(n):
n = int(n)
else:
raise ValueError()
if n < 0:
raise ValueError()
except (ValueError, TypeError) as err:
raise err.__class__("{} must be a non-negative integer".format(var_name)) from err
return n
def diric(x, n):
    """Periodic sinc function, also called the Dirichlet function.

    The Dirichlet function is defined as::

        diric(x, n) = sin(x * n/2) / (n * sin(x / 2)),

    where `n` is a positive integer.

    Parameters
    ----------
    x : array_like
        Input data
    n : int
        Integer defining the periodicity. Entries that are not positive
        integers yield ``nan``.

    Returns
    -------
    diric : ndarray
    """
    x, n = asarray(x), asarray(n)
    # Broadcast x and n against each other.
    n = asarray(n + (x - x))
    x = asarray(x + (n - n))
    ytype = x.dtype if issubdtype(x.dtype, inexact) else float
    y = zeros(x.shape, ytype)
    # Empirical threshold per float precision: where |sin(x/2)| < minval the
    # ratio is numerically unstable and the result is pinned to +1 or -1.
    eps = np.finfo(ytype).eps
    if eps < 1e-18:
        minval = 1e-11
    elif eps < 1e-15:
        minval = 1e-7
    else:
        minval = 1e-3
    # Non-positive or non-integer n is undefined -> nan.
    invalid = (n <= 0) | (n != floor(n))
    place(y, invalid, nan)
    half_x = x / 2
    denom = sin(half_x)
    # Near the poles of 1/sin(x/2), use the known limit (-1)**(round(x/pi/2)*(n-1)).
    near_pole = (1 - invalid) & (abs(denom) < minval)
    xs = extract(near_pole, half_x)
    ns = extract(near_pole, n)
    place(y, near_pole, pow(-1, np.round(xs / pi) * (ns - 1)))
    # Everywhere else, evaluate the defining formula directly.
    regular = (1 - invalid) & (1 - near_pole)
    xs = extract(regular, half_x)
    ns = extract(regular, n)
    ds = extract(regular, denom)
    place(y, regular, sin(ns * xs) / (ns * ds))
    return y
def jnjnp_zeros(nt):
    """Compute zeros of integer-order Bessel functions Jn and Jn'.

    Results are arranged in order of the magnitudes of the zeros.

    Parameters
    ----------
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    zo[l-1] : ndarray
        Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
    n[l-1] : ndarray
        Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
    m[l-1] : ndarray
        Serial number of the zeros of Jn(x) or Jn'(x) associated
        with lth zero. Of length `nt`.
    t[l-1] : ndarray
        0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
        length `nt`.

    See Also
    --------
    jn_zeros, jnp_zeros : to get separated arrays of zeros.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    # Reject non-scalar, non-integral, and out-of-range requests up front.
    # The lower-bound check (nt >= 1) mirrors the sibling `jnyn_zeros`
    # validation; previously only the 1200 upper bound was enforced.
    if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200) or (nt <= 0):
        raise ValueError("Number must be a positive integer <= 1200.")
    nt = int(nt)
    n, m, t, zo = specfun.jdzo(nt)
    return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
    """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).

    Returns four arrays of length `nt`, holding the first `nt` zeros of
    Jn(x), Jn'(x), Yn(x), and Yn'(x) respectively, in ascending order.

    Parameters
    ----------
    n : int
        Order of the Bessel functions
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    Jn : ndarray
        First `nt` zeros of Jn
    Jnp : ndarray
        First `nt` zeros of Jn'
    Yn : ndarray
        First `nt` zeros of Yn
    Ynp : ndarray
        First `nt` zeros of Yn'

    See Also
    --------
    jn_zeros, jnp_zeros, yn_zeros, ynp_zeros

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and isscalar(n)):
        raise ValueError("Arguments must be scalars.")
    if floor(n) != n or floor(nt) != nt:
        raise ValueError("Arguments must be integers.")
    if nt <= 0:
        raise ValueError("nt > 0")
    # Negative orders are handled via |n|.
    return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel functions Jn.

    Compute `nt` zeros of the Bessel function :math:`J_n(x)` on the
    interval :math:`(0, \infty)`, returned in ascending order. Note that
    the zero at :math:`x = 0` that exists for :math:`n > 0` is excluded.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function.

    See Also
    --------
    jv

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    jn_roots, _, _, _ = jnyn_zeros(n, nt)
    return jn_roots
def jnp_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function derivatives Jn'.

    Compute `nt` zeros of the functions :math:`J_n'(x)` on the interval
    :math:`(0, \infty)`, returned in ascending order. Note that the zero
    at :math:`x = 0` that exists for :math:`n > 1` is excluded.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function derivative.

    See Also
    --------
    jvp, jv

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    _, jnp_roots, _, _ = jnyn_zeros(n, nt)
    return jnp_roots
def yn_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function Yn(x).

    Compute `nt` zeros of the functions :math:`Y_n(x)` on the interval
    :math:`(0, \infty)`, returned in ascending order.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function.

    See Also
    --------
    yn, yv

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    _, _, yn_roots, _ = jnyn_zeros(n, nt)
    return yn_roots
def ynp_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function derivatives Yn'(x).

    Compute `nt` zeros of the functions :math:`Y_n'(x)` on the interval
    :math:`(0, \infty)`, returned in ascending order.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function derivative.

    See Also
    --------
    yvp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    _, _, _, ynp_roots = jnyn_zeros(n, nt)
    return ynp_roots
def y0_zeros(nt, complex=False):
    """Compute nt zeros of Bessel function Y0(z), and derivative at each zero.

    The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        If False, return only the real zeros; if True, return only the
        complex zeros with negative real part and positive imaginary part.
        The complex conjugates of the latter are also zeros of the function
        but are not returned by this routine.

    Returns
    -------
    z0n : ndarray
        Location of nth zero of Y0(z)
    y0pz0n : ndarray
        Value of derivative Y0'(z0) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=0 selects Y0; kc truthy restricts the search to real zeros.
    return specfun.cyzo(nt, 0, not complex)
def y1_zeros(nt, complex=False):
    """Compute nt zeros of Bessel function Y1(z), and derivative at each zero.

    The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        If False, return only the real zeros; if True, return only the
        complex zeros with negative real part and positive imaginary part.
        The complex conjugates of the latter are also zeros of the function
        but are not returned by this routine.

    Returns
    -------
    z1n : ndarray
        Location of nth zero of Y1(z)
    y1pz1n : ndarray
        Value of derivative Y1'(z1) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=1 selects Y1; kc truthy restricts the search to real zeros.
    return specfun.cyzo(nt, 1, not complex)
def y1p_zeros(nt, complex=False):
    """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.

    The values are given by Y1(z1) at each z1 where Y1'(z1)=0.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        If False, return only the real zeros; if True, return only the
        complex zeros with negative real part and positive imaginary part.
        The complex conjugates of the latter are also zeros of the function
        but are not returned by this routine.

    Returns
    -------
    z1pn : ndarray
        Location of nth zero of Y1'(z)
    y1z1pn : ndarray
        Value of Y1(z1) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=2 selects Y1'; kc truthy restricts the search to real zeros.
    return specfun.cyzo(nt, 2, not complex)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in range(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
def jvp(v, z, n=1):
    """Compute derivatives of Bessel functions of the first kind.

    Compute the nth derivative of the Bessel function `Jv` with respect
    to `z`, using the relation DLMF 10.6.7 [2]_.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative; can be real or
        complex.
    n : int, default 1
        Order of derivative

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Bessel function.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return jv(v, z)
    return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
    """Compute derivatives of Bessel functions of the second kind.

    Compute the nth derivative of the Bessel function `Yv` with respect
    to `z`, using the relation DLMF 10.6.7 [2]_.

    Parameters
    ----------
    v : float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative

    Returns
    -------
    scalar or ndarray
        nth derivative of the Bessel function.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return yv(v, z)
    return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
    """Compute nth derivative of real-order modified Bessel function Kv(z)

    Kv(z) is the modified Bessel function of the second kind.
    Derivative is calculated with respect to `z`, using the relation
    DLMF 10.29.5 [2]_.

    Parameters
    ----------
    v : array_like of float
        Order of Bessel function
    z : array_like of complex
        Argument at which to evaluate the derivative
    n : int
        Order of derivative. Default is first derivative.

    Returns
    -------
    out : ndarray
        The results

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 6.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.29.E5
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return kv(v, z)
    # K picks up an alternating sign relative to the I-type recurrence.
    return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
    """Compute derivatives of modified Bessel functions of the first kind.

    Compute the nth derivative of the modified Bessel function `Iv` with
    respect to `z`, using the relation DLMF 10.29.5 [2]_.

    Parameters
    ----------
    v : array_like
        Order of Bessel function
    z : array_like
        Argument at which to evaluate the derivative; can be real or
        complex.
    n : int, default 1
        Order of derivative

    Returns
    -------
    scalar or ndarray
        nth derivative of the modified Bessel function.

    See Also
    --------
    iv

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 6.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.29.E5
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return iv(v, z)
    return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
    """Compute nth derivative of Hankel function H1v(z) with respect to `z`.

    The derivative is computed using the relation DLMF 10.6.7 [2]_.

    Parameters
    ----------
    v : array_like
        Order of Hankel function
    z : array_like
        Argument at which to evaluate the derivative. Can be real or
        complex.
    n : int, default 1
        Order of derivative

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Hankel function.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return hankel1(v, z)
    return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
    """Compute nth derivative of Hankel function H2v(z) with respect to `z`.

    The derivative is computed using the relation DLMF 10.6.7 [2]_.

    Parameters
    ----------
    v : array_like
        Order of Hankel function
    z : array_like
        Argument at which to evaluate the derivative. Can be real or
        complex.
    n : int, default 1
        Order of derivative

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Hankel function.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return hankel2(v, z)
    return _bessel_diff_formula(v, z, n, hankel2, -1)
def riccati_jn(n, x):
    r"""Compute Ricatti-Bessel function of the first kind and its derivative.

    The Ricatti-Bessel function of the first kind is defined as
    :math:`x j_n(x)`, where :math:`j_n` is the spherical Bessel function
    of the first kind of order :math:`n`. Values and first derivatives
    are returned for all orders from 0 up to and including `n`.

    The computation is carried out via backward recurrence, using the
    relation DLMF 10.51.1 [2]_; this wraps a Fortran routine by Zhang
    and Jin [1]_.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    jn : ndarray
        Value of j0(x), ..., jn(x)
    jnp : ndarray
        First derivative j0'(x), ..., jn'(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.51.E1
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backend routine needs an order of at least 1; trim afterwards.
    n1 = 1 if n == 0 else n
    _, jn_vals, jnp_vals = specfun.rctj(n1, x)
    return jn_vals[:(n + 1)], jnp_vals[:(n + 1)]
def riccati_yn(n, x):
    """Compute Ricatti-Bessel function of the second kind and its derivative.

    The Ricatti-Bessel function of the second kind is defined as
    :math:`x y_n(x)`, where :math:`y_n` is the spherical Bessel function
    of the second kind of order :math:`n`. Values and first derivatives
    are returned for all orders from 0 up to and including `n`.

    The computation is carried out via ascending recurrence, using the
    relation DLMF 10.51.1 [2]_; this wraps a Fortran routine by Zhang
    and Jin [1]_.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    yn : ndarray
        Value of y0(x), ..., yn(x)
    ynp : ndarray
        First derivative y0'(x), ..., yn'(x)

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.51.E1
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backend routine needs an order of at least 1; trim afterwards.
    n1 = 1 if n == 0 else n
    _, yn_vals, ynp_vals = specfun.rcty(n1, x)
    return yn_vals[:(n + 1)], ynp_vals[:(n + 1)]
def erf_zeros(nt):
    """Compute the first nt zero in the first quadrant, ordered by absolute value.

    Zeros in the other quadrants can be obtained by using the symmetries
    erf(-z) = erf(z) and erf(conj(z)) = conj(erf(z)).

    Parameters
    ----------
    nt : int
        The number of zeros to compute

    Returns
    -------
    ndarray (complex)
        Locations of the first `nt` complex zeros of erf(z) in the
        first quadrant.

    Examples
    --------
    >>> from scipy import special
    >>> special.erf_zeros(1)
    array([1.45061616+1.880943j])

    Check that erf is (close to) zero for the value returned by erf_zeros

    >>> special.erf(special.erf_zeros(1))
    array([4.95159469e-14-1.16407394e-16j])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # Delegates to the Fortran routine CERZO from Zhang & Jin.
    return specfun.cerzo(nt)
def fresnelc_zeros(nt):
    """Compute nt complex zeros of cosine Fresnel integral C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # FCSZO mode 1 selects zeros of C(z).
    return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
    """Compute nt complex zeros of sine Fresnel integral S(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # FCSZO mode 2 selects zeros of S(z).
    return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
    """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # Returned in the order (zeros of S, zeros of C).
    return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
    """Compute the generalized (associated) Laguerre polynomial of degree n and order k.

    The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``
    with respect to the weight function ``exp(-x) * x**k`` (with ``k > -1``).

    Notes
    -----
    This is a thin convenience wrapper around `eval_genlaguerre`: the only
    difference is the argument order, ``(x, n, k=0.0) --> (n, k, x)``.
    """
    return orthogonal.eval_genlaguerre(n, k, x)
# `digamma` is a public alias for `psi`.
digamma = psi
def polygamma(n, x):
    r"""Polygamma functions.

    Defined as :math:`\psi^{(n)}(x)` where :math:`\psi` is the
    `digamma` function. See [dlmf]_ for details.

    Parameters
    ----------
    n : array_like
        The order of the derivative of the digamma function; must be
        integral
    x : array_like
        Real valued input

    Returns
    -------
    ndarray
        Function results

    See Also
    --------
    digamma

    References
    ----------
    .. [dlmf] NIST, Digital Library of Mathematical Functions,
        https://dlmf.nist.gov/5.15

    Examples
    --------
    >>> from scipy import special
    >>> x = [2, 3, 25.5]
    >>> special.polygamma(1, x)
    array([ 0.64493407,  0.39493407,  0.03999467])
    >>> special.polygamma(0, x) == special.psi(x)
    array([ True,  True,  True], dtype=bool)
    """
    n, x = asarray(n), asarray(x)
    # For n >= 1, DLMF 5.15.6: psi^(n)(x) = (-1)**(n+1) * n! * zeta(n+1, x).
    # n == 0 is handled separately since it is the digamma function itself.
    higher = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
    return where(n == 0, psi(x), higher)
def mathieu_even_coef(m, q):
    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.

    The Fourier series of the even solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz

    .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z

    This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
    input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
    m=2n+1.

    Parameters
    ----------
    m : int
        Order of Mathieu functions. Must be non-negative.
    q : float (>=0)
        Parameter of Mathieu functions. Must be non-negative.

    Returns
    -------
    Ak : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/28.4#i
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m < 0):
        raise ValueError("m must be an integer >=0.")

    # Empirical estimate of how many coefficients are significant.
    sq = sqrt(q)
    if q <= 1:
        qm = 7.5 + 56.1*sq - 134.7*q + 90.7*sq*q
    else:
        qm = 17.0 + 3.1*sq - .126*q + .0037*sq*q
    km = int(qm + 0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    m = int(floor(m))
    # FCOEF even-solution selectors: kd=1 for even m, kd=2 for odd m.
    kd = 2 if m % 2 else 1
    a = mathieu_a(m, q)
    fc = specfun.fcoef(kd, m, q, a)
    return fc[:km]
def mathieu_odd_coef(m, q):
    r"""Fourier coefficients for odd Mathieu and modified Mathieu functions.

    The Fourier series of the odd solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z

    .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z

    This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
    input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
    input m=2n+1.

    Parameters
    ----------
    m : int
        Order of Mathieu functions. Must be positive.
    q : float (>=0)
        Parameter of Mathieu functions. Must be non-negative.

    Returns
    -------
    Bk : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m <= 0):
        raise ValueError("m must be an integer > 0")

    # Empirical estimate of how many coefficients are significant.
    if (q <= 1):
        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
    else:
        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
    km = int(qm + 0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    m = int(floor(m))
    # FCOEF odd-solution selectors: kd=4 for even m, kd=3 for odd m.
    kd = 3 if m % 2 else 4
    b = mathieu_b(m, q)
    fc = specfun.fcoef(kd, m, q, b)
    return fc[:km]
def lpmn(m, n, z):
    """Sequence of associated Legendre functions of the first kind.

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    This function takes a real argument ``z``. For complex arguments ``z``
    use clpmn instead.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float
        Input value.

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    See Also
    --------
    clpmn: associated Legendre functions of the first kind for complex z

    Notes
    -----
    In the interval (-1, 1), Ferrer's function of the first kind is
    returned. The phase convention used for the intervals (1, inf)
    and (-inf, -1) is such that the result is always real.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/14.3
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if iscomplex(z):
        raise ValueError("Argument must be real. Use clpmn instead.")
    if (m < 0):
        # Negative order: evaluate at |m|, then rescale with the ratio of
        # gamma factors that connects P_n^{-m} to P_n^{m} (DLMF 14.9).
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        # Gamma may overflow for the unused entries with mf > nf; those are
        # zeroed by `where`, so suppress the warnings.
        with ufuncs.errstate(all='ignore'):
            if abs(z) < 1:
                # Ferrer function; DLMF 14.9.3
                fixarr = where(mf > nf, 0.0,
                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
            else:
                # Match to clpmn; DLMF 14.9.13
                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
    else:
        mp = m
    # Fortran routine computes all orders 0..mp and degrees 0..n at once.
    p, pd = specfun.lpmn(mp, n, z)
    if (m < 0):
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def clpmn(m, n, z, type=3):
    """Associated Legendre function of the first kind for complex arguments.

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float or complex
        Input value.
    type : int, optional
       takes values 2 or 3
       2: cut on the real axis ``|x| > 1``
       3: cut on the real axis ``-1 < x < 1`` (default)

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders ``0..m`` and degrees ``0..n``
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders ``0..m`` and degrees ``0..n``

    See Also
    --------
    lpmn: associated Legendre functions of the first kind for real z

    Notes
    -----
    By default, i.e. for ``type=3``, phase conventions are chosen according
    to [1]_ such that the function is analytic. The cut lies on the interval
    (-1, 1). Approaching the cut from above or below in general yields a phase
    factor with respect to Ferrer's function of the first kind
    (cf. `lpmn`).

    For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
    on the interval (-1, 1) in the complex plane yields Ferrer's function
    of the first kind.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/14.21
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if not(type == 2 or type == 3):
        raise ValueError("type must be either 2 or 3.")
    if (m < 0):
        # Negative order: evaluate at |m|, then rescale with the gamma-factor
        # ratio connecting P_n^{-m} to P_n^{m}; the phase depends on `type`.
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        # Gamma may overflow for the unused entries with mf > nf; those are
        # zeroed by `where`, so suppress the warnings.
        with ufuncs.errstate(all='ignore'):
            if type == 2:
                fixarr = where(mf > nf, 0.0,
                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
            else:
                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
    else:
        mp = m
    # Fortran routine takes real and imaginary parts of z separately.
    p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
    if (m < 0):
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def lqmn(m, n, z):
    """Sequence of associated Legendre functions of the second kind.

    Computes the associated Legendre function of the second kind of order m
    and degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative,
    ``Qmn'(z)``.  Returns two arrays of size ``(m+1, n+1)`` containing
    ``Qmn(z)`` and ``Qmn'(z)`` for all orders from ``0..m`` and degrees
    from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : complex
        Input value.

    Returns
    -------
    Qmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Qmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(m) or (m < 0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m, n = int(m), int(n)
    # The Fortran routines require orders of at least 1; slice back afterwards.
    mm = max(1, m)
    nn = max(1, n)
    compute = specfun.clqmn if iscomplex(z) else specfun.lqmn
    q, qd = compute(mm, nn, z)
    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
    """Bernoulli numbers B0..Bn (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # BERNOB requires at least two terms; trim the result back to n+1.
    return specfun.bernob(int(max(n, 2)))[:(n+1)]
def euler(n):
    """Euler numbers E(0), E(1), ..., E(n).

    The Euler numbers [1]_ are also known as the secant numbers.

    Because ``euler(n)`` returns floating point values, it does not give
    exact values for large `n`. The first inexact value is E(22).

    Parameters
    ----------
    n : int
        The highest index of the Euler number to be returned.

    Returns
    -------
    ndarray
        The Euler numbers [E(0), E(1), ..., E(n)].
        The odd Euler numbers, which are all zero, are included.

    References
    ----------
    .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences,
           https://oeis.org/A122045
    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import euler
    >>> euler(6)
    array([  1.,   0.,  -1.,   0.,   5.,   0., -61.])
    >>> euler(22)[-1]  # Exact value of E(22) is -69348874393137901.
    -69348874393137976.0
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # EULERB requires at least two terms; trim the result back to n+1.
    return specfun.eulerb(max(n, 2))[:(n+1)]
def lpn(n, z):
    """Legendre function of the first kind.

    Compute sequence of Legendre functions of the first kind (polynomials),
    Pn(z) and derivatives for all degrees from 0 to n (inclusive).

    See also special.legendre for polynomial class.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # Fortran routines need a degree of at least 1; trim afterwards.
    degree = max(n, 1)
    if iscomplex(z):
        pn, pd = specfun.clpn(degree, z)
    else:
        pn, pd = specfun.lpn(degree, z)
    return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
    """Legendre function of the second kind.

    Compute sequence of Legendre functions of the second kind, Qn(z) and
    derivatives for all degrees from 0 to n (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # Fortran routines need a degree of at least 1; trim afterwards.
    degree = max(n, 1)
    if iscomplex(z):
        qn, qd = specfun.clqn(degree, z)
    else:
        qn, qd = specfun.lqnb(degree, z)
    return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
    """
    Compute `nt` zeros and values of the Airy function Ai and its derivative.

    Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
    first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
    the corresponding values Ai(a');
    and the corresponding values Ai'(a).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    a : ndarray
        First `nt` zeros of Ai(x)
    ap : ndarray
        First `nt` zeros of Ai'(x)
    ai : ndarray
        Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
    aip : ndarray
        Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)

    Examples
    --------
    >>> from scipy import special
    >>> a, ap, ai, aip = special.ai_zeros(3)
    >>> a
    array([-2.33810741, -4.08794944, -5.52055983])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # AIRYZO selector 1 computes quantities for Ai (2 would select Bi).
    return specfun.airyzo(nt, 1)
def bi_zeros(nt):
    """
    Compute `nt` zeros and values of the Airy function Bi and its derivative.

    Computes the first `nt` zeros, b, of the Airy function Bi(x);
    first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
    the corresponding values Bi(b');
    and the corresponding values Bi'(b).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    b : ndarray
        First `nt` zeros of Bi(x)
    bp : ndarray
        First `nt` zeros of Bi'(x)
    bi : ndarray
        Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
    bip : ndarray
        Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)

    Examples
    --------
    >>> from scipy import special
    >>> b, bp, bi, bip = special.bi_zeros(3)
    >>> b
    array([-1.17371322, -3.2710933 , -4.83073784])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # AIRYZO selector 2 computes quantities for Bi (1 would select Ai).
    return specfun.airyzo(nt, 2)
def lmbda(v, x):
    r"""Jahnke-Emden Lambda function, Lambdav(x).

    This function is defined as [2]_,

    .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},

    where :math:`\Gamma` is the gamma function and :math:`J_v` is the
    Bessel function of the first kind.

    Parameters
    ----------
    v : float
        Order of the Lambda function.  Must be non-negative.
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    vl : ndarray
        Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dl : ndarray
        Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
           Curves" (4th ed.), Dover, 1945
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (v < 0):
        # BUG FIX: the guard accepts v == 0, so the message must say ">= 0"
        # (it previously claimed "> 0").
        raise ValueError("argument must be >= 0.")
    n = int(v)
    v0 = v - n
    # Fortran routines need at least one order; trim afterwards.
    n1 = max(n, 1)
    v1 = n1 + v0
    if (v != floor(v)):
        # Non-integer order: use the general routine LAMV.
        vm, vl, dl = specfun.lamv(v1, x)
    else:
        # Integer order: the specialized routine LAMN is applicable.
        vm, vl, dl = specfun.lamn(v1, x)
    return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
    """Parabolic cylinder functions Dv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    # Split v into integer part and fractional remainder.
    n = int(v)
    frac = v - n
    n1 = max(n, 1)
    v1 = n1 + frac
    dv, dp, pdf, pdd = specfun.pbdv(v1, x)
    return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
    """Parabolic cylinder functions Vv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    # Split v into integer part and fractional remainder.
    n = int(v)
    frac = v - n
    n1 = max(n, 1)
    v1 = n1 + frac
    dv, dp, pdf, pdd = specfun.pbvv(v1, x)
    return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
    """Parabolic cylinder functions Dn(z) and derivatives.

    Parameters
    ----------
    n : int
        Order of the parabolic cylinder function
    z : complex
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_i(z), for i=0, ..., i=n.
    dp : ndarray
        Derivatives D_i'(z), for i=0, ..., i=n.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (floor(n) != n):
        raise ValueError("n must be an integer.")
    # CPBDN requires |order| > 1; otherwise fall back to order 1 and slice.
    n1 = 1 if abs(n) <= 1 else n
    cpb, cpd = specfun.cpbdn(n1, z)
    return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
    """Compute nt zeros of the Kelvin function ber.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    ber

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 1 == zeros of ber.
    return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
    """Compute nt zeros of the Kelvin function bei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    bei

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 2 == zeros of bei.
    return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
    """Compute nt zeros of the Kelvin function ker.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    ker

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 3 == zeros of ker.
    return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
    """Compute nt zeros of the Kelvin function kei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    kei

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 4 == zeros of kei.
    return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function ber.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    ber, berp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 5 == zeros of ber'.
    return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function bei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    bei, beip

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 6 == zeros of bei'.
    return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function ker.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    ker, kerp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 7 == zeros of ker'.
    return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function kei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    kei, keip

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selector 8 == zeros of kei'.
    return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
    """Compute nt zeros of all Kelvin functions.

    Returned in a length-8 tuple of arrays of length nt.  The tuple contains
    the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # KLVNZO selectors 1..8 map to ber, bei, ker, kei, ber', bei', ker', kei'.
    return tuple(specfun.klvnzo(nt, kd) for kd in range(1, 9))
def pro_cv_seq(m, n, c):
    """Characteristic values for prolate spheroidal wave functions.

    Compute a sequence of characteristic values for the prolate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n-m > 199):
        raise ValueError("Difference between n and m is too large.")
    num_values = n - m + 1
    # SEGV mode +1 selects the prolate case; index [1] holds the values.
    return specfun.segv(m, n, c, 1)[1][:num_values]
def obl_cv_seq(m, n, c):
    """Characteristic values for oblate spheroidal wave functions.

    Compute a sequence of characteristic values for the oblate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n-m > 199):
        raise ValueError("Difference between n and m is too large.")
    num_values = n - m + 1
    # SEGV mode -1 selects the oblate case; index [1] holds the values.
    return specfun.segv(m, n, c, -1)[1][:num_values]
def comb(N, k, exact=False, repetition=False):
    """The number of combinations of N things taken k at a time.

    This is often expressed as "N choose k".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.
    repetition : bool, optional
        If `repetition` is True, then the number of combinations with
        repetition is computed.

    Returns
    -------
    val : int, float, ndarray
        The total number of combinations.

    See Also
    --------
    binom : Binomial coefficient ufunc

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If N < 0, or k < 0, then 0 is returned.
    - If k > N and repetition=False, then 0 is returned.

    Examples
    --------
    >>> from scipy.special import comb
    >>> comb(10, 3, exact=True)
    120
    >>> comb(10, 3, exact=True, repetition=True)
    220
    """
    if repetition:
        # Multisets of size k from N items: C(N + k - 1, k).
        return comb(N + k - 1, k, exact)
    if exact:
        return _comb_int(N, k)
    # Floating-point path: evaluate via the binom ufunc and zero out
    # any entries outside the valid domain 0 <= k <= N.
    k, N = asarray(k), asarray(N)
    valid = (k <= N) & (N >= 0) & (k >= 0)
    result = binom(N, k)
    if isinstance(result, np.ndarray):
        result[~valid] = 0
    elif not valid:
        result = np.float64(0)
    return result
def perm(N, k, exact=False):
    """Permutations of N things taken k at a time, i.e., k-permutations of N.

    It's also known as "partial permutations".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.

    Returns
    -------
    val : int, ndarray
        The number of k-permutations of N.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> from scipy.special import perm
    >>> perm(10, 3, exact=True)
    720
    """
    if exact:
        if (k > N) or (N < 0) or (k < 0):
            return 0
        # Exact integer product N * (N-1) * ... * (N-k+1).
        result = 1
        for factor in range(N - k + 1, N + 1):
            result *= factor
        return result
    # Floating-point path: Pochhammer symbol poch(N-k+1, k) equals the
    # falling factorial; zero out entries outside the valid domain.
    k, N = asarray(k), asarray(N)
    valid = (k <= N) & (N >= 0) & (k >= 0)
    result = poch(N - k + 1, k)
    if isinstance(result, np.ndarray):
        result[~valid] = 0
    elif not valid:
        result = np.float64(0)
    return result
# https://stackoverflow.com/a/16327037
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
    """
    The factorial of a number or array of numbers.
    The factorial of non-negative integer `n` is the product of all
    positive integers less than or equal to `n`::
        n! = n * (n - 1) * (n - 2) * ... * 1
    Parameters
    ----------
    n : int or array_like of ints
        Input values. If ``n < 0``, the return value is 0.
    exact : bool, optional
        If True, calculate the answer exactly using long integer arithmetic.
        If False, result is approximated in floating point rapidly using the
        `gamma` function.
        Default is False.
    Returns
    -------
    nf : float or int or ndarray
        Factorial of `n`, as integer or float depending on `exact`.
    Notes
    -----
    For arrays with ``exact=True``, the factorial is computed only once, for
    the largest input, with each other result computed in the process.
    The output dtype is increased to ``int64`` or ``object`` if necessary.
    With ``exact=False`` the factorial is approximated using the gamma
    function:
    .. math:: n! = \\Gamma(n+1)
    Examples
    --------
    >>> from scipy.special import factorial
    >>> arr = np.array([3, 4, 5])
    >>> factorial(arr, exact=False)
    array([ 6., 24., 120.])
    >>> factorial(arr, exact=True)
    array([ 6, 24, 120])
    >>> factorial(5, exact=True)
    120
    """
    if exact:
        if np.ndim(n) == 0:
            # Scalar path: NaN propagates unchanged, negatives map to 0.
            if np.isnan(n):
                return n
            return 0 if n < 0 else math.factorial(n)
        else:
            # Array path: compute each distinct value's factorial once, in
            # increasing order, reusing the running product between values.
            n = asarray(n)
            un = np.unique(n).astype(object)
            # Convert to object array of long ints if np.int_ can't handle size
            # (20! fits in int64; 12! fits in 32-bit int; NaN forces float).
            if np.isnan(n).any():
                dt = float
            elif un[-1] > 20:
                dt = object
            elif un[-1] > 12:
                dt = np.int64
            else:
                dt = np.int_
            out = np.empty_like(n, dtype=dt)
            # Handle invalid/trivial values
            # Ignore runtime warning when less operator used w/np.nan
            with np.errstate(all='ignore'):
                un = un[un > 1]
                out[n < 2] = 1
                out[n < 0] = 0
            # Calculate products of each range of numbers
            if un.size:
                # Seed with the smallest non-trivial factorial, then extend
                # the running product across each gap between unique values.
                val = math.factorial(un[0])
                out[n == un[0]] = val
                for i in range(len(un) - 1):
                    prev = un[i] + 1
                    current = un[i + 1]
                    val *= _range_prod(prev, current)
                    out[n == current] = val
            if np.isnan(n).any():
                # Propagate NaN positions through to the output (forces float).
                out = out.astype(np.float64)
                out[np.isnan(n)] = n[np.isnan(n)]
            return out
    else:
        # Floating-point approximation via the gamma-function ufunc.
        out = ufuncs._factorial(n)
        return out
def factorial2(n, exact=False):
    """Double factorial.

    This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
    * 3 * 1``. It can be approximated numerically as::

        n!! = special.gamma(n/2+1)*2**((m+1)/2)/sqrt(pi) n odd
            = 2**(n/2) * (n/2)! n even

    Parameters
    ----------
    n : int or array_like
        Calculate ``n!!``. Arrays are only supported with `exact` set
        to False. If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default). If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of `n`, as an int or a float depending on
        `exact`.

    Examples
    --------
    >>> from scipy.special import factorial2
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105
    """
    if not exact:
        # Vectorized approximation: handle odd and even entries separately
        # via the gamma-function identities above; invalid entries stay 0.
        n = asarray(n)
        vals = zeros(n.shape, 'd')
        odd_cond = (n % 2) & (n >= -1)
        even_cond = (1 - (n % 2)) & (n >= -1)
        odd_vals = extract(odd_cond, n)
        even_vals = extract(even_cond, n)
        half_odd = odd_vals / 2.0
        half_even = even_vals / 2.0
        place(vals, odd_cond, gamma(half_odd + 1) / sqrt(pi) * pow(2.0, half_odd + 0.5))
        place(vals, even_cond, gamma(half_even + 1) * pow(2.0, half_even))
        return vals
    # Exact integer arithmetic (scalar n only): (-1)!! = 0!! = 1 by
    # convention; anything below -1 gives 0.
    if n < -1:
        return 0
    if n <= 0:
        return 1
    result = 1
    for factor in range(n, 0, -2):
        result *= factor
    return result
def factorialk(n, k, exact=True):
    """Multifactorial of n of order k, n(!!...!).

    This is the multifactorial of n skipping k values. For example,

        factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1

    In particular, for any integer ``n``, we have

        factorialk(n, 1) = factorial(n)
        factorialk(n, 2) = factorial2(n)

    Parameters
    ----------
    n : int
        Calculate multifactorial. If `n` < 0, the return value is 0.
    k : int
        Order of multifactorial.
    exact : bool, optional
        If exact is set to True, calculate the answer exactly using
        integer arithmetic.

    Returns
    -------
    val : int
        Multifactorial of `n`.

    Raises
    ------
    NotImplementedError
        Raises when exact is False

    Examples
    --------
    >>> from scipy.special import factorialk
    >>> factorialk(5, 1, exact=True)
    120
    >>> factorialk(5, 3, exact=True)
    10
    """
    if not exact:
        # No floating-point approximation is implemented for general k.
        raise NotImplementedError
    if n < 1 - k:
        return 0
    if n <= 0:
        # Base cases n in (1-k, 0]: empty product, defined as 1.
        return 1
    result = 1
    for term in range(n, 0, -k):
        result *= term
    return result
def zeta(x, q=None, out=None):
    r"""
    Riemann or Hurwitz zeta function.

    Parameters
    ----------
    x : array_like of float
        Input data, must be real
    q : array_like of float, optional
        Input data, must be real. Defaults to Riemann zeta.
    out : ndarray, optional
        Output array for the computed values.

    Returns
    -------
    out : array_like
        Values of zeta(x).

    Notes
    -----
    The two-argument version is the Hurwitz zeta function

    .. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x};

    see [dlmf]_ for details. The Riemann zeta function corresponds to
    the case when ``q = 1``.

    See Also
    --------
    zetac

    References
    ----------
    .. [dlmf] NIST, Digital Library of Mathematical Functions,
        https://dlmf.nist.gov/25.11#i

    Examples
    --------
    >>> from scipy.special import zeta, polygamma, factorial

    Some specific values:

    >>> zeta(2), np.pi**2/6
    (1.6449340668482266, 1.6449340668482264)
    >>> zeta(4), np.pi**4/90
    (1.0823232337111381, 1.082323233711138)

    Relation to the `polygamma` function:

    >>> m = 3
    >>> x = 1.25
    >>> polygamma(m, x)
    array(2.782144009188397)
    >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x)
    2.7821440091883969
    """
    # Dispatch: the one-argument form has a dedicated Riemann-zeta ufunc;
    # the two-argument form evaluates the Hurwitz zeta.
    if q is None:
        return ufuncs._riemann_zeta(x, out)
    return ufuncs._zeta(x, q, out)
| {
"content_hash": "d7312c3fa9e7937691f03a9f8cc758e5",
"timestamp": "",
"source": "github",
"line_count": 2540,
"max_line_length": 94,
"avg_line_length": 27.96771653543307,
"alnum_prop": 0.5674850080238746,
"repo_name": "nmayorov/scipy",
"id": "c0a125fdc73ef20df2c5ecb826f3bab29c7cb331",
"size": "71076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scipy/special/_basic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4418291"
},
{
"name": "C++",
"bytes": "672553"
},
{
"name": "Dockerfile",
"bytes": "1328"
},
{
"name": "Fortran",
"bytes": "5300184"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "13498627"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration for the products app: creates the Product table.
    initial = True
    # No dependencies: this is the app's first migration.
    dependencies = [
    ]
    operations = [
        # Product: basic catalog entry with name/description/category and a
        # fixed-precision price (max 9999.99 given max_digits=6, decimal_places=2).
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=255)),
                ('category', models.CharField(max_length=255)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
            ],
        ),
    ]
| {
"content_hash": "8b635c92d0735f0e8f1a7975a88e9fba",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 28.75,
"alnum_prop": 0.5695652173913044,
"repo_name": "juandc/platzi-courses",
"id": "7b8d64c187fdcc636efc81a641eaec28b097118b",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python-Django-2016/Django/Clase1/Shoppy/products/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56245"
},
{
"name": "HTML",
"bytes": "61675"
},
{
"name": "JavaScript",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "137764"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for the fieldbook_py helper library.
setup(name='fieldbook_py',
      version='0.4.1',
      description='Helper package for using the Fieldbook.com API',
      # PyPI trove classifiers: beta quality, MIT-licensed, CPython 3.
      classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: Implementation :: CPython'
      ],
      url='http://github.com/mattstibbs/fieldbook_py',
      author='Matt Stibbs',
      author_email='git@stibbsy.co.uk',
      license='MIT',
      packages=['fieldbook_py'],
      # Runtime dependency: HTTP calls to the Fieldbook API use `requests`.
      install_requires=[
          'requests'
      ],
      # Tests run with nose via `python setup.py test`.
      test_suite='nose.collector',
      tests_require=[
          'nose'
      ],
      zip_safe=False)
| {
"content_hash": "3442c677cde7071bb13f1e0e923a892b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 30.208333333333332,
"alnum_prop": 0.576551724137931,
"repo_name": "mattstibbs/fieldbook_py",
"id": "4c89f086edc432a795c32aa4317ef5da4905c3eb",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6520"
}
],
"symlink_target": ""
} |
get_credentials = '''
select password, password_salt
from person
where id = %(id)s
'''
get_person = '''
select *
from person
where id = %(id)s
'''
create_user = '''
insert into person (id, password, password_salt)
values (%(id)s, %(password)s, %(password_salt)s)
'''
| {
"content_hash": "9b79608dc6e256c2168b6d31af096240",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 21.8125,
"alnum_prop": 0.498567335243553,
"repo_name": "best-coloc-ever/globibot",
"id": "944d97f9c90e58382a86bb480fa295f0371b1f6f",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/src/globibot/api/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2285"
},
{
"name": "HTML",
"bytes": "804"
},
{
"name": "JavaScript",
"bytes": "5583"
},
{
"name": "Python",
"bytes": "232680"
},
{
"name": "Shell",
"bytes": "330"
}
],
"symlink_target": ""
} |
import sys
from form_designer.exceptions import HttpRedirectException
from django.template.base import TemplateSyntaxError
from django.http import HttpResponseRedirect
class RedirectMiddleware(object):
    """Middleware turning an ``HttpRedirectException`` into its redirect response."""

    def process_exception(self, request, exception):
        """Return the redirect response carried by an HttpRedirectException.

        Returns None (implicitly) for any other exception, letting Django's
        normal exception handling proceed.
        """
        # Django wraps the original exception in a TemplateSyntaxError if it
        # is raised in a node; unwrap it to get at the real exception.
        if isinstance(exception, TemplateSyntaxError):
            # Fixed: `except IndexError, e:` is Python-2-only syntax and the
            # bound name was unused; a bare `except IndexError:` works on
            # both Python 2 and 3.
            try:
                exception = sys.exc_info()[1]
            except IndexError:
                return
        if isinstance(exception, HttpRedirectException):
            return exception.response
| {
"content_hash": "cfc6e117f194b7d79f363cefd4b2f32e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.6930379746835443,
"repo_name": "guilleCoro/django-form-designer",
"id": "062d7b4689189abc953ded283eb324119aee549e",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_designer/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "7459"
},
{
"name": "Python",
"bytes": "148474"
}
],
"symlink_target": ""
} |
import time, select, errno
from gearman.compat import *
from gearman.connection import GearmanConnection
from gearman.task import Task, Taskset
class GearmanBaseClient(object):
    """Shared base for Gearman clients: holds one connection per job server."""

    class ServerUnavailable(Exception):
        pass
    class CommandError(Exception):
        pass
    class InvalidResponse(Exception):
        pass

    def __init__(self, job_servers, prefix=None, pre_connect=False):
        """
        job_servers = ['host:post', 'host', ...]
        """
        # Fixed: replaced the fragile `cond and a or b` idiom with a
        # conditional expression. Function names are namespaced as
        # "<prefix>\t<func>" when a prefix is given, empty otherwise.
        self.prefix = "%s\t" % prefix if prefix else ""
        self.set_job_servers(job_servers, pre_connect)

    def set_job_servers(self, servers, pre_connect=False):
        """Replace the connection list with one connection per server spec."""
        # TODO: don't shut down dups. shut down old ones gracefully
        self.connections = []
        self.connections_by_hostport = {}
        for serv in servers:
            connection = GearmanConnection(serv, timeout=2)
            if pre_connect:
                # Best-effort eager connect; failures are retried lazily later.
                try:
                    connection.connect()
                except connection.ConnectionError:
                    pass
            self.connections.append(connection)
            self.connections_by_hostport[connection.hostspec] = connection
class GearmanClient(GearmanBaseClient):
    """Synchronous Gearman client: submits tasks and waits on their results.

    NOTE(review): this module is Python 2 only — it relies on
    ``except E, exc`` syntax, ``dict.itervalues()`` and list-valued
    ``range()`` concatenation below.
    """
    class TaskFailed(Exception):
        pass
    def __call__(self, func, arg, uniq=None, **kwargs):
        # Convenience: client(func, arg) is shorthand for do_task(Task(...)).
        return self.do_task(Task(func, arg, uniq, **kwargs))
    def do_task(self, task):
        """Return the result of the task or raise a TaskFailed exception on failure."""
        def _on_fail():
            raise self.TaskFailed("Task failed")
        task.on_fail.append(_on_fail)
        taskset = Taskset([task])
        if not self.do_taskset(taskset, timeout=task.timeout):
            raise self.TaskFailed("Task timeout")
        return task.result
    def dispatch_background_task(self, func, arg, uniq=None, high_priority=False):
        """Submit a background task and return its handle."""
        task = Task(func, arg, uniq, background=True, high_priority=high_priority)
        taskset = Taskset([task])
        self.do_taskset(taskset)
        return task.handle
    def get_server_from_hash(self, hsh):
        """Return a live connection for the given hash"""
        # TODO: instead of cycling through, should we shuffle the list if the first connection fails or is dead?
        first_idx = hsh % len(self.connections)
        all_dead = all(conn.is_dead for conn in self.connections)
        # Python 2 only: range() returns lists, concatenated to cycle the
        # connection list starting at first_idx.
        for idx in range(first_idx, len(self.connections)) + range(0, first_idx):
            conn = self.connections[idx]
            # if all of the connections are dead we should try reconnecting
            if conn.is_dead and not all_dead:
                continue
            try:
                conn.connect() # Make sure the connection is up (noop if already connected)
            except conn.ConnectionError:
                pass
            else:
                return conn
        raise self.ServerUnavailable("Unable to Locate Server")
    def _submit_task(self, task):
        # Pick a server by task hash and send the appropriate submit command;
        # the task is queued to await its job_created handle.
        server = self.get_server_from_hash(hash(task))
        if task.background:
            func = "submit_job_bg"
        elif task.high_priority:
            func = "submit_job_high"
        else:
            func = "submit_job"
        server.send_command(func,
            dict(func=self.prefix + task.func, arg=task.arg, uniq=task.uniq))
        server.waiting_for_handles.insert(0, task)
        return server
    def _command_handler(self, taskset, conn, cmd, args):
        """Dispatch one server response to the matching task in the taskset."""
        # DEBUG and _D( "RECEIVED COMMAND:", cmd, args )
        # `cond and a or b` idiom: handle is "host:port//<handle>" when the
        # response carries one, else None.
        handle = ('handle' in args) and ("%s//%s" % (conn.hostspec, args['handle'])) or None
        if cmd != 'job_created' and handle:
            task = taskset.get( taskset.handles.get(handle, None), None)
            if not task:
                # Response for a task from another taskset; ignore it.
                return
            if task.is_finished:
                raise self.InvalidResponse("Task %s received %s" % (repr(task), cmd))
        if cmd == 'work_complete':
            task.complete(args['result'])
        elif cmd == 'work_fail':
            # Retry on failure up to task.retry_count before giving up.
            if task.retries_done < task.retry_count:
                task.retries_done += 1
                task.retrying()
                task.handle = None
                taskset.connections.add(self._submit_task(task))
            else:
                task.fail()
        elif cmd == 'work_status':
            task.status(int(args['numerator']), int(args['denominator']))
        elif cmd == 'job_created':
            # Pair the new handle with the oldest task awaiting one.
            task = conn.waiting_for_handles.pop()
            task.handle = handle
            taskset.handles[handle] = hash( task )
            if task.background:
                # Background tasks are done once we have a handle.
                task.is_finished = True
        elif cmd == 'error':
            raise self.CommandError(str(args)) # TODO make better
        else:
            raise Exception("Unexpected command: %s" % cmd)
    def do_taskset(self, taskset, timeout=None):
        """Execute a Taskset and return True iff all tasks finished before timeout."""
        # set of connections to which jobs were submitted
        taskset.connections = set(self._submit_task(task) for task in taskset.itervalues())
        taskset.handles = {}
        start_time = time.time()
        # NOTE: `timeout and x or 0` means timeout=0 behaves like no timeout.
        end_time = timeout and start_time + timeout or 0
        while not taskset.cancelled and not all(t.is_finished for t in taskset.itervalues()):
            # Poll in 0.5s slices when no timeout was requested.
            timeleft = timeout and end_time - time.time() or 0.5
            if timeleft <= 0:
                taskset.cancel()
                break
            rx_socks = [c for c in taskset.connections if c.readable()]
            tx_socks = [c for c in taskset.connections if c.writable()]
            try:
                rd_list, wr_list, ex_list = select.select(rx_socks, tx_socks, taskset.connections, timeleft)
            except select.error, exc:
                # Ignore interrupted system call, reraise anything else
                if exc[0] != errno.EINTR:
                    raise
                continue
            for conn in ex_list:
                pass # TODO
            for conn in rd_list:
                for cmd in conn.recv():
                    self._command_handler(taskset, conn, *cmd)
            for conn in wr_list:
                conn.send()
        # TODO: should we fail all tasks that didn't finish or leave that up to the caller?
        return all(t.is_finished for t in taskset.itervalues())
    def get_status(self, handle):
        """Query a server for the status of the job identified by *handle*.

        *handle* has the "host:port//<server handle>" form produced above.
        """
        hostport, shandle = handle.split("//")
        server = self.connections_by_hostport[hostport]
        server.connect() # Make sure the connection is up (noop if already connected)
        server.send_command("get_status", dict(handle=shandle))
        return server.recv_blocking()[1]
| {
"content_hash": "e1506dcc7c7a728d3676d3f5a224156c",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 112,
"avg_line_length": 38.27428571428572,
"alnum_prop": 0.5809196775156763,
"repo_name": "samuel/python-gearman",
"id": "44be53368695f13977c3ac9ced9ad57757589691",
"size": "6721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gearman/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53867"
}
],
"symlink_target": ""
} |
"""Deploy GPFS cache cluster.
"""
import logging
import math
import os
import tempfile
import time
from orchestrate import base
log = logging.getLogger(__name__)
class GPFS(base.OrchestrateSystem):
  """Deploy GPFS instance.

  Orchestrates a GPFS cache cluster on GCE: creates nodes, sets up
  passwordless SSH between them, provisions the cluster via generated
  shell scripts, and NFS-exports the resulting AFM-cached fileset.
  All gcloud/scp/ssh work goes through self.run_command (inherited);
  presumably run_command honours self.dry_run — TODO confirm in base.
  """
  def __init__(self):
    super(GPFS, self).__init__()
    # Storage
    self.cluster_name = 'gpfs'
    self.nodes = 1
    self.machine_type = 'n1-highmem-32'
    self.network = 'compute'
    self.cpu_platform = 'Intel Skylake'
    # Creating instances with at least 4xNVME local SSDs for 1.5T gets
    # maximum performance.
    self.disks = 4
    # Backend whose exported volume this cluster caches (looked up in
    # self.others during configure()).
    self.storage_type = 'elastifile'
    self.provisioning_script_file_name = None
    self.filesystem_exports_script_file_name = None
    # fine-tunning
    self.gateways = 1
    self.data_replicas = 1
    self.max_data_replicas = 2
    self.metadata_replicas = 1
    self.max_metadata_replicas = 2
    self.page_pool = '100G'
    self.seq_discard_threshold = '1T'
    self.worker_threads = 512
    self.max_stat_cache = 50000
    self.max_files_to_cache = 50000
  @property
  def description(self):
    return """Deploys an GPFS cache cluster."""
  def run(self):
    """Executes system deployment.
    Returns:
      True if successful. False, otherwise.
    """
    # NOTE(review): despite the docstring, no value is returned here.
    log.info('Deploying GPFS')
    self.create_nodes()
    self.wait(180, 'Waiting for nodes to boot up. Thanks for your patience.')
    self.init_ssh_access()
    self.init_cluster()
    self.export_filesystem()
  def configure(self):
    """Configure."""
    # self.zone/prefix/deploy_dir/others are populated by the orchestrator
    # before configure() is called — presumably via the base class; confirm.
    self.region = '-'.join(self.zone.split('-')[:-1])
    if self.prefix:
      self.prefix += '-'
    if self.nodes:
      self.nodes = int(self.nodes)
    if self.gateways:
      self.gateways = int(self.gateways)
    if self.deploy_dir:
      command = 'mkdir -p {self.deploy_dir}'.format(self=self)
      self.run_command(command)
    else:
      self.deploy_dir = tempfile.mkdtemp(
          prefix='orchestrate-{self.project}-{self.name}-'.format(self=self),
          dir='/var/tmp',
          )
    if not self.provisioning_script_file_name:
      self.provisioning_script_file_name = \
          '{self.deploy_dir}/gpfs_provisioning.sh'.format(self=self)
    if not self.filesystem_exports_script_file_name:
      self.filesystem_exports_script_file_name = \
          '{self.deploy_dir}/gpfs_filesystem_exports.sh'.format(self=self)
    self.storage_server = '{}.resources'.format(self.storage_type)
    storage = self.others.get(self.storage_type, dict())
    # e.g. 'projects:/projects/root|tools:/tools/root'
    # split into: ['projects', '/projects/root']
    name, volume = storage['volumes'].split('|')[0].split(':')
    self.volume_remote = volume
    self.volume_local = '/{}'.format(name)
  def wait(self, seconds, message):
    """Wait specified amount of time.
    Args:
      seconds (float): Time to wait.
      message: Reason for the wait.
    """
    log.info('Delay %ss: %s', seconds, message)
    if not self.dry_run:
      time.sleep(seconds)
  def get_node_name(self, node):
    """Returns a unique node name based on the cluster name and given node.
    Args:
      node: Node number.
    """
    return '{self.prefix}{self.cluster_name}-node{node}'.format(
        self=self,
        node=node,
        )
  def create_nodes(self):
    """Create nodes for the cluster."""
    log.info('Creating nodes')
    local_ssds = ' --local-ssd=interface=NVME'*self.disks
    for node in range(1, self.nodes+1):
      node_name = self.get_node_name(node)
      command = (
          'gcloud compute instances create {node_name}'
          ' --project={self.project}'
          ' --zone={self.zone}'
          ' --machine-type={self.machine_type}'
          ' --subnet={self.network}'
          ' --no-address'
          ' --maintenance-policy=MIGRATE'
          ' --scopes='
          'https://www.googleapis.com/auth/devstorage.read_only,'
          'https://www.googleapis.com/auth/logging.write,'
          'https://www.googleapis.com/auth/monitoring.write,'
          'https://www.googleapis.com/auth/servicecontrol,'
          'https://www.googleapis.com/auth/service.management.readonly,'
          'https://www.googleapis.com/auth/trace.append'
          ' --min-cpu-platform="{self.cpu_platform}"'
          ' --image=gpfs-final-1'
          ' --image-project=dean-182715'
          ' --boot-disk-size=100GB'
          ' --boot-disk-type=pd-standard'
          ' --boot-disk-device-name={node_name}'
          ' --reservation-affinity=any'
          ' {local_ssds}'
          ).format(
              self=self,
              node_name=node_name,
              local_ssds=local_ssds,
              )
      self.run_command(command)
      # Protect cluster nodes from accidental deletion.
      command = (
          'gcloud compute instances update {node_name} --deletion-protection'
          ).format(node_name=node_name)
      self.run_command(command)
  def init_ssh_access(self):
    """Initialize passwordless access."""
    log.info('Initializing passwordless access between nodes.')
    for node in range(1, self.nodes+1):
      # Add key to authorized_keys
      command = (
          'gcloud compute ssh --project={self.project} root@{node_name}'
          ' --command="cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys"'
          ).format(
              self=self,
              node_name=self.get_node_name(node),
              )
      self.run_command(command)
      # Add public keys from all other nodes in the cluster to known_hosts
      # inclusive of current node
      for other_node in range(1, self.nodes+1):
        command = (
            'gcloud compute ssh --project={self.project} root@{node_name} --command=\'ssh-keyscan {other_node_name} | grep ssh-rsa | sed -e "s/{other_node_name}/{other_node_name}.{self.zone}.c.{self.project}.internal/g" - >> /root/.ssh/known_hosts\''
            ).format(
                self=self,
                node_name=self.get_node_name(node),
                other_node_name=self.get_node_name(other_node),
                )
        self.run_command(command)
  def create_provisioning_script(self):
    """Create provisioning script for first node in the cluster."""
    log.info('Creating provisioning script')
    node_entries = []
    node_names = []
    # Mark the first ceil(nodes/2) nodes as quorum nodes.
    quorum_count = math.ceil(self.nodes/2)
    quorum = 0
    for node in range(1, self.nodes+1):
      node_name = self.get_node_name(node)
      node_names.append(node_name)
      node_entry = self.get_node_name(node)
      if quorum < quorum_count:
        node_entry += ':quorum'
        quorum += 1
      node_entries.append(node_entry)
    node_entries_content = '\n'.join(node_entries)
    # One NSD stanza per local NVME disk per node.
    disk_entries = []
    for node in range(1, self.nodes+1):
      node_name = self.get_node_name(node)
      for index in range(1, self.disks+1):
        nsd_name = 'd{node_name}nsd{index}'.format(
            node_name=node_name,
            index=index,
            )
        # NSD names may not contain separators present in instance names.
        nsd_name = nsd_name.replace('-', '').replace('_', '').replace(' ', '')
        disk_entry = """
        %nsd: device=/dev/nvme0n{index}
        nsd={nsd_name}
        servers={node_name}
        usage=dataAndMetadata
        """.lstrip().format(
            index=index,
            node_name=node_name,
            nsd_name=nsd_name,
            )
        disk_entries.append(disk_entry)
    disk_entries_content = ''.join(disk_entries)
    # Shell snippets that block until each node reports "active".
    wait_on_nodes = []
    for node in range(1, self.nodes+1):
      wait_on_node = (
          'while [ `mmgetstate -N {node_name} -Y | grep {node_name}'
          ' | cut -d: -f9` != "active" ]; do echo "Waiting on {node_name}"'
          ' && sleep 5; done'
          ).format(node_name=self.get_node_name(node))
      wait_on_nodes.append(wait_on_node)
    wait_on_nodes_content = '\n'.join(wait_on_nodes)
    script = """#!/bin/env sh
echo "Creating list of nodes"
cat << EOT > /var/tmp/nodes.txt
{node_entries_content}
EOT
echo "Creating list of disks across all nodes"
cat << EOT > /var/tmp/disks.txt
{disk_entries_content}
EOT
echo "Creating cluster"
mmcrcluster -t lc -n /var/tmp/nodes.txt
# mmlscluster
echo "Accepting license for all nodes"
mmchlicense server --accept -N {node_names}
echo "Fine-tuning performance parameters"
mmchconfig pagepool={self.page_pool} -i -N {node_names}
mmchconfig seqDiscardThreshold={self.seq_discard_threshold} -i
mmchconfig maxStatCache={self.max_stat_cache}
mmchconfig maxFilesToCache={self.max_files_to_cache}
mmchconfig workerThreads={self.worker_threads}
echo "Starting cluster"
mmstartup -a
# mmgetstate -a
# wait until it's done arbitrating
echo "Waiting on all cluster nodes to become active"
{wait_on_nodes_content}
echo "Creating NSDs from disks"
mmcrnsd -F /var/tmp/disks.txt
# mmlsnsd
# ll -lad /dev/n*
echo "Creating file system"
mmcrfs /gpfs/gpfsA /dev/gpfsA -F /var/tmp/disks.txt -B256K -Q yes -r {self.data_replicas} -R {self.max_data_replicas} -m {self.metadata_replicas} -M {self.max_metadata_replicas}
# mmlsnsd
echo "Mounting filesystem"
mmmount gpfsA -a
df -h
# echo "hello from gpfs cluster {self.cluster_name}" > /gpfs/gpfsA/{self.cluster_name}.txt
# cat /gpfs/gpfsA/{self.cluster_name}.txt
# mmlsconfig
# mmlsconfig pagepool
echo "Setting gateway"
mmchnode --gateway -N {gateway_node_names}
echo "Creating fileset {self.volume_local}"
mmcrfileset gpfsA cache -p "afmTarget={self.storage_server}:{self.volume_remote},afmMode=iw" --inode-space=new --inode-limit=10M
mmlinkfileset gpfsA cache -J /gpfs/gpfsA{self.volume_local}
# mmafmctl gpfsA getState
# mmlsfileset gpfsA cache --afm -L
""".lstrip().format(
        self=self,
        node_name=self.get_node_name(1),
        node_names=','.join(node_names),
        gateway_node_names=','.join(node_names[:self.gateways]),
        disk_entries_content=disk_entries_content,
        node_entries_content=node_entries_content,
        wait_on_nodes_content=wait_on_nodes_content,
        )
    if not self.dry_run:
      with open(self.provisioning_script_file_name, 'w') as output_file:
        output_file.write(script)
  def init_cluster(self):
    """Initialize first node in the cluster."""
    log.info('Initializing cluster')
    self.create_provisioning_script()
    self.upload_provisioning_script()
    self.execute_provisioning_script()
  def upload_provisioning_script(self):
    """Upload provisioning script to first node in the cluster for execution."""
    log.info('Uploading provisioning script')
    file_name = os.path.basename(self.provisioning_script_file_name)
    command = (
        'gcloud compute scp --project={self.project}'
        ' {self.provisioning_script_file_name}'
        ' root@{node_name}:/var/tmp/{file_name}'
        ).format(
            self=self,
            node_name=self.get_node_name(1),
            file_name=file_name,
            )
    self.run_command(command)
  def execute_provisioning_script(self):
    """Execute provisioning script on first node."""
    log.info('Executing provisioning script')
    # Make the GPFS mm* binaries reachable in future root shells.
    command = (
        'gcloud compute ssh --project={self.project} root@{node_name}'
        ' --command=\'echo "PATH=$PATH:$HOME/bin:/usr/lpp/mmfs/bin ; export PATH" >> /root/.bashrc\''
        ).format(
            self=self,
            node_name=self.get_node_name(1),
            )
    self.run_command(command)
    file_name = os.path.basename(self.provisioning_script_file_name)
    command = (
        'gcloud compute ssh --project={self.project} root@{node_name}'
        ' --command="sh /var/tmp/{file_name}"'
        ).format(
            self=self,
            node_name=self.get_node_name(1),
            file_name=file_name,
            )
    self.run_command(command)
  def export_filesystem(self):
    """Export filesystem from all nodes."""
    log.info('Exporting filesystem')
    self.create_filesystem_exports_script()
    for node in range(1, self.nodes+1):
      self.upload_filesystem_exports_script(node)
      self.execute_filesystem_exports_script(node)
  def create_filesystem_exports_script(self):
    """Create script to export filesystem from cluster."""
    log.info('Creating filesystem export script')
    script = """#!/bin/env sh
echo "Installing nsf-utils"
yum install -y nfs-utils
echo "Exporting fileset {self.volume_local}"
cat << EOT >> /etc/exports
{self.volume_local} *(rw,sync,no_root_squash,no_subtree_check)
EOT
echo "Binding {self.volume_local}"
mkdir {self.volume_local}
mount -o bind /gpfs/gpfsA{self.volume_local} {self.volume_local}
exportfs -r
exportfs
systemctl enable nfs-server
systemctl start nfs-server
systemctl status nfs-server
echo "Force listing of first-level directory on {self.volume_local}"
ls {self.volume_local} > /dev/nul
""".lstrip().format(
        self=self,
        )
    if not self.dry_run:
      with open(self.filesystem_exports_script_file_name, 'w') as output_file:
        output_file.write(script)
  def upload_filesystem_exports_script(self, node):
    """Upload script to export filesystem from cluster.
    Args:
      node: Node index.
    """
    node_name = self.get_node_name(node)
    log.info('Uploading filesystem exports script to %s', node_name)
    file_name = os.path.basename(self.filesystem_exports_script_file_name)
    command = (
        'gcloud compute scp --project={self.project}'
        ' {self.filesystem_exports_script_file_name} root@{node_name}:/var/tmp/{file_name}'
        ).format(
            self=self,
            node_name=node_name,
            file_name=file_name,
            )
    self.run_command(command)
  def execute_filesystem_exports_script(self, node):
    """Execute provisioning script on first node.
    Args:
      node: Node index.
    """
    node_name = self.get_node_name(node)
    log.info('Executing provisioning script on %s', node_name)
    file_name = os.path.basename(self.filesystem_exports_script_file_name)
    command = (
        'gcloud compute ssh --project={self.project} root@{node_name}'
        ' --command="sh /var/tmp/{file_name}"'
        ).format(
            self=self,
            node_name=node_name,
            file_name=file_name,
            )
    self.run_command(command)
| {
"content_hash": "225b079b9efc6d753485a414691512d5",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 250,
"avg_line_length": 32.4338747099768,
"alnum_prop": 0.6308748837542028,
"repo_name": "GoogleCloudPlatform/solutions-cloud-orchestrate",
"id": "d8acd0871bde4bc04c06122750e2fa9c09b80d9b",
"size": "14565",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cli/src/orchestrate/systems/gpfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1318"
},
{
"name": "PowerShell",
"bytes": "4997"
},
{
"name": "Python",
"bytes": "391725"
},
{
"name": "Shell",
"bytes": "24291"
}
],
"symlink_target": ""
} |
import pandas as pd
import numpy as np
from scipy.optimize import minimize
from ex5_utils import *
import scipy.io
import matplotlib.pyplot as plt
# Part 1 -- Loading and visualizing data
# Load the Coursera ML ex5 dataset; train/validation/test splits come
# pre-made in the .mat file.
raw_mat = scipy.io.loadmat("ex5data1.mat")
X = raw_mat.get("X")
y = raw_mat.get("y")
ytest = raw_mat.get("ytest")
yval = raw_mat.get("yval")
Xtest = raw_mat.get("Xtest")
Xval = raw_mat.get("Xval")
plt.plot(X, y, 'rx', markersize=10, linewidth=1.5)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()
# Part 2 -- Regularized Linear Regression Cost
# Prepend the intercept column of ones; cost with theta=[1,1], lambda=0.
full_X = np.hstack((np.ones_like(y), X))
theta = np.array([1,1])
J, g = linearRegCostFunction(theta,full_X,y,0.0)
# Part 3 -- Regularized Linear Regression Gradient (lambda=1)
J, g = linearRegCostFunction(theta,full_X,y,1.0)
# Part 4 -- Train Linear Regression
reg_param = 0
est_theta = trainLinearReg(full_X,y,reg_param)
# Plot linear fit based on estimated parameters
plt.plot(X, y, 'rx', markersize=10, linewidth=1.5)
plt.plot(X,np.dot(full_X,est_theta),'b-',linewidth=2)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()
# Part 5 -- Learning Curve for Linear Regression
# Train/validation error as a function of training-set size.
reg_param = 0.0
full_Xval = np.hstack((np.ones_like(yval),Xval))
error_train, error_val = learningCurve(full_X,y,full_Xval,yval,reg_param)
plt.plot(range(len(X)), error_train, range(len(X)), error_val);
plt.title('Learning curve for linear regression')
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.show()
# Part 6 -- Feature Mapping for Polynomial Regression
# Expand to degree-8 polynomial features; normalize train split, then apply
# the SAME mu/sigma to the test and validation splits.
p = 8
X_poly = polyFeatures(X,p)
X_poly, mu, sigma = featureNormalize(X_poly)
X_poly = np.hstack((np.ones_like(y),X_poly))
X_poly_test = polyFeatures(Xtest,p)
X_poly_test = np.divide(X_poly_test - mu, sigma)
X_poly_test = np.hstack((np.ones_like(ytest),X_poly_test))
X_poly_val = polyFeatures(Xval,p)
X_poly_val = np.divide(X_poly_val - mu, sigma)
X_poly_val = np.hstack((np.ones_like(yval),X_poly_val))
# Part 7 -- Learning Curve for Polynomial Regression (lambda=1)
reg_param = 1.0
est_theta = trainLinearReg(X_poly,y,reg_param)
plt.plot(X, y, 'rx', markersize=10, linewidth=1.5)
plotFit(np.min(X), np.max(X), mu, sigma, est_theta, p)
plt.xlabel('Change in water level (x)')
plt.ylabel('Water flowing out of the dam (y)')
plt.show()
error_train, error_val = learningCurve(X_poly,y,X_poly_val,yval,reg_param)
plt.plot(range(len(X)), error_train, range(len(X)), error_val);
plt.title('Learning curve for linear regression')
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.show()
# Part 8 -- Validation for selecting regularization parameter
# NOTE(review): the original exercise sweeps lambda over the POLYNOMIAL
# features (X_poly/X_poly_val); linear features are used here — confirm
# this is intentional.
lambda_vec, error_train, error_val = validationCurve(full_X,y,full_Xval,yval)
plt.plot(lambda_vec, error_train, lambda_vec, error_val);
plt.title('Selecting \lambda using a cross validation set')
plt.legend(['Train', 'Cross Validation'])
plt.xlabel('lambda')
plt.ylabel('Error')
plt.show()
"content_hash": "1737b750d8ce2ce3ecdcfffe4d08d69c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 31.705263157894738,
"alnum_prop": 0.7138114209827358,
"repo_name": "lukemans/Hello-world",
"id": "435547c62792561ab2e8651d69bdfdfa47fae96f",
"size": "3012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1048478"
},
{
"name": "Python",
"bytes": "57769"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Makes EventAgendaItem.bill nullable while keeping CASCADE deletes from
    # the referenced Bill to its related agenda items.
    dependencies = [
        ('councilmatic_core', '0024_auto_20161017_1201'),
    ]
    operations = [
        migrations.AlterField(
            model_name='eventagendaitem',
            name='bill',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='related_agenda_items', to='councilmatic_core.Bill'),
        ),
    ]
| {
"content_hash": "49970dfabc3984b86f0c21bc952cc5a6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 158,
"avg_line_length": 28.210526315789473,
"alnum_prop": 0.6585820895522388,
"repo_name": "datamade/django-councilmatic",
"id": "8c751e6d4fb57c0082be168db4c8a089a1287733",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.5",
"path": "councilmatic_core/migrations/0025_auto_20161017_1640.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "73072"
},
{
"name": "HTML",
"bytes": "164464"
},
{
"name": "Java",
"bytes": "504830"
},
{
"name": "JavaScript",
"bytes": "78854"
},
{
"name": "Python",
"bytes": "202625"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path as op
import subprocess
import time
import json
from distutils.version import LooseVersion
from mne import pick_types
from mne.utils import logger, set_log_file
from mne.report import Report
from mne.io.constants import FIFF
def get_data_picks(inst, meg_combined=False):
    """Get data channel indices as separate list of tuples.

    Parameters
    ----------
    inst : object
        An object exposing an ``info`` attribute and supporting channel-type
        membership tests (``'mag' in inst`` etc.), e.g. an mne Raw/Epochs.
    meg_combined : bool
        Whether to return combined picks for grad and mag.

    Returns
    -------
    picks_list : list of tuples
        The list of (picks, type-string) tuples.
    """
    info = inst.info
    has_mag = 'mag' in inst
    has_grad = 'grad' in inst
    has_eeg = 'eeg' in inst
    picks_list = []
    # Separate magnetometer picks unless a combined MEG pick was requested
    # and gradiometers are also present.
    if has_mag and (meg_combined is not True or not has_grad):
        picks_list.append(
            (pick_types(info, meg='mag', eeg=False, stim=False), 'mag'))
    # Same logic, mirrored, for gradiometers.
    if has_grad and (meg_combined is not True or not has_mag):
        picks_list.append(
            (pick_types(info, meg='grad', eeg=False, stim=False), 'grad'))
    # Combined MEG pick only when both sensor types exist.
    if has_mag and has_grad and meg_combined is True:
        picks_list.append(
            (pick_types(info, meg=True, eeg=False, stim=False), 'meg'))
    if has_eeg:
        picks_list.append(
            (pick_types(info, meg=False, eeg=True, stim=False), 'eeg'))
    return picks_list
def fname_to_string(fname):
    """Return the contents of the given file as a string.

    Parameters
    ----------
    fname : str
        Absolute path to the file.

    Returns
    -------
    str
        The full text of the file.
    """
    with open(fname) as fid:
        return fid.read()
def _get_git_head(path):
"""Aux function to read HEAD from git"""
if not isinstance(path, str):
raise ValueError('path must be a string, you passed a {}'.format(
type(path))
)
if not op.exists(path):
raise ValueError('This path does not exist: {}'.format(path))
command = ('cd {gitpath}; '
'git rev-parse --verify HEAD').format(gitpath=path)
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
shell=True)
proc_stdout = process.communicate()[0].strip()
del process
return proc_stdout
def get_versions(sys):
    """Collect version strings for all imported top-level modules.

    Parameters
    ----------
    sys : module
        The sys module object (passed in so ``sys.modules`` is inspected).

    Returns
    -------
    module_versions : dict
        Mapping of module name to version string, or None when a module does
        not expose ``__version__``.
    """
    module_versions = {}
    for name, module in sys.modules.items():
        # Skip submodules; only record top-level packages.
        if '.' in name:
            continue
        raw_version = getattr(module, '__version__', None)
        if raw_version is None:
            module_version = None
        else:
            # Round-trip through LooseVersion; vstring is the raw string
            # (the original fed None into LooseVersion, relying on the
            # missing-attribute fallback -- this makes the intent explicit).
            module_version = getattr(LooseVersion(raw_version), 'vstring', None)
        if module_version is not None and (
                'git' in module_version or '.dev' in module_version):
            # Development installs: also record the exact git revision.
            # Guard __file__: builtin/namespace modules may not have one,
            # which previously raised AttributeError.
            module_file = getattr(module, '__file__', None)
            if module_file is not None:
                git_path = op.dirname(op.realpath(module_file))
                head = _get_git_head(git_path)
                module_version += '-HEAD:{}'.format(head)
        module_versions[name] = module_version
    return module_versions
def create_run_id():
    """Create a unique identifier for the current run.

    Returns
    -------
    run_id : str
        The current UTC time formatted as '%Y-%m-%d_%H-%M-%S', for example
        '2015-04-28_11-42-32'.
    """
    now_utc = time.gmtime()
    return time.strftime('%Y-%m-%d_%H-%M-%S', now_utc)
def setup_provenance(script, results_dir, config=None, use_agg=True):
    """Set up provenance tracking for a processing script.

    Parameters
    ----------
    script : str
        Path to the script being executed.
    results_dir : str
        The results directory; a per-script subdirectory is created inside.
    config : None | str
        The name of the config file. By default, the function expects the
        config to be a file named `config.py` next to `script`.
    use_agg : bool
        Whether to use the 'Agg' backend for matplotlib or not.

    Returns
    -------
    report : mne.report.Report
        The mne report, with ``data_path`` set to the logging directory.
    run_id : str
        The timestamp id returned by ``create_run_id()``.
    results_dir : str
        The per-script results directory actually used.
    logger : logging.Logger
        The mne logger, now writing to the run's log file.

    Side-effects
    ------------
    - makes the results dir if it does not exist
    - sets the mne log file for stderr output
    - writes a JSON file with runtime (module version) information
    - copies the calling script (and config, if found) into the logging dir
    """
    if use_agg is True:
        # Import here so matplotlib's backend can still be chosen by the
        # caller when use_agg is False.
        import matplotlib
        matplotlib.use('Agg')
    if not op.isfile(script):
        raise ValueError('sorry, this is not a script!')
    # Name the results subdirectory after the script's basename (no extension).
    step = op.splitext(op.split(script)[1])[0]
    results_dir = op.join(results_dir, step)
    if not op.exists(results_dir):
        logger.info('generating results dir')
        os.mkdir(results_dir)
    run_id = create_run_id()
    logger.info('generated run id: %s' % run_id)
    logger.info('preparing logging:')
    # One logging directory per run, named by the run id.
    logging_dir = op.join(results_dir, run_id)
    logger.info('... making logging directory: %s' % logging_dir)
    os.mkdir(logging_dir)
    # Record the versions of all imported modules for reproducibility.
    modules = get_versions(sys)
    runtime_log = op.join(logging_dir, 'run_time.json')
    with open(runtime_log, 'w') as fid:
        json.dump(modules, fid)
    logger.info('... writing runtime info to: %s' % runtime_log)
    std_logfile = op.join(logging_dir, 'run_output.log')
    # Snapshot the calling script's source code into the logging dir.
    script_code = op.join(logging_dir, 'script.py')
    with open(script_code, 'w') as fid:
        with open(script) as script_fid:
            source_code = script_fid.read()
        fid.write(source_code)
    logger.info('... logging source code of calling script')
    if config is None:
        # Default: look for config.py next to the calling script.
        config = op.join(op.dirname(script), 'config.py')
    if op.isfile(config):
        # Snapshot the config as well, when present.
        config_code = op.join(logging_dir, 'config.py')
        with open(config_code, 'w') as fid:
            with open(config) as config_fid:
                source_code = config_fid.read()
            fid.write(source_code)
        logger.info('... logging source code of config.')
    else:
        logger.info('... No config found. Logging nothing.')
    logger.info('... preparing Report')
    report = Report(title=step)
    report.data_path = logging_dir
    logger.info('... setting logfile: %s' % std_logfile)
    set_log_file(std_logfile)
    return report, run_id, results_dir, logger
def set_eog_ecg_channels(raw, eog_ch='EEG062', ecg_ch='EEG063'):
    """Set the EOG and ECG channels.

    Will modify the channel info in place.

    Parameters
    ----------
    raw : instance of Raw
        The raw object.
    eog_ch : list | str
        EOG channel name(s).
    ecg_ch : list | str
        ECG channel name(s).
    """
    # Bug fix: the original tested isinstance(..., basestring), which only
    # exists on Python 2 (NameError on Python 3). Checking for "not already a
    # sequence of names" handles str and unicode on both Python versions.
    if not isinstance(eog_ch, (list, tuple)):
        eog_ch = [eog_ch]
    if not isinstance(ecg_ch, (list, tuple)):
        ecg_ch = [ecg_ch]
    # Retag each named channel's kind in the measurement info (in place).
    for channel in eog_ch:
        raw.info['chs'][raw.ch_names.index(channel)]['kind'] = FIFF.FIFFV_EOG_CH
    for channel in ecg_ch:
        raw.info['chs'][raw.ch_names.index(channel)]['kind'] = FIFF.FIFFV_ECG_CH
| {
"content_hash": "8eca95cbce803cb4945e4a73350dbf8b",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 80,
"avg_line_length": 29.946902654867255,
"alnum_prop": 0.5960401891252955,
"repo_name": "cmoutard/meeg-preprocessing",
"id": "838657a15037c2e6c4aa6bf2c0500c5f21225f6d",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meeg_preprocessing/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37189"
}
],
"symlink_target": ""
} |
import sys
from __init__ import read_fastq_sequences
import DBConstants
# A shell interface to the screed FQDBM database writing function
if __name__ == "__main__":
    # Make sure the user entered the command line arguments correctly.
    if len(sys.argv) != 2:
        sys.stderr.write("ERROR: USAGE IS: %s <dbfilename>\n" % sys.argv[0])
        # sys.exit is preferred over the site-module exit() builtin, which is
        # not guaranteed to exist (e.g. under python -S).
        sys.exit(1)
    filename = sys.argv[1]
    read_fastq_sequences(filename)
    # Parenthesized single-argument print works under Python 2 and Python 3.
    print("Database saved in %s%s" % (sys.argv[1], DBConstants.fileExtension))
    sys.exit(0)
| {
"content_hash": "395efaa96da2c2f498879dd9ec3f8e12",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 31.9375,
"alnum_prop": 0.6673189823874756,
"repo_name": "poojavade/Genomics_Docker",
"id": "45596228699e0da63661199f1708821fd80976e7",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/screed-0.7.1-py2.7.egg/screed/fqdbm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
} |
"""
Challenge #258 [Intermediate] IRC: Responding to commands
https://www.reddit.com/r/dailyprogrammer/comments/4anny5/challenge_258_intermediate_irc_responding_to/
# Description
In the last challenge we initiated a connection to an IRC server. This time we are going to utilise that connection by
responding to user input. On an IRC server you can communicate with other users either directly, or in a group chatroom
known as a channel. Channel names are distinguished from users by a prefixed character (`#` on freenode) in the name.
After connecting to an IRC server you will receive some informational text from the server known as the Message Of The
Day,
or MOTD. The server will buffer any messages (particularly attempts to join channels) sent before it has finished.
The end of the MOTD is marked by the message `RPL_ENDOFMOTD` which is defined as the number `376`. You don't necessarily
have to wait for the end of the MOTD before joining, but I've found it usually works better if you do.
:wolfe.freenode.net 376 GeekBot :End of /MOTD command.
To join a channel you must use the `JOIN` message. It takes a single parameter, which is a comma separated list of one
or
more channels.
JOIN #reddit-dailyprogrammer,#botters-test
Once you have sent this message, you will receive one or more JOIN message(s) back from the server for every channel
you were successfully able to join. The message you receive back will be prefixed with yourself as the origin.
:GeekBot!G33kDude@192-168-1-42.isp.com JOIN #reddit-dailyprogrammer
:GeekBot!G33kDude@192-168-1-42.isp.com JOIN #botters-test
After you've been joined to the channel, you can send text to the channel using the `PRIVMSG` message. It takes two
parameters, the first being the the comma separated list of users or channels to send the text to, and the second being
the
colon prefixed message text.
PRIVMSG #reddit-dailyprogrammer :Hello World!
In addition to being able to send messages, you can receive messages that have been sent to the channel by other users.
You should listen for a phrase prefixed with your name, then respond to that chat message. For example, you might see
the following chat message.
:GeekDude!G33kDude@192-168-1-42.isp.com PRIVMSG #ahkscript :GeekBot: random 20
Your code would parse this message, and see the chatted contents were `GeekBot: random 20`. In response, your program
might
do something like generate a random number, and chat it back.
PRIVMSG #ahkscript :GeekDude: 4 // chosen by fair 20 sided dice roll // guaranteed to be random
# Input Description
In addition to the input from last time's challenge, there will also be two line specifying a channel to join, and a
message to chat upon joining.
chat.freenode.net:6667
Nickname
Username
Real Name
#reddit-dailyprogrammer,#rdp,#botters-test
Hello World!
# Output Description
In addition to the last challenge's output, you must also pick and respond to one or more chat commands. These commands
must take at least one parameter, and the return value should be chatted back to the same channel prefixed with the nick
of the person who invoked the command.
The following code block has the prefix `>` for outgoing messages, and `<` for incoming messages.
>NICK Nickname
>USER Username 0 * :Real Name
<:wolfe.freenode.net NOTICE * :*** Looking up your hostname...
<:wolfe.freenode.net NOTICE * :*** Checking Ident
<:wolfe.freenode.net NOTICE * :*** Found your hostname
<:wolfe.freenode.net NOTICE * :*** No Ident response
<:wolfe.freenode.net 001 Nickname :Welcome to the freenode Internet Relay Chat Network Nickname
--- A bit later ---
<:wolfe.freenode.net 376 MyRC_Bot :End of /MOTD command.
>JOIN #reddit-dailyprogrammer,#rdp,#botters-test
<:GeekBot!G33kDude@192-168-1-42.isp.com JOIN #reddit-dailyprogrammer
>PRIVMSG #reddit-dailyprogrammer :Hello World!
<:GeekBot!G33kDude@192-168-1-42.isp.com JOIN #rdp
>PRIVMSG #rdp :Hello World!
<:GeekBot!G33kDude@192-168-1-42.isp.com JOIN #botters-test
>PRIVMSG #botters-test :Hello World!
--- Wait for chat ---
<:GeekDude!G33kDude@192-168-1-42.isp.com PRIVMSG #reddit-dailyprogrammer :GeekBot: sum 12 8 7 3 5
>PRIVMSG #reddit-dailyprogrammer :GeekDude: The sum is 35
Also, don't forget to return any incoming `PING` messages!
# Challenge Input
Your bot should handle commands sent to it directly as well as through normal channels. When you receive such a message,
the channel parameter of `PRIVMSG` is set to your own nickname.
:GeekDude!G33kDude@192-168-1-42.isp.com PRIVMSG GeekBot :GeekBot: mult 6 9
# Challenge Output
You will have to recognize that the message has been sent directly to you, so you can send your own reply directly back.
If you tried to send to the same destination as the original message (as you would with a regular channel message),
you would end up sending the chat to yourself.
PRIVMSG GeekDude :GeekDude: 42
# Bonus
When communicating with the bot directly via private message, nickname prefixes for calling commands and for return
values should be optional. For example, the following should work:
<:GeekDude!G33kDude@192-168-1-42.isp.com PRIVMSG GeekBot :GeekBot: div 1 9801
>PRIVMSG GeekDude :GeekDude: 0.00010203...
<:GeekDude!G33kDude@192-168-1-42.isp.com PRIVMSG GeekBot :div 1 9801
>PRIVMSG GeekDude :0.00010203...
# Notes
Be careful not to allow your bot to generate any newlines in response to a command. For example, if your bot did hex to
ascii conversion (`GeekBot: hex2ascii 0D0A`) someone could potentially cause the bot to send a new protocol message,
which
could do all sorts of nasty things. This includes sending the `QUIT` message which would disconnect the bot, or making
it
spam people potentially getting it banned. If your bot is registered to an account, someone could use this technique to
delete the account, or reset the password.
To verify your code is joining channels and chatting correctly, I suggest joining the channel(s) in advance using an
IRC client, such as the web based http://webchat.freenode.net/.
You can see the full original IRC specification at https://tools.ietf.org/html/rfc1459. See also,
http://ircdocs.horse/specs/.
[A Regular Expression For IRC Messages](https://mybuddymichael.com/writings/a-regular-expression-for-irc-messages.html)
I get the distinct feeling I've missed something, so if you see anything off let me know.
"""
def main():
    """Placeholder entry point; the challenge solution is not implemented."""
    pass
if __name__ == "__main__":
    main()
| {
"content_hash": "df2c981ccfa4325f15911235ff414101",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 120,
"avg_line_length": 59.80733944954128,
"alnum_prop": 0.7623868691517104,
"repo_name": "DayGitH/Python-Challenges",
"id": "35ab010d95436337b2910fd17d9d2e5441d8651b",
"size": "6519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20160322B.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
"""Automatic addition of additional markup to the doc strings used by pygrametl,
which should allow them to be readable in the source code and in the
documentation after Sphinx has processed them.
"""
# Copyright (c) 2014-2020, Aalborg University (pygrametl@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
def correct_docstring(app, what, name, obj, options, lines):
    """Correct docstring markup in place for Sphinx.

    Keeps docstrings readable in the source files while letting Sphinx
    format the generated documentation.

    Parameters are the standard ``autodoc-process-docstring`` callback
    arguments; only ``lines`` (the docstring split into lines) is used and
    it is modified in place.
    """
    # enumerate() over the mutating list is intentional: inserting the blank
    # line after a header means the next iteration sees the inserted line,
    # which matches the original behavior.
    for index, value in enumerate(lines):
        # Adds additional backslashes to keep escape sequences as text.
        if '\\t' in value or '\\n' in value:
            lines[index] = lines[index].replace("\\", "\\\\")
        # Escapes * in argument descriptions to stop Sphinx using them as
        # markup. Bug fix: operate on lines[index] (not the stale `value`)
        # so a backslash fix applied above is not discarded when a line
        # contains both an escape sequence and a star.
        if '*' in value:
            lines[index] = escape_star(lines[index])
        # Formatting of the arguments header with bold and a newline.
        if value == 'Arguments:' or value == 'Keyword arguments:':
            lines[index] = '**' + value + '**'
            lines.insert(index + 1, '')


def escape_star(line):
    """Escape all unmatched stars (*) so Sphinx knows they aren't markup."""
    line_split = line.split()
    for index, value in enumerate(line_split):
        # A word ending in '*' is assumed to close markup and is left alone.
        if not value.endswith('*'):
            line_split[index] = line_split[index].replace("*", "\\*")
    return ' '.join(line_split)
def correct_signature(app, what, name, obj, options, signature,
                      return_annotation):
    """Correct the signature markup so Sphinx prints escape sequences as
    literal text instead of interpreting them.

    Returns the ``(signature, return_annotation)`` pair expected by the
    ``autodoc-process-signature`` event.
    """
    # Pass an empty/None signature straight through; avoids None checks below.
    if not signature:
        return (signature, return_annotation)
    fixed = signature
    # Double the backslashes so escape sequences survive as text.
    if '\\t' in fixed or '\\n' in fixed:
        fixed = fixed.replace("\\", "\\\\")
    # Sphinx renders function-pointer defaults with their memory address;
    # substitute the original source names instead.
    if "<function" in fixed:
        fixed = correct_function_pointers(obj, fixed)
    # Side effects are discarded by autodoc, so a fresh tuple is returned.
    return (fixed, return_annotation)
def correct_function_pointers(obj, signature):
    """Manual mapping of function pointers with addresses to their original
    names; needed until Sphinx issue #759 has been resolved.

    Reads the ``def`` line from the source file of ``obj`` and substitutes
    those parameter texts for any ``<function ...>`` fragments in the
    Sphinx-provided ``signature``. Returns the corrected signature string.
    """
    # Signatures can belong to either a function, method or object, depending
    # on what version of python is used. Extraction of docstrings from objects
    # does in some versions of python require accessing the method first
    # (hence the __init__ fallback in the else branch).
    if hasattr(obj, "func_defaults"):
        filename = obj.__code__.co_filename
        lineno = obj.__code__.co_firstlineno
        source_code_line = read_function_signature(filename, lineno)
    elif hasattr(obj, "__code__"):
        filename = obj.__code__.co_filename
        lineno = obj.__code__.co_firstlineno
        source_code_line = read_function_signature(filename, lineno)
    else:
        filename = obj.__init__.__code__.co_filename
        lineno = obj.__init__.__code__.co_firstlineno
        source_code_line = read_function_signature(filename, lineno)
    # The line of source code read from the file, and the original signature,
    # are split into lists of parameters, allowing the function names from the
    # line of source code read to easily substitute the memory addresses
    # present in the original signature given by Sphinx.
    signature_split = signature.split(',')
    source_code_line_split = source_code_line.split(',')
    # Function name, def, self, and the ending colon are stripped to match the
    # original signature read by Sphinx, making substituting each part trivial.
    param_start_index = source_code_line_split[0].find('(')
    source_code_line_split[0] = source_code_line_split[0][param_start_index:]
    source_code_line_split[-1] = source_code_line_split[-1][0:-1]
    if source_code_line_split[0] == '(self':
        # Drop 'self' but keep the opening parenthesis on the new first param.
        del(source_code_line_split[0])
        source_code_line_split[0] = '(' + source_code_line_split[0]
    # Finally we substitute the pointers with the matching parameter text from
    # the source code; non-pointer parameters keep Sphinx's rendering.
    result_string_list = []
    for sig, source in zip(signature_split, source_code_line_split):
        if '<function ' in sig:
            result_string_list.append(source)
        else:
            result_string_list.append(sig)
    # The function pointer block is just replaced with the function_name
    return ','.join(result_string_list)
def read_function_signature(filename, lineno):
    """Read a function signature from a source file.

    Parameters
    ----------
    filename : str
        Path to the source file.
    lineno : int
        1-based line number of the ``def`` statement, as reported by
        ``co_firstlineno``.

    Returns
    -------
    str
        The signature joined onto one line with all surrounding whitespace
        stripped, up to and including the terminating colon.
    """
    # co_firstlineno is 1-based while enumerate() below is 0-based.
    lineno = lineno - 1
    function_signature = ""
    # Bug fix: use a context manager so the file is always closed -- the
    # original leaked the handle whenever the terminating ':' was not found
    # (the close() only ran on the break path).
    with open(filename) as file_handle:
        reached_function_signature = False
        for file_index, line in enumerate(file_handle):
            if file_index == lineno:
                reached_function_signature = True
            if reached_function_signature:
                # Strip each physical line so the multi-line signature joins
                # into one compact string.
                function_signature += line.strip()
                if line.endswith(':\n'):
                    break
    return function_signature
def setup(app):
    """Initial setup that connects the plug-in to Sphinx."""
    # Register handlers for the autodoc events.
    # Documentation: http://sphinx-doc.org/ext/autodoc.html
    handlers = (
        ('autodoc-process-docstring', correct_docstring),
        ('autodoc-process-signature', correct_signature),
    )
    for event, handler in handlers:
        app.connect(event, handler)
| {
"content_hash": "62b1b6e6ebf171dfdd421ea04c570e72",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 80,
"avg_line_length": 42.4180790960452,
"alnum_prop": 0.6885988279168886,
"repo_name": "chrthomsen/pygrametl",
"id": "178ede7b9147dc21ce69a2a3f2418490547b22ac",
"size": "7508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/_exts/autoformat.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "1899"
},
{
"name": "Python",
"bytes": "349091"
}
],
"symlink_target": ""
} |
from django.db import migrations
import touchtechnology.common.db.models
class Migration(migrations.Migration):
    # Adds an optional (blank=True) HTML "copy" field to the news Article
    # model, using the project's custom HTMLField.
    dependencies = [
        ("news", "0005_auto_20191122_1340"),
    ]
    operations = [
        migrations.AddField(
            model_name="article",
            name="copy",
            field=touchtechnology.common.db.models.HTMLField(
                blank=True, verbose_name="Copy"
            ),
        ),
    ]
| {
"content_hash": "597522352cbe43dc227a727bc29761a7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 21.9,
"alnum_prop": 0.5730593607305936,
"repo_name": "goodtune/vitriolic",
"id": "0aa8e543a08e1368c55b6689cbaf6331cede563d",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchtechnology/news/migrations/0006_article_copy_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "307509"
},
{
"name": "HTML",
"bytes": "273967"
},
{
"name": "JavaScript",
"bytes": "626908"
},
{
"name": "Less",
"bytes": "1373"
},
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "962353"
},
{
"name": "Shell",
"bytes": "1490"
},
{
"name": "XSLT",
"bytes": "3510"
}
],
"symlink_target": ""
} |
from django import forms
from django.forms import ValidationError
from generic.models import Module, StaticModuleContent
import datetime
class FilterForm(forms.Form):
    """Abstract base form for filtering contacts.

    Pops the ``request`` keyword argument before delegating to
    ``forms.Form``; subclasses must implement ``filter()``.
    """
    def __init__(self, data=None, **kwargs):
        self.request = kwargs.pop('request')
        # Bind the form only when truthy data was supplied, preserving the
        # original bound/unbound distinction.
        args = (data,) if data else ()
        forms.Form.__init__(self, *args, **kwargs)

    def filter(self, request, queryset):
        raise NotImplementedError("Subclasses of FilterForm must implement the filter() method!")
class ActionForm(forms.Form):
    """Abstract form that performs an action on a selection of objects.

    Action forms consume a list of selected objects from the generic list
    view (generic.views.generic), performing an action on them. Subclasses
    are usually tied to a particular view or at least a particular model,
    as perform() could in theory be passed any iterable in the 'results'
    parameter.
    """
    def __init__(self, data=None, **kwargs):
        self.request = kwargs.pop('request')
        # Bind the form only when truthy data was supplied.
        args = (data,) if data else ()
        forms.Form.__init__(self, *args, **kwargs)

    def perform(self, request, results):
        raise NotImplementedError("Subclasses of ActionForm must implement the perform() method!")
class ModuleForm(forms.Form):
    """Abstract base class for module-creation forms."""

    def createModule(self, dashboard, view_name, title):
        """Create a Module at the bottom of column 0 of the dashboard."""
        column_modules = dashboard.modules.filter(column=0)
        if column_modules.count():
            # Place the new module one slot below the current last one.
            offset = column_modules.order_by('-offset')[0].offset + 1
        else:
            offset = 0
        return Module.objects.create(dashboard=dashboard, view_name=view_name,
                                     column=0, offset=offset, title=title)

    def setModuleParams(self, dashboard, module=None, title=None):
        raise NotImplementedError("Subclasses of ModuleForm must implement the setModuleParams() method!")
class DateRangeForm(forms.Form):
    # Hidden unix-timestamp bounds; clean() converts them to datetimes.
    start = forms.IntegerField(required=True, widget=forms.HiddenInput())
    end = forms.IntegerField(required=True, widget=forms.HiddenInput())

    def clean(self):
        """Convert the submitted unix timestamps into datetime objects.

        Raises ValidationError when either value is missing or cannot be
        interpreted as a timestamp.
        """
        cleaned_data = self.cleaned_data
        try:
            start = cleaned_data.get('start')
            cleaned_data['start'] = datetime.datetime.fromtimestamp(float(start))
            end = cleaned_data.get('end')
            cleaned_data['end'] = datetime.datetime.fromtimestamp(float(end))
        # Bug fix: the original only caught TypeError, so a non-numeric
        # string (ValueError from float()) or an out-of-range timestamp
        # (OverflowError from fromtimestamp()) crashed instead of producing
        # a form validation error.
        except (TypeError, ValueError, OverflowError):
            raise ValidationError('Need both start and end values that are strings or numbers')
        return cleaned_data
class TimeRangeForm(forms.Form):
    # Single choice among preset relative ranges: previous calendar week ('w'),
    # month ('m') or quarter ('q').
    range = forms.ChoiceField(choices=(('w', 'Previous Calendar Week'), ('m', 'Previous Calendar Month'), ('q', 'Previous calendar quarter'),))
class StaticModuleForm(ModuleForm):
    # Either reuse an existing StaticModuleContent ('old_content') or create a
    # new one from the submitted 'content' text; 'title' names the module.
    old_content = forms.ModelChoiceField(queryset=StaticModuleContent.objects.all(), required=False, empty_label='Create New')
    content = forms.CharField(max_length=5000, required=False)
    title = forms.CharField(max_length=30, required=False)
    def setModuleParams(self, dashboard, module=None, title=None):
        """Create (or update) a static-content module on the dashboard.

        Reuses the selected StaticModuleContent when one was chosen,
        otherwise creates a new one from the submitted text, then records
        the content's primary key as a URL parameter on the module.
        """
        if self.cleaned_data['old_content']:
            content = self.cleaned_data['old_content']
        else:
            content = StaticModuleContent.objects.create(content=self.cleaned_data['content'])
        module = module or self.createModule(dashboard, 'generic.views.static_module', title=self.cleaned_data['title'])
        module.params.create(module=module, param_name='content_id', param_value=content.pk, is_url_param=True)
        return module
| {
"content_hash": "58d56e09d7eb70559360bfa2f9490583",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 143,
"avg_line_length": 43.12048192771084,
"alnum_prop": 0.6811958647666946,
"repo_name": "unicefuganda/edtrac",
"id": "1729e161ec14f55fe058682357177a3624af6ef7",
"size": "3579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edtrac_project/rapidsms_generic/generic/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "277434"
},
{
"name": "JavaScript",
"bytes": "190346"
},
{
"name": "Python",
"bytes": "2621572"
},
{
"name": "Shell",
"bytes": "4755"
}
],
"symlink_target": ""
} |
DOCUMENTATION = '''
---
module: hashivault_rekey
version_added: "3.3.0"
short_description: Hashicorp Vault rekey module
description:
- Module to (update) rekey Hashicorp Vault. Requires that a rekey
be started with hashivault_rekey_init.
options:
url:
description:
- url for vault
default: to environment variable VAULT_ADDR
ca_cert:
description:
- "path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate"
default: to environment variable VAULT_CACERT
ca_path:
description:
- "path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate : if ca_cert is specified, its value will take precedence"
default: to environment variable VAULT_CAPATH
client_cert:
description:
- "path to a PEM-encoded client certificate for TLS authentication to the Vault server"
default: to environment variable VAULT_CLIENT_CERT
client_key:
description:
- "path to an unencrypted PEM-encoded private key matching the client certificate"
default: to environment variable VAULT_CLIENT_KEY
verify:
description:
- "if set, do not verify presented TLS certificate before communicating with Vault server : setting this variable is not recommended except during testing"
default: to environment variable VAULT_SKIP_VERIFY
authtype:
description:
- authentication type to use: token, userpass, github, ldap
default: token
token:
description:
- token for vault
default: to environment variable VAULT_TOKEN
username:
description:
- username to login to vault.
password:
description:
- password to login to vault.
key:
description:
- vault key shard (aka unseal key).
nonce:
description:
- rekey nonce.
'''
EXAMPLES = '''
---
- hosts: localhost
tasks:
- hashivault_rekey:
key: '{{vault_key}}'
nonce: '{{nonce}}'
'''
def main():
    """Ansible entry point: build the argspec, run the rekey update, and
    report the result back to Ansible."""
    argspec = hashivault_argspec()
    # Both the key shard and the rekey nonce are mandatory string params.
    for param in ('key', 'nonce'):
        argspec[param] = dict(required=True, type='str')
    module = hashivault_init(argspec)
    result = hashivault_rekey(module.params)
    if result.get('failed'):
        module.fail_json(**result)
    else:
        module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.hashivault import *
@hashiwrapper
def hashivault_rekey(params):
    """Submit one unseal-key shard for an in-progress Vault rekey.

    ``params`` must contain 'key' (the key shard) and 'nonce' (the rekey
    nonce) plus the usual connection settings consumed by
    ``hashivault_client``.
    """
    client = hashivault_client(params)
    status = client.rekey(params.get('key'), params.get('nonce'))
    return {'status': status, 'changed': True}
if __name__ == '__main__':
main()
| {
"content_hash": "770df63ddf99c0b3aeed99660331d308",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 167,
"avg_line_length": 31.244444444444444,
"alnum_prop": 0.6408250355618776,
"repo_name": "cloudvisory/ansible-modules-hashivault",
"id": "e391d435f3df260e5515a9608242f920451ed247",
"size": "2834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ansible/modules/hashivault/hashivault_rekey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88628"
},
{
"name": "Shell",
"bytes": "2831"
}
],
"symlink_target": ""
} |
from kanaria.core.model import ApplicationIndex
from kanaria.core.service.brain import Brain
class kintoneInterface(object):
    def __init__(self):
        # NOTE(review): Environment is imported inside __init__ rather than at
        # module level -- presumably to avoid an import cycle; confirm.
        from kanaria.core.environment import Environment
        self._env = Environment()
        # Handles to the kintone API service and the backing database, used
        # by the lookup helpers below.
        self.service = Environment.get_kintone_service(self._env)
        self.db = Environment.get_db(self._env)
def get_application_by_code(self, code):
app_index = self.db.get_collection(ApplicationIndex).find_one({"code": code})
app = None
if app_index:
app_id = app_index["app_id"]
info = self.service.administration().get_app_info(app_id).info
if info:
app = self.service.app(app_id)
return app
def get_application_index(self, app_id):
app_index_dic = self.db.get_collection(ApplicationIndex).find_one({"app_id": app_id})
app_index = ApplicationIndex.deserialize(app_index_dic)
return app_index
def get_member_addresses(self):
export_api = self.service.user_api().for_exporting
users = export_api.get_users().users
addresses = []
for u in users:
addresses.append(u.email)
return addresses
def get_kanaria(self, create_if_not_exist=False):
import os
from pykintone.application_settings.view import View
import pykintone.application_settings.form_field as ff
from pykintone.structure_field import File
app = None
register = lambda a: self.register_application(a.app_id, Brain.MY_NAME, Brain.MY_USER_NAME)
# get from database
app = self.get_application_by_code(Brain.MY_USER_NAME)
# check existence
if not app:
infos = self.service.administration().select_app_info(name=Brain.MY_NAME).infos
if len(infos) > 0:
app = self.service.app(infos[0].app_id)
register(app)
if not app and create_if_not_exist:
app_id = ""
with self.service.administration() as admin:
# create application
created = admin.create_application(Brain.MY_NAME)
app_id = created.app_id
# update general information
icon = File.upload(os.path.join(os.path.dirname(__file__), "./static/icon.png"), admin)
set_settings = admin.general_settings().update({
"icon": {
"type": "FILE",
"file": {
"fileKey": icon.file_key
}
}
})
# create form
fields = [
ff.BaseFormField.create("SINGLE_LINE_TEXT", "subject", "Subject"),
ff.BaseFormField.create("MULTI_LINE_TEXT", "body", "MessageBody"),
ff.BaseFormField.create("SINGLE_LINE_TEXT", "from_address", "From Address"),
ff.BaseFormField.create("SINGLE_LINE_TEXT", "to_address", "To Address"),
ff.BaseFormField.create("FILE", "attached_files", "Attached Files")
]
add_fields = admin.form().add(fields)
# create view
view = View.create("LetterList", fields)
add_views = admin.view().update(view)
if set_settings.ok and add_fields.ok and add_views.ok:
admin._cached_changes = True
else:
raise Exception("Error is occurred when creating kanaria application")
app = self.service.app(app_id)
register(app)
return app
def create_default_application(self, name, code):
from pykintone.application_settings.administrator import Administrator
from pykintone.application_settings.view import View
import pykintone.application_settings.form_field as ff
result = None
with Administrator(self.service.account) as admin:
# create application
result = admin.create_application(name)
# create form
fields = [
ff.BaseFormField.create("SINGLE_LINE_TEXT", "subject", "件名"),
ff.BaseFormField.create("MULTI_LINE_TEXT", "body", "メッセージ"),
ff.BaseFormField.create("SINGLE_LINE_TEXT", "from_address", "報告者"),
ff.BaseFormField.create("FILE", "attached_files", "添付ファイル")
]
update_form = admin.form().add(fields)
# create view
view = View.create("一覧", ["subject", "from_address"])
update_view = admin.view().update(view)
if result.ok and update_form.ok and update_view.ok:
admin._cached_changes = True
else:
raise Exception("Error is occurred when creating default application")
if result.ok:
app = self.service.app(result.app_id)
self.register_application(app.app_id, name, code)
return app
else:
return None
def copy_application(self, app_id, name, code):
result = None
with self.service.administration() as admin:
result = admin.copy_application(name, app_id)
if result.ok:
self.register_application(result.app_id, name, code)
app = self.service.app(result.app_id)
return app
else:
raise Exception("Error occurred when copying the application")
def register_application(self, app_id, name, code):
app_index = ApplicationIndex(app_id, name, code)
self.db.save(app_index)
def find_similar_applications(self, name, find_template=False):
from pykintone.application_settings.administrator import Administrator
# todo: have to implements more flexible search
infos = Administrator(self.service.account).select_app_info(name=name).infos
filtered = []
for i in infos:
template = i.name.startswith(Brain.TEMPLATE_HEADER)
if find_template and template:
filtered.append(i)
elif not find_template and not template:
filtered.append(i)
return filtered
| {
"content_hash": "528a34c15a5015b20d28cff94b89285e",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 103,
"avg_line_length": 38.47239263803681,
"alnum_prop": 0.5732738000318929,
"repo_name": "icoxfog417/kanaria",
"id": "5121c1b4cfa4b1a7fbce5de894a4e555f9b69989",
"size": "6331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kanaria/core/service/kintone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47953"
}
],
"symlink_target": ""
} |
"""Models for menu app."""
from __future__ import unicode_literals
import re
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.core.urlresolvers import reverse, NoReverseMatch
@python_2_unicode_compatible
class Menu(models.Model):
    """Menu model: a named navigation menu.

    MenuItem rows attach to a Menu via the reverse relation ``items``.
    """

    # Human-readable menu name; also used as the str() representation.
    name = models.CharField(max_length=255)

    def __str__(self):
        """String representation of Menu."""
        return self.name
@python_2_unicode_compatible
class MenuItem(models.Model):
    """A single entry in a Menu, optionally nested under a parent item.

    ``target`` stores either a literal URL or a reverse()-able view name,
    optionally followed by ``;key=value`` pairs used as reverse kwargs,
    e.g. ``"blog:post-detail;slug=hello"``.
    """

    menu = models.ForeignKey('Menu',
                             on_delete=models.CASCADE,
                             related_name='items',
                             )
    parent = models.ForeignKey('self',
                               on_delete=models.CASCADE,
                               related_name='subitems',
                               blank=True,
                               null=True,
                               )
    name = models.CharField(max_length=255)
    target = models.CharField(max_length=255,
                              blank=True,
                              null=True
                              )
    # When True the item renders as a separator rather than a link.
    separator = models.BooleanField(default=False)

    def __str__(self):
        """String representation of MenuItem."""
        return self.name

    def get_absolute_url(self):
        """Get the absolute url of MenuItem target.

        Returns '#' for a null/blank target (the field allows both), the
        reversed URL when the name resolves, and the raw target otherwise.
        """
        if not self.target:
            # target is nullable/blank: the original code would crash on
            # None.partition here, so degrade to a no-op anchor instead.
            return '#'
        view_name, _, args = self.target.partition(';')
        kwargs = {}
        if args:
            for arg in args.split(';'):
                # partition (not split) so values may themselves contain '='.
                key, _, value = arg.partition('=')
                kwargs[key] = value
        try:
            url = reverse(view_name, kwargs=kwargs)
        except NoReverseMatch:
            # Not a resolvable view name: treat target as a literal URL.
            url = self.target or '#'
        return url
| {
"content_hash": "49e9a18d364234ee2435f974fa29d67b",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 61,
"avg_line_length": 32.017857142857146,
"alnum_prop": 0.5192414947016174,
"repo_name": "gilmrjc/djangopress",
"id": "aba723841ba55434e203f58701eed094bff025ed",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangopress/menus/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2829"
},
{
"name": "HTML",
"bytes": "9393"
},
{
"name": "Python",
"bytes": "41625"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
} |
__author__ = 'mikeknowles, akoziol'
""" Includes threading found in examples:
http://www.troyfawkes.com/learn-python-multithreading-queues-basics/
http://www.ibm.com/developerworks/aix/library/au-threadingpython/
https://docs.python.org/2/library/threading.html
Revised with speed improvements
"""
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML
from threading import Thread
from Queue import Queue
from collections import defaultdict
from cStringIO import StringIO
from glob import glob
import subprocess, os, time, sys, shlex, re, threading, json, mmap, errno
from argparse import ArgumentParser
# Command-line interface.  All arguments are optional and default to the
# original author's local paths — override them on any other machine.
parser = ArgumentParser(description='Performs blast analyses to determine presence of custom targets')
parser.add_argument('-p', '--path', required=False,
                    default='/home/blais/PycharmProjects/pythonGeneSeekr/',
                    help='Specify path for custom folder locations')
parser.add_argument('-c', '--cutoff', required=False, default=0.8,
                    help='The identity cutoff value for BLAST matches. Default is 0.8')
parser.add_argument('-s', '--sequencePath', required=False,
                    default='/home/blais/PycharmProjects/pythonGeneSeekr/sequences',
                    help='The location of the query sequence files')
parser.add_argument('-t', '--targetPath', required=False,
                    default='/home/blais/PycharmProjects/pythonGeneSeekr/Organism',
                    help='The location of the target files')
# Parse the arguments into a dict
args = vars(parser.parse_args())
# Define variables from the arguments - there may be a more streamlined way to do this
# Add trailing slashes to the path variables to ensure consistent formatting (os.path.join)
path = os.path.join(args['path'], "")
cutoff = float(args['cutoff'])
sequencePath = os.path.join(args['sequencePath'], "")
targetPath = os.path.join(args['targetPath'], "")
def make_path(inPath):
    """Create the directory *inPath* (including parents) if it does not exist.

    Equivalent to ``mkdir -p``: an already-existing directory is silently
    accepted, but any other OSError (e.g. permissions) is re-raised.
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        # Only swallow "already exists"; propagate everything else.
        if err.errno != errno.EEXIST:
            raise
def make_dict():
    """Makes Perl-style (autovivifying) dictionaries: accessing a missing
    key creates a fresh nested defaultdict of the same kind."""
    return defaultdict(make_dict)
# Global column counter shared by dotter(); tracks dots printed on the
# current stdout line.
count = 0


def dotter():
    """Emit one progress dot, starting a fresh timestamped line once the
    current line has accumulated more than 80 dots."""
    global count
    if count > 80:
        # Line is full: wrap to a new line prefixed with the current time.
        sys.stdout.write('\n[%s] .' % (time.strftime("%H:%M:%S")))
        count = 1
    else:
        sys.stdout.write('.')
        count += 1
def makeblastdb(dqueue):
    """Daemon worker: build a nucleotide BLAST database for each fasta path
    pulled from *dqueue*, skipping fastas whose .nhr index already exists.
    Runs forever; thread termination relies on the daemon flag.
    """
    while True:  # while daemon
        fastapath = dqueue.get()  # grabs fastapath from dqueue
        # remove the path and the file extension for easier future globbing
        db = fastapath.split(".")[0]
        nhr = "%s.nhr" % db  # .nhr index marks an already-built database
        FNULL = open(os.devnull, 'w')  # discard makeblastdb's output
        if not os.path.isfile(str(nhr)):  # skip dbs that already exist
            subprocess.call(shlex.split("makeblastdb -in %s -dbtype nucl -out %s" % (fastapath, db)), stdout=FNULL, stderr=FNULL)
            # make blastdb
        dotter()
        dqueue.task_done()  # signals to dqueue job is done
    sys.exit()  # NOTE(review): unreachable after `while True` — dead code
# Declare queues, list, and dict shared by the worker threads below.
dqueue = Queue()      # makeblastdb jobs
blastqueue = Queue()  # BLAST search jobs (consumed by runblast)
parsequeue = Queue()  # XML parsing jobs (consumed by multiparser)
testqueue = Queue()   # NOTE(review): appears unused in this module
plusqueue = Queue()   # NOTE(review): appears unused in this module
# plusdict[genomeName][gene][analysisType] -> percent identity (0 = no hit)
plusdict = {}
genedict = defaultdict(list)
# List of (xml_path, path_char, gene, genename) tuples produced by runblast
blastpath = []
# threadlock guards shared-dict updates across worker threads
threadlock = threading.Lock()
def makedbthreads(fastas):
    """Spawn one daemon makeblastdb worker per fasta file, enqueue every
    fasta, and block until all databases have been built."""
    # Create and start threads for each fasta file in the list
    for i in range(len(fastas)):
        # Send the threads to makeblastdb
        threads = Thread(target=makeblastdb, args=(dqueue,))
        # Daemon threads die with the main process instead of blocking exit
        threads.setDaemon(True)
        # Start the threading
        threads.start()
    for fasta in fastas:
        # Add the fasta file to the queue
        dqueue.put(fasta)
    dqueue.join()  # wait on the dqueue until everything has been processed
def xmlout(fasta, genome):
    """Derive the gene/genome names for one BLAST job and build the path of
    its XML report (under "<path>tmp", which is created if missing).

    Returns (path, gene_filename, gene_basename, genome_basename, xml_path).
    """
    global path
    # File name (with extension) of the target gene fasta.
    gene = fasta.split('/')[-1]
    # Base names: strip the directory, then strip the extension.
    genename = gene.split('.')[0]
    genomename = genome.split("/")[-1].split('.')[0]
    # All BLAST XML reports live in a "tmp" folder under the run path.
    tmpPath = "%stmp" % path
    make_path(tmpPath)
    out = "%s/%s.%s.xml" % (tmpPath, genomename, genename)
    return path, gene, genename, genomename, out
def blastparse(blast_handle, genome, gene, analysisType, cutoff):
    """Parse BLAST XML results and populate the global plusdict with percent
    identities.

    Full-length hits at or above *cutoff* are recorded directly; partial
    hits are collected in *incomplete* and stitched together afterwards to
    see whether their combined coverage passes the cutoff.
    """
    global plusdict
    records = NCBIXML.parse(blast_handle)  # Open record from memory-mapped file
    dotter()
    incomplete = []
    genomeName = os.path.basename(genome).split('.')[0]
    for record in records:  # This process is just to retrieve HSPs from xml files
        for alignment in record.alignments:
            for hsp in alignment.hsps:
                threadlock.acquire()  # precaution
                # Calculate the percent identity
                percentIdentity = "%.2f" % float(float(hsp.identities) / float(alignment.length) * 100)
                # If the results are greater than the cutoff value, add them to the dictionary
                if hsp.identities >= alignment.length * cutoff:
                    plusdict[genomeName][gene][analysisType] = percentIdentity
                    threadlock.release()
                    # Exit the loop - I added this as else statement was adding
                    # partial matches to the "good" matches found above
                    break
                else:
                    # Make sure that the gene is not already in the dictionary -
                    # may be redundant with the break statement above
                    if gene not in plusdict[genomeName]:
                        # Puts the HSP in the correct order - hits to the negative strand will be
                        # reversed compared to what we're looking for
                        if hsp.sbjct_start < hsp.sbjct_end:
                            # Append the start coordinates, end coordinates, and the calculated percent ID
                            incomplete.append((hsp.sbjct_start, hsp.sbjct_end, percentIdentity))
                        else:
                            # Reverse the start and end as required
                            incomplete.append((hsp.sbjct_end, hsp.sbjct_start, percentIdentity))
                    threadlock.release()
    # Once the list is completely populated, find if any partial matches add together for a match that passes the cutoff
    if incomplete:
        # Initialise totalPercentID as the first percent ID value in the list
        totalPercentID = float(sorted(incomplete)[0][2])
        # Initialise adjusted percent ID to 0
        adjPercentID = 0
        # I'm not sure if this is necessary here, but I'm not changing it now
        threadlock.acquire()
        # Initialise currentEntry to the current entry
        currentEntry = [sorted(incomplete)[0][0], sorted(incomplete)[0][1], sorted(incomplete)[0][2]]
        # For each entry in the sorted list of incomplete matches
        # Should look something like: [(1, 915, 45.15), (892, 2048, 49.28)]
        for entry in sorted(incomplete):
            # If entry[0] is less than currentEntry[1], which is less than entry[1]
            # From the example above: 892 is less than 915 is less than 2048
            if entry[0] < currentEntry[1] < entry[1]:
                # The fragment length is the length between entry[0] and entry[1]
                # e.g. 2048 - 892 = 1156
                fragLength = len(range(entry[0], entry[1]))
                # Adjusted fragment length is the length between currentEntry[1] and entry[1]
                # e.g. 2048 - 915 = 1133
                adjFragLength = len(range(currentEntry[1], entry[1]))
                # Adjusted percent ID is the currentEntry percent ID + entry percent ID
                # times the correction factor of adjust fragment length over fragment length
                # e.g. 45.15 + (49.28 * (1133/1156) = 45.15 + (49.28 * 0.9801) = 45.15 + 48.2995 = 93.45
                adjPercentID = float(currentEntry[2]) + (float(entry[2]) * adjFragLength / fragLength)
                # Set current entry to currentEntry[0], entry[1], adjusted percent ID
                # e.g. [1, 2048, 93.45]
                currentEntry = [currentEntry[0], entry[1], "%.2f" % adjPercentID]
                # Update totalPercentID
                totalPercentID = "%.2f" % adjPercentID
            # If entry[0] is greater than currentEntry[1] - simpler calculation,
            # as I don't have to calculate adjusted fragement lengths, I just need to add the percent IDs
            # e.g. [(1, 892, 45.15), (915, 2048, 49.28)] - 915 is greater than 892
            elif entry[0] >= currentEntry[1]:
                # Get the percent ID from entry[2]
                # e.g. 49.28
                adjPercentID = float("%.2f" % float(entry[2]))
                # Add the adjusted percent ID to the currentEntry percentID
                # e.g. 45.15 + 49.28 = 94.43
                totalPercentID = "%.2f" % (float(currentEntry[2]) + adjPercentID)
                # Set the current entry as above
                currentEntry = [currentEntry[0], entry[1], totalPercentID]
        # If the total percent ID calculated above is greater than the cutoff, add the results to the dictionary
        # NOTE(review): after the loop totalPercentID is usually a *string*
        # ("%.2f" % ...); in Python 2 any str compares greater than any
        # number, so this test may pass unconditionally — confirm intent.
        if totalPercentID > cutoff * 100:
            plusdict[genomeName][gene][analysisType] = totalPercentID
        threadlock.release()  # precaution for populate dictionary with GIL
class runblast(threading.Thread):
    """Worker thread: performs (or re-parses) one BLASTn search per item
    pulled from blastqueue.  NOTE: this module is Python 2 (`print out`)."""

    def __init__(self, blastqueue):
        self.blastqueue = blastqueue
        threading.Thread.__init__(self)

    def run(self):
        """Consume (genome, fasta, blastexist, analysisType, cutoff) tuples
        from blastqueue forever, running BLAST only when no cached XML
        report exists for the pair."""
        while True:
            global blastpath, plusdict  # global variables, might be a better way to pipe information
            genome, fasta, blastexist, analysisType, cutoff = self.blastqueue.get()  # retrieve variables from queue
            path, gene, genename, genomename, out = xmlout(fasta, genome)  # retrieve from string splitter
            # Precaution
            threadlock.acquire()
            # Add the appropriate variables to blast path
            blastpath.append((out, path[-1], gene, genename,))  # tuple-list
            # Seed plusdict with a 0 (= "no hit yet") for this combination
            try:
                plusdict[genomename][genename] = {analysisType: 0}
            except KeyError:
                plusdict[genomename] = {}
                plusdict[genomename][genename] = {analysisType: 0}
            threadlock.release()
            # Checks to see if this BLAST search has previously been performed
            if not os.path.isfile(out):
                # Print a dot for each gene, genome combination processed
                dotter()
                # Open the output file for writing (NOTE: shadows the builtin `file`)
                file = open(out, 'w')
                # Run the BioPython BLASTn module with the genome as query, fasta(target gene) as db,
                # a mild evalue of 0.1, and XML formatted output
                # Removed perc_identity=percentIdentity from the call, as this allows more flexibility for parsing files
                # with different cutoff values - if I want to re-analyse a search with a lower cutoff, I won't have to
                # re-perform the BLAST search each time
                db = fasta.split(".")[0]
                blastn = NcbiblastnCommandline(query=genome, db=db, evalue=0.1, outfmt=5)
                # Note that there is no output file specified - the search results are currently stored in stdout
                stdout, stderr = blastn()
                # Search stdout for matches - if the term Hsp appears (the .find function will NOT
                # return -1), a match has been found, and stdout is written to file
                if stdout.find('Hsp') != -1:
                    blast_handle = StringIO(stdout)  # Convert string to IO object for use in SearchIO using StringIO
                    blastparse(blast_handle, genome, genename, analysisType, cutoff)  # parse the data already in memory
                    file.write(stdout)  # write the result
                # Close the file
                file.close()
            # If the BLAST results file already exists and is not empty, then parse the results
            elif os.path.getsize(out) != 0:
                # Open the file
                handle = open(out)
                # Read the file into memory
                mm = mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ)
                # Parse the file in a multithreaded manner
                parsequeue.put((out, genome, genename, mm, analysisType, cutoff))
                # Join all the threads
                parsequeue.join()
            # Error catching?
            if not any(blastpath):
                print out
            self.blastqueue.task_done()
def blastnthreads(fastas, genomes, analysisType, cutoff):
    """Enqueue one BLAST job per (genome, target-fasta) pair and block until
    the runblast workers have processed them all."""
    blastexist = {}
    # Fan every combination out to the worker threads via the shared queue.
    for genome_file in genomes:
        for target_fasta in fastas:
            blastqueue.put((genome_file, target_fasta, blastexist, analysisType, cutoff))
    # Wait for every queued job to be marked done.
    blastqueue.join()
class multiparser(threading.Thread):  # Had to convert this to a class to integrate threading lock
    """Worker thread: parses memory-mapped BLAST XML reports pulled from
    parsequeue and feeds them to blastparse."""

    def __init__(self, parsequeue):
        self.parsequeue = parsequeue
        threading.Thread.__init__(self)

    def run(self):
        """Consume parse jobs forever, closing each mmap when done."""
        while True:  # General Loop
            global plusdict, genedict  # Import global elements to populate, there may be a better way to do this
            xml, genome, gene, mm, analysisType, cutoff = self.parsequeue.get()  # Retrieve data from queue
            blastparse(mm, genome, gene, analysisType, cutoff)
            mm.close()
            self.parsequeue.task_done()
def organismChooser(path,targetPath):
    """Choose the organism/target set for the analysis.

    If *targetPath* itself contains .fa files it is treated as a custom
    target folder; otherwise the user is prompted interactively to pick one
    of the subfolders of <path>Organism.
    Returns (queryGenes, qualityGenes, organismName).
    """
    # Initialise a count variable to be used in extracting the desired entry from a list of organisms
    count = 0
    # Check to see if the supplied targetPath has .fa files - if it does, then the default directory structure is probably
    # not being followed, so the target files will be in targetPath
    foldertest = glob("%s/*.fa*" % targetPath)
    if foldertest:
        # Set the required variables as necessary
        # queryGenes are the target fastas found directly in targetPath
        queryGenes = foldertest
        # There are likely not going to be qualityGenes included in a custom analysis
        qualityGenes = []
        # Organism name is simply the name of the folder containing the targets
        organismName = targetPath.split("/")[-2]
    else:
        # Get a list of the organisms in the (default) Organism subfolder
        orgList = glob("%sOrganism/*" % path)
        # Iterate through the sorted list
        for folder in sorted(orgList):
            # Ensure that folder is, in actuality, a folder
            if os.path.isdir(folder):
                # Print out the folder names and the count
                print "[%s]: %s" % (count, os.path.split(folder)[1])
                count += 1
        # Get the user input - the number entered corresponds to the list index
        # NOTE(review): Python 2 input() *evaluates* the typed text;
        # raw_input() would be the safer choice here.
        response = input("Please select an organism: ")
        # Get the organism path into a variable
        organism = sorted(orgList)[int(response)]
        organismName = os.path.split(organism)[1]
        # Put the query and quality genes into lists
        queryGenes = glob("%s/query_genes/*.fa" % organism)
        qualityGenes = glob("%s/qualityTest/*.tfa" % organism)
    return queryGenes, qualityGenes, organismName
def blaster(path, cutoff, sequencePath, targetPath):
    """Run the whole GeneSeekr pipeline (the stack manager of the module).

    Builds BLAST databases for the chosen organism's target genes, BLASTs
    every genome in *sequencePath* against them, and writes one CSV report
    per analysis type ("query" / "quality") under <path>reports.

    ALL PATHS REQUIRE TRAILING SLASHES!!!
    """
    # Time is used to calculate length of the analyses
    start = time.time()
    # Import global variables
    global count, genedict, blastpath, plusdict
    # Initialise genedict
    genedict = defaultdict(list)
    blastpath = []
    # Run organism chooser to allow the user to choose which databases to use
    # returns the organism name, and lists of query/quality target genes
    queryGenes, qualityGenes, organismName = organismChooser(path, targetPath)
    # Get the genome files into a list - note that they must be in the "sequences" subfolder of the path,
    # and the must have a file extension beginning with ".fa"
    strains = glob("%s*.fa*" % sequencePath)
    # Create the threads for the BLAST analysis
    for i in range(len(strains)):
        threads = runblast(blastqueue)
        threads.setDaemon(True)
        parthreads = multiparser(parsequeue)
        parthreads.setDaemon(True)
        threads.start()
        parthreads.start()
    sys.stdout.write("[%s] Creating necessary databases for BLAST" % (time.strftime("%H:%M:%S")))
    # Push targets to threads
    makedbthreads(queryGenes)
    # Quality test genes are optional, so only run the qualityGenes if the folder exists
    if qualityGenes:
        makedbthreads(qualityGenes)
    # There appears to be a race condition going on with the creation of the BLAST databases and the running of BLAST.
    print "\n[%s] BLAST database(s) created" % (time.strftime("%H:%M:%S"))
    print "[%s] Now performing and parsing BLAST database searches" % (time.strftime("%H:%M:%S"))
    sys.stdout.write('[%s] ' % (time.strftime("%H:%M:%S")))
    # Make blastn threads and retrieve xml file locations
    blastnthreads(queryGenes, strains, "query", cutoff)
    # qualityGenes optional
    if qualityGenes:
        blastnthreads(qualityGenes, strains, "quality", cutoff)
    # Initialise types dictionary
    types = {}
    # Populate types
    types["query"] = queryGenes
    if qualityGenes:
        types["quality"] = qualityGenes
    csvheader = ''
    # Loop through the analysis types, and make outputs as required
    for analysisType in types:
        # Initialise variables
        row = ""
        rowcount = 0
        # plusdict contains all results - format: plusdict[genomeName][gene][analysisType] = totalPercentID
        for genomerow in plusdict:
            # The first cell contains the word "Strain"
            csvheader = 'Strain'
            # Append the genome names taken from genomerow
            row += "\n" + genomerow.split('/')[-1].split('.')[0]
            # Increment rowcount
            rowcount += 1
            # For each gene in the appropriate analysis type
            for generow in sorted(types[analysisType]):
                # Extract the gene name from generow
                genename = os.path.basename(generow).split('.')[0]
                # If the gene name is not already in the csvheader variable
                if genename not in csvheader:
                    # Append the gene name in a comma-separated format
                    csvheader += ',' + genename
                # Partial matches (above cutoff but below 100%) are reported
                # as an explicit percentage
                identity = plusdict[genomerow][genename][analysisType]
                if cutoff * 100 < float(identity) < 100:
                    row += ',' + str(plusdict[genomerow][genename][analysisType]) + "%"
                # A stored 0 means no match was found: shown as 'N'
                elif identity == 0:
                    row += ',N'
                # Anything else (including 100% matches) is shown as '+'
                else:
                    row += ',+'
        # Open the csv report in the appropriate location - add the organism name and the date to keep reports unique
        make_path("%sreports" % path)
        with open("%sreports/%s_%s_results_%s.csv" % (path, organismName, analysisType, time.strftime("%Y.%m.%d.%H.%M.%S")), 'wb') as csvfile:
            # Write the header and the rows
            csvfile.write(csvheader)
            csvfile.write(row)
    # Calculate the elapsed time
    end = time.time() - start
    # Friendly exit statement
    print "\n[%s] Elapsed time for GeneSeeking is %.2f seconds with %.2f seconds per genome" \
          % (time.strftime("%H:%M:%S"), end, end/float(len(strains)))
# Run the blaster function.
# NOTE(review): this executes on *import* as well as when run as a script;
# consider guarding it with `if __name__ == '__main__':`.
blaster(path, cutoff, sequencePath, targetPath)
| {
"content_hash": "e4ccb9c02dee028e560d20f3143c118d",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 142,
"avg_line_length": 49.5635103926097,
"alnum_prop": 0.6214528679931037,
"repo_name": "adamkoziol/pythonGeneSeekr",
"id": "1af2bff8d717a2f2367f419e98e8443541407421",
"size": "21480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GeneSeekr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34205"
}
],
"symlink_target": ""
} |
from ClassyClient import ClassyClient
from ClassyClientResponse import ClassyClientResponse
from Exceptions import ClassyAuthError, ClassyRequestError, ClassyNotACollection
| {
"content_hash": "522595017cbb8e62c4a11f8ba560e7d4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 80,
"avg_line_length": 57.666666666666664,
"alnum_prop": 0.9075144508670521,
"repo_name": "dnussbaum/classy-python-client-library",
"id": "1c78306792a4b80e30e9fa8bfdc3930aacce2966",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classyclient/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6374"
}
],
"symlink_target": ""
} |
import uuid
class User(object):
    """Account record: credentials, challenge/response state, owned files,
    and the remaining API-credit balance."""

    def __init__(self):
        # Random 32-character hex identifier for the account.
        self.id = uuid.uuid4().hex
        # Unpopulated until the user registers / authenticates.
        self.username = self.passwordHash = None
        self.authToken = self.clientId = self.files = None
        # Every new account starts with 15 API credits.
        self.apiCredits = 15
        self.apiChallenge = self.apiResponse = None
class File(object):
    """Metadata for a stored file, which is split into Blocks."""

    def __init__(self):
        # Random 32-character hex identifier.
        self.id = uuid.uuid4().hex
        self.name = self.size = self.originalSize = None
        # Net size change accumulated during file updates.
        self.deltasize = 0
        self.status = None
        # Per-instance list of the file's blocks (fresh list per File).
        self.blocks = []
        self.clientId = self.userId = None
        self.hash = None
class Block(object):
    """One contiguous region of a File, made up of Shards."""

    def __init__(self):
        # Random 32-character hex identifier.
        self.id = uuid.uuid4().hex
        self.fileId = self.offset = None
        self.shardCount = self.onlineShards = None
        # Per-instance list of the block's shards.
        self.shards = []
        self.hash = None
class Shard(object):
    """A fragment of a Block stored on a single client."""

    def __init__(self):
        # Random 32-character hex identifier.
        self.id = uuid.uuid4().hex
        self.size = self.offset = None
        self.clientId = self.blockId = self.fileId = None
        self.status = self.hash = None
class Client(object):
    """A participating storage node: quotas, used space, and hosted shards."""

    def __init__(self):
        # Random 32-character hex identifier.
        self.id = uuid.uuid4().hex
        self.userId = self.ip = None
        self.systemStatus = self.initStatus = None
        self.userQuota = self.systemQuota = None
        self.userSpace = self.systemSpace = None
        self.userReservedSpace = self.systemReservedSpace = None
        # Per-instance list of shards hosted on this client.
        self.shards = []
"content_hash": "2f5c455d1a616398ba6040c9a0d12bc5",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 56,
"avg_line_length": 23.161290322580644,
"alnum_prop": 0.6288300835654597,
"repo_name": "somethingnew2-0/CS739-ShareBox",
"id": "1b0e0a31da70ba96017d0e95df698e12216b1890",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-server/server/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "82558"
},
{
"name": "Python",
"bytes": "31202"
},
{
"name": "Shell",
"bytes": "949"
},
{
"name": "Thrift",
"bytes": "417"
}
],
"symlink_target": ""
} |
from mongoengine import connect, Document
import pytest
DB_NAME = 'test_mongonengine_objectidmapfield'
@pytest.fixture(autouse=True)
def doctests_fixture(doctest_namespace):
    """Make mongoengine's Document available inside every doctest."""
    doctest_namespace['Document'] = Document
@pytest.fixture(scope='session', autouse=True)
def conn(request):
    """Session-wide mongoengine connection to a scratch test database.

    The database is dropped again when the test session finishes.
    """
    conn = connect(DB_NAME)

    def teardown():
        # Remove all test data so repeated runs start clean.
        conn.drop_database(DB_NAME)
    request.addfinalizer(teardown)
    return conn
| {
"content_hash": "dd2ff6cc9c323c7f4e0a466af942cef9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 46,
"avg_line_length": 20.38095238095238,
"alnum_prop": 0.7313084112149533,
"repo_name": "peergradeio/mongoengine-objectidmapfield",
"id": "caf62f8c3a041697550e4fd6df89b240837e3936",
"size": "428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8628"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.